Dataset columns: text (string, lengths 5 to 631k), id (string, lengths 14 to 178), metadata (dict), __index_level_0__ (int64, 0 to 647).
//! # Denoising Diffusion Implicit Models //! //! The Denoising Diffusion Implicit Models (DDIM) is a simple scheduler //! similar to Denoising Diffusion Probabilistic Models (DDPM). The DDPM //! generative process is the reverse of a Markovian process, DDIM generalizes //! this to non-Markovian guidance. //! //! Denoising Diffusion Implicit Models, J. Song et al, 2020. //! https://arxiv.org/abs/2010.02502 use super::schedulers::{ betas_for_alpha_bar, BetaSchedule, PredictionType, Scheduler, SchedulerConfig, TimestepSpacing, }; use candle::{Result, Tensor}; /// The configuration for the DDIM scheduler. #[derive(Debug, Clone, Copy)] pub struct DDIMSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// The amount of noise to be added at each step. pub eta: f64, /// Adjust the indexes of the inference schedule by this value. pub steps_offset: usize, /// prediction type of the scheduler function, one of `epsilon` (predicting /// the noise of the diffusion process), `sample` (directly predicting the noisy sample`) /// or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) pub prediction_type: PredictionType, /// number of diffusion steps used to train the model pub train_timesteps: usize, /// time step spacing for the diffusion process pub timestep_spacing: TimestepSpacing, } impl Default for DDIMSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085f64, beta_end: 0.012f64, beta_schedule: BetaSchedule::ScaledLinear, eta: 0., steps_offset: 1, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, timestep_spacing: TimestepSpacing::Leading, } } } impl SchedulerConfig for DDIMSchedulerConfig { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> { Ok(Box::new(DDIMScheduler::new(inference_steps, *self)?)) } } /// The DDIM scheduler. #[derive(Debug, Clone)] pub struct DDIMScheduler { timesteps: Vec<usize>, alphas_cumprod: Vec<f64>, step_ratio: usize, init_noise_sigma: f64, pub config: DDIMSchedulerConfig, } // clip_sample: False, set_alpha_to_one: False impl DDIMScheduler { /// Creates a new DDIM scheduler given the number of steps to be /// used for inference as well as the number of steps that was used /// during training. fn new(inference_steps: usize, config: DDIMSchedulerConfig) -> Result<Self> { let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = match config.timestep_spacing { TimestepSpacing::Leading => (0..(inference_steps)) .map(|s| s * step_ratio + config.steps_offset) .rev() .collect(), TimestepSpacing::Trailing => std::iter::successors(Some(config.train_timesteps), |n| { if *n > step_ratio { Some(n - step_ratio) } else { None } }) .map(|n| n - 1) .collect(), TimestepSpacing::Linspace => { super::utils::linspace(0.0, (config.train_timesteps - 1) as f64, inference_steps)? .to_vec1::<f64>()? .iter() .map(|&f| f as usize) .rev() .collect() } }; let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)? 
} BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } Ok(Self { alphas_cumprod, timesteps, step_ratio, init_noise_sigma: 1., config, }) } } impl Scheduler for DDIMScheduler { /// Performs a backward step during inference. fn step(&mut self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; // https://github.com/huggingface/diffusers/blob/6e099e2c8ce4c4f5c7318e970a8c093dc5c7046e/src/diffusers/schedulers/scheduling_ddim.py#L195 let prev_timestep = timestep.saturating_sub(self.step_ratio); let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = self.alphas_cumprod[prev_timestep]; let beta_prod_t = 1. - alpha_prod_t; let beta_prod_t_prev = 1. - alpha_prod_t_prev; let (pred_original_sample, pred_epsilon) = match self.config.prediction_type { PredictionType::Epsilon => { let pred_original_sample = ((sample - (model_output * beta_prod_t.sqrt())?)? * (1. / alpha_prod_t.sqrt()))?; (pred_original_sample, model_output.clone()) } PredictionType::VPrediction => { let pred_original_sample = ((sample * alpha_prod_t.sqrt())? - (model_output * beta_prod_t.sqrt())?)?; let pred_epsilon = ((model_output * alpha_prod_t.sqrt())? + (sample * beta_prod_t.sqrt())?)?; (pred_original_sample, pred_epsilon) } PredictionType::Sample => { let pred_original_sample = model_output.clone(); let pred_epsilon = ((sample - &pred_original_sample * alpha_prod_t.sqrt())? * (1. / beta_prod_t.sqrt()))?; (pred_original_sample, pred_epsilon) } }; let variance = (beta_prod_t_prev / beta_prod_t) * (1. - alpha_prod_t / alpha_prod_t_prev); let std_dev_t = self.config.eta * variance.sqrt(); let pred_sample_direction = (pred_epsilon * (1. - alpha_prod_t_prev - std_dev_t * std_dev_t).sqrt())?; let prev_sample = ((pred_original_sample * alpha_prod_t_prev.sqrt())? + pred_sample_direction)?; if self.config.eta > 0. { &prev_sample + Tensor::randn( 0f32, std_dev_t as f32, prev_sample.shape(), prev_sample.device(), )? } else { Ok(prev_sample) } } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor> { Ok(sample) } fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; let sqrt_alpha_prod = self.alphas_cumprod[timestep].sqrt(); let sqrt_one_minus_alpha_prod = (1.0 - self.alphas_cumprod[timestep]).sqrt(); (original * sqrt_alpha_prod)? + (noise * sqrt_one_minus_alpha_prod)? } fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
candle/candle-transformers/src/models/stable_diffusion/ddim.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddim.rs", "repo_id": "candle", "token_count": 3904 }
63
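The DDIM scheduler in `ddim.rs` above is driven from a sampling loop that repeatedly asks a denoising model for a noise estimate and steps the latents backwards in time. The sketch below illustrates that loop under stated assumptions: the `unet` closure, the latent shape `(1, 4, 64, 64)`, the step count of 30, and the import paths are placeholders rather than anything defined in the file; only `build`, `init_noise_sigma`, `timesteps`, `scale_model_input`, and `step` come from the scheduler API shown above.

```rust
use candle::{Device, Result, Tensor};
use candle_transformers::models::stable_diffusion::ddim::DDIMSchedulerConfig;
use candle_transformers::models::stable_diffusion::schedulers::{Scheduler, SchedulerConfig};

// `unet` stands in for whatever model produces the noise prediction at a timestep.
fn denoise(unet: impl Fn(&Tensor, usize) -> Result<Tensor>) -> Result<Tensor> {
    let device = Device::Cpu;
    // Build a boxed scheduler for 30 inference steps from the default config.
    let mut scheduler = DDIMSchedulerConfig::default().build(30)?;
    // Start from Gaussian noise scaled by the initial sigma (1.0 for DDIM).
    let mut latents = (Tensor::randn(0f32, 1f32, (1, 4, 64, 64), &device)?
        * scheduler.init_noise_sigma())?;
    let timesteps = scheduler.timesteps().to_vec();
    for &t in timesteps.iter() {
        // DDIM does not rescale its input, but calling this keeps the loop
        // interchangeable with schedulers that do.
        let input = scheduler.scale_model_input(latents.clone(), t)?;
        let noise_pred = unet(&input, t)?;
        // One reverse step: predict x0 and move to the previous timestep.
        latents = scheduler.step(&noise_pred, t, &latents)?;
    }
    Ok(latents)
}
```

Because `build` returns a `Box<dyn Scheduler>`, the same loop works unchanged for the other schedulers behind that trait.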
//! TrOCR model implementation. //! //! TrOCR is a Transformer-based OCR model that uses a Vision Transformer encoder //! and a BART-like decoder for optical character recognition. //! //! Key characteristics: //! - Vision Transformer encoder for image processing //! - BART-style decoder for text generation //! - Learned positional embeddings //! - Layer normalization and self-attention //! //! References: //! - [Paper](https://arxiv.org/abs/2109.10282) //! - [Model Card](https://huggingface.co/microsoft/trocr-base-handwritten) //! use crate::models::vit::{Config, Embeddings, Encoder}; use candle::{DType, Result, Tensor}; use candle_nn::{ embedding, layer_norm, linear_no_bias, Embedding, LayerNorm, Linear, Module, VarBuilder, }; fn default_tie_word_embeddings() -> bool { true } fn default_use_learned_position_embeddings() -> bool { true } #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct TrOCRConfig { pub vocab_size: usize, pub d_model: usize, pub cross_attention_hidden_size: usize, pub decoder_layers: usize, pub decoder_attention_heads: usize, pub decoder_ffn_dim: usize, pub activation_function: candle_nn::Activation, pub max_position_embeddings: usize, pub dropout: f64, pub attention_dropout: f64, pub activation_dropout: f64, pub decoder_start_token_id: u32, pub init_std: f64, pub decoder_layerdrop: f64, pub use_cache: bool, pub scale_embedding: bool, pub pad_token_id: usize, pub bos_token_id: usize, pub eos_token_id: u32, pub decoder_vocab_size: Option<usize>, #[serde(default = "default_use_learned_position_embeddings")] pub use_learned_position_embeddings: bool, #[serde(default = "default_tie_word_embeddings")] pub tie_word_embeddings: bool, } impl Default for TrOCRConfig { fn default() -> Self { Self { vocab_size: 50265, d_model: 1024, cross_attention_hidden_size: 768, decoder_layers: 12, decoder_attention_heads: 16, decoder_ffn_dim: 4096, activation_function: candle_nn::Activation::Gelu, max_position_embeddings: 512, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, decoder_start_token_id: 2, init_std: 0.02, decoder_layerdrop: 0.0, use_cache: true, scale_embedding: false, pad_token_id: 1, bos_token_id: 0, eos_token_id: 2, decoder_vocab_size: Some(50265), use_learned_position_embeddings: true, tie_word_embeddings: true, } } } #[derive(Debug, Clone)] struct TrOCRLearnedPositionalEmbedding { offset: usize, weights: Embedding, } impl TrOCRLearnedPositionalEmbedding { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let offset: usize = 2; let num_embeddings = cfg.max_position_embeddings; let embedding_dim = cfg.d_model; let weights = embedding(num_embeddings + offset, embedding_dim, vb)?; Ok(Self { offset, weights }) } fn new_sinusoidal(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { // https://github.com/huggingface/transformers/blob/58e3d23e97078f361a533b9ec4a6a2de674ea52a/src/transformers/models/trocr/modeling_trocr.py#L81 let embedding_dim = cfg.d_model; let half_dim = embedding_dim / 2; let num_positions = cfg.max_position_embeddings + cfg.pad_token_id + 1; let dev = vb.device(); let inv_freq: Vec<_> = (0..half_dim) .map(|i| 1f32 / 10000f32.powf(i as f32 / (half_dim - 1) as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, num_positions as u32, dev)? .to_dtype(DType::F32)? 
.reshape((num_positions, 1))?; let freqs = t.matmul(&inv_freq)?; let emb = Tensor::cat(&[freqs.sin()?, freqs.cos()?], 1)?; let emb = Tensor::cat( &[ emb.narrow(0, 0, cfg.pad_token_id)?, Tensor::zeros((1, embedding_dim), DType::F32, dev)?, emb.narrow(0, cfg.pad_token_id + 1, cfg.max_position_embeddings)?, ], 0, )? .contiguous()?; let emb = Embedding::new(emb, embedding_dim); Ok(Self { offset: cfg.pad_token_id + 1, weights: emb, }) } fn forward(&mut self, input_ids: &Tensor, past_key_values_length: u32) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let positions = Tensor::arange( past_key_values_length, seq_len as u32 + past_key_values_length, input_ids.device(), )? .expand((b_sz, seq_len))?; let positions = positions.broadcast_add(&Tensor::new(self.offset as u32, input_ids.device())?)?; self.weights.forward(&positions) } } #[derive(Debug, Clone)] struct TrOCRAttention { head_dim: usize, num_heads: usize, is_decoder: bool, scaling: f64, k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, } impl TrOCRAttention { fn load( vb: VarBuilder, cfg: &TrOCRConfig, kdim: Option<usize>, vdim: Option<usize>, ) -> Result<Self> { let embed_dim = cfg.d_model; let num_heads = cfg.decoder_attention_heads; let head_dim = embed_dim / num_heads; let kdim = kdim.unwrap_or(embed_dim); let vdim = vdim.unwrap_or(embed_dim); let k_proj = linear_no_bias(kdim, embed_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(vdim, embed_dim, vb.pp("v_proj"))?; let q_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("q_proj"))?; let out_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("out_proj"))?; Ok(Self { head_dim, num_heads, is_decoder: true, scaling: 1. / (head_dim as f64).sqrt(), k_proj, v_proj, q_proj, out_proj, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> { tensor .reshape((bsz, (), self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward( &mut self, xs: &Tensor, kv_states: Option<&Tensor>, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (xs.apply(&self.q_proj)? 
* self.scaling)?; let (key_states, value_states) = match kv_states { None => { let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?; if self.is_decoder { let kv_states = match &self.kv_cache { None => (key_states, value_states), Some((p_key_states, p_value_states)) => { let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?; let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some(kv_states.clone()); kv_states } else { (key_states, value_states) } } Some(kv_states) => { let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?; (key_states, value_states) } }; let proj_shape = (b_sz * self.num_heads, (), self.head_dim); let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?; let key_states = key_states.reshape(proj_shape)?; let value_states = value_states.reshape(proj_shape)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let attn_weights = match attn_mask { None => attn_weights, Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?, }; let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_probs.matmul(&value_states)?; attn_output .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))? .transpose(1, 2)? .reshape((b_sz, tgt_len, self.head_dim * self.num_heads))? .apply(&self.out_proj) } } #[derive(Debug, Clone)] struct TrOCRDecoderLayer { self_attn: TrOCRAttention, activation_fn: candle_nn::Activation, self_attn_layer_norm: LayerNorm, encoder_attn: TrOCRAttention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, } impl TrOCRDecoderLayer { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let embed_dim = cfg.d_model; let self_attn = TrOCRAttention::load(vb.pp("self_attn"), cfg, None, None)?; let self_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn = TrOCRAttention::load( vb.pp("encoder_attn"), cfg, Some(cfg.cross_attention_hidden_size), Some(cfg.cross_attention_hidden_size), )?; let encoder_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear_no_bias(embed_dim, cfg.decoder_ffn_dim, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.decoder_ffn_dim, embed_dim, vb.pp("fc2"))?; let final_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, activation_fn: cfg.activation_function, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, }) } fn reset_kv_cache(&mut self) { self.self_attn.reset_kv_cache(); } fn forward( &mut self, xs: &Tensor, attention_mask: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs.clone(); let xs = self.self_attn.forward(xs, None, Some(attention_mask))?; let xs = (xs + residual)?; let mut xs = self.self_attn_layer_norm.forward(&xs)?; if let Some(encoder_hidden_states) = &encoder_hidden_states { let residual = xs.clone(); let encoder_attention_mask = attention_mask.clone(); // TODO xs = self.encoder_attn.forward( &xs, Some(encoder_hidden_states), Some(&encoder_attention_mask), )?; xs = (xs + residual)?; xs = self.encoder_attn_layer_norm.forward(&xs)? 
} let residual = xs.clone(); let xs = self.fc1.forward(&xs)?; let xs = self.activation_fn.forward(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = (xs + residual)?; let xs = self.final_layer_norm.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCRDecoder { layers: Vec<TrOCRDecoderLayer>, embed_scale: Option<f64>, embed_tokens: Embedding, embed_positions: TrOCRLearnedPositionalEmbedding, } impl TrOCRDecoder { fn new(cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("decoder.model.decoder"); let embed_tokens = embedding(cfg.vocab_size, cfg.d_model, vb.pp("embed_tokens"))?; let embed_positions = if cfg.use_learned_position_embeddings { TrOCRLearnedPositionalEmbedding::load(vb.pp("embed_positions"), cfg)? } else { TrOCRLearnedPositionalEmbedding::new_sinusoidal(vb.pp("embed_positions"), cfg)? }; let mut layers = Vec::with_capacity(cfg.decoder_layers); let vb_l = vb.pp("layers"); for idx in 0..cfg.decoder_layers { let layer = TrOCRDecoderLayer::load(vb_l.pp(idx), cfg)?; layers.push(layer) } let embed_scale = if cfg.scale_embedding { Some((cfg.d_model as f64).sqrt()) } else { None }; Ok(Self { layers, embed_scale, embed_tokens, embed_positions, }) } fn reset_kv_cache(&mut self) { self.layers.iter_mut().for_each(|l| l.reset_kv_cache()) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let embed_pos = self.embed_positions.forward(xs, past_kv_len as u32)?; let xs = xs.apply(&self.embed_tokens)?; let xs = match self.embed_scale { None => xs, Some(scale) => (xs * scale)?, }; let mut xs = xs.broadcast_add(&embed_pos)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attn_mask, encoder_xs)?; } Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCREncoder { embeddings: Embeddings, encoder: Encoder, layernorm: LayerNorm, } impl TrOCREncoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_v = vb.pp("encoder"); let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?; Ok(Self { embeddings, encoder, layernorm, }) } pub fn forward(&self, xs: &Tensor) -> Result<Tensor> { let embedding_output = self.embeddings.forward(xs, None, false)?; let encoder_outputs = self.encoder.forward(&embedding_output)?; self.layernorm.forward(&encoder_outputs) } } #[derive(Debug, Clone)] pub struct TrOCRForCausalLM { decoder: TrOCRDecoder, output_projection: Linear, } impl TrOCRForCausalLM { pub fn new(decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let decoder = TrOCRDecoder::new(decoder_cfg, vb.clone())?; let output_projection = if decoder_cfg.tie_word_embeddings { candle_nn::Linear::new(decoder.embed_tokens.embeddings().clone(), None) } else { candle_nn::linear_no_bias( decoder_cfg.d_model, decoder_cfg.vocab_size, vb.pp("decoder.output_projection"), )? 
}; Ok(Self { decoder, output_projection, }) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let xs = self .decoder .forward(xs, encoder_xs, past_kv_len, attn_mask)?; let xs = xs.apply(&self.output_projection)?; Ok(xs) } fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } } #[derive(Debug, Clone)] pub struct TrOCRModel { encoder: TrOCREncoder, decoder: TrOCRForCausalLM, } impl TrOCRModel { pub fn new(encoder_cfg: &Config, decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let encoder = TrOCREncoder::new(encoder_cfg, vb.clone())?; let decoder = TrOCRForCausalLM::new(decoder_cfg, vb)?; Ok(Self { encoder, decoder }) } pub fn encoder(&mut self) -> &mut TrOCREncoder { &mut self.encoder } pub fn decoder(&mut self) -> &mut TrOCRForCausalLM { &mut self.decoder } pub fn decode( &mut self, xs: &Tensor, encoder_xs: &Tensor, past_kv_len: usize, ) -> Result<Tensor> { let seq_len = xs.dim(1)?; let mask: Vec<_> = (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?; self.decoder .forward(xs, Some(encoder_xs), past_kv_len, &mask) } pub fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } }
candle/candle-transformers/src/models/trocr.rs/0
{ "file_path": "candle/candle-transformers/src/models/trocr.rs", "repo_id": "candle", "token_count": 8631 }
64
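Given the encoder and decoder in `trocr.rs` above, inference is an encode-once, decode-step-by-step loop. The following sketch is a hedged illustration: the preprocessed `image` tensor, the greedy `argmax` selection, and `max_len` are assumptions introduced here, while `encoder().forward`, `decode`, `reset_kv_cache`, `decoder_start_token_id`, and `eos_token_id` come from the file above.

```rust
use candle::{IndexOp, Result, Tensor};
use candle_transformers::models::trocr::{TrOCRConfig, TrOCRModel};

fn generate(
    model: &mut TrOCRModel,
    image: &Tensor, // a preprocessed image batch for the ViT encoder
    cfg: &TrOCRConfig,
    max_len: usize,
) -> Result<Vec<u32>> {
    // Encode the image once; the decoder cross-attends to these hidden states.
    let encoder_xs = model.encoder().forward(image)?;
    let mut tokens = vec![cfg.decoder_start_token_id];
    for index in 0..max_len {
        // After the first step only the newest token is fed; earlier keys and
        // values are held in the decoder's kv-cache.
        let context = if index > 0 {
            &tokens[tokens.len() - 1..]
        } else {
            &tokens[..]
        };
        let input = Tensor::new(context, image.device())?.unsqueeze(0)?;
        let logits = model.decode(&input, &encoder_xs, tokens.len() - context.len())?;
        // Greedy choice of the next token from the last position's logits.
        let logits = logits.i((0, logits.dim(1)? - 1))?;
        let next = logits.argmax(0)?.to_scalar::<u32>()?;
        if next == cfg.eos_token_id {
            break;
        }
        tokens.push(next);
    }
    model.reset_kv_cache();
    Ok(tokens)
}
```

Resetting the kv-cache at the end keeps the decoder reusable for the next image, mirroring the `reset_kv_cache` plumbing in the file.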
//! Würstchen Efficient Diffusion Model //! //! Würstchen is an efficient diffusion model architecture for generating images using //! a two-stage approach with a small decoder and prior network. //! //! - 💻 [GH Link](https://github.com/dome272/Wuerstchen) //! - 🤗 [HF Link](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py) //! - 📝 [Paper](https://openreview.net/pdf?id=gU58AyJlYz) //! //! ## Example //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" alt="" width=320> //! <p>"Anthropomorphic cat dressed as a fire fighter"</p> //! </div> pub mod attention_processor; pub mod common; pub mod ddpm; pub mod diffnext; pub mod paella_vq; pub mod prior;
candle/candle-transformers/src/models/wuerstchen/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/mod.rs", "repo_id": "candle", "token_count": 302 }
65
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::{ generation::LogitsProcessor, models::{moondream, quantized_moondream}, }; use candle_wasm_example_moondream::console_log; use js_sys::Date; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { Moondream(moondream::Model), Quantized(quantized_moondream::Model), } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: Tokenizer, logits_processor: LogitsProcessor, tokens: Vec<u32>, repeat_penalty: f32, repeat_last_n: usize, index: usize, bos_token: Option<Tensor>, image_embeddings: Option<Tensor>, } #[derive(Serialize, Deserialize)] struct Output { token: String, token_id: u32, } #[derive(Serialize, Deserialize)] struct InitInput { prompt: String, seed: u64, temp: f64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load(weights: Vec<u8>, tokenizer: Vec<u8>, quantized: bool) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = Device::Cpu; let config = moondream::Config::v2(); console_log!("config loaded in {:?}", Date::now()); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let start = Date::now(); console_log!("weights len: {:?}", weights.len()); let model = if quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf_buffer( &weights, &device, )?; console_log!("weights loaded"); let model = quantized_moondream::Model::new(&config, vb)?; SelectedModel::Quantized(model) } else { let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let model = moondream::Model::new(&config, vb)?; SelectedModel::Moondream(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); let logits_processor = LogitsProcessor::new(299792458, None, None); Ok(Self { model, tokenizer, tokens: vec![], logits_processor, repeat_penalty: 1., repeat_last_n: 64, bos_token: None, image_embeddings: None, index: 0, }) } pub fn set_image_embeddings(&mut self, image: Vec<u8>) -> Result<(), JsError> { let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds = &image.unsqueeze(0)?; let image_embeds = match &self.model { SelectedModel::Moondream(ref m) => image_embeds.apply(m.vision_encoder())?, SelectedModel::Quantized(ref m) => image_embeds.apply(m.vision_encoder())?, }; console_log!( "loaded and encoded the image {image:?} in {:?}", (Date::now() - start) / 1000. ); self.image_embeddings = Some(image_embeds); Ok(()) } #[wasm_bindgen] pub fn init_with_image_prompt(&mut self, input: JsValue) -> Result<JsValue, JsError> { let InitInput { prompt, seed, temp, top_p, repeat_penalty, repeat_last_n, verbose_prompt, } = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = Device::Cpu; let prompt = format!("\n\nQuestion: {prompt}\n\nAnswer:"); match &mut self.model { SelectedModel::Moondream(m) => m.text_model.clear_kv_cache(), SelectedModel::Quantized(m) => m.text_model.clear_kv_cache(), }; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1. 
{ None } else { Some(top_p) }; self.logits_processor = LogitsProcessor::new(seed, temp, top_p); self.repeat_penalty = repeat_penalty; self.repeat_last_n = repeat_last_n; self.tokens.clear(); self.index = 0; // Moondream tokenizer bos_token is "<|endoftext|>" // https://huggingface.co/vikhyatk/moondream2/blob/main/special_tokens_map.json let special_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => return Err(JsError::new("BOS token not found in the tokenizer.")), }; self.bos_token = Some(Tensor::new(&[special_token], &device)?.unsqueeze(0)?); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))?; if tokens.is_empty() { return Err(JsError::new( "Empty prompts are not supported in the Moondream model.", )); } if verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let tokens = tokens.get_ids().to_vec(); let text = match self.process(&tokens) { Ok(text) => text, Err(_e) => { console_log!("error decoding token"); Output { token: "".to_string(), token_id: 0, } } }; Ok(serde_wasm_bindgen::to_value(&text)?) } #[wasm_bindgen] pub fn next_token(&mut self) -> Result<JsValue, JsError> { let last_token = *self.tokens.last().unwrap(); let text = match self.process(&[last_token]) { Ok(text) => text, Err(_e) => { console_log!("error decoding token"); Output { token: "".to_string(), token_id: 0, } } }; Ok(serde_wasm_bindgen::to_value(&text)?) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let img = image::ImageReader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378 let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (378, 378, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } impl Model { fn process(&mut self, tokens: &[u32]) -> Result<Output, JsError> { let image_embeddings = match &self.image_embeddings { Some(embeddings) => embeddings, None => return Err(JsError::new("Image embeddings are not set.")), }; let bos_token = match &self.bos_token { Some(token) => token, None => return Err(JsError::new("BOS token is not set.")), }; let device = Device::Cpu; let context_size = if self.index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?; let logits = if self.index > 0 { match self.model { SelectedModel::Moondream(ref mut model) => model.text_model.forward(&input)?, SelectedModel::Quantized(ref mut model) => model.text_model.forward(&input)?, } } else { match self.model { SelectedModel::Moondream(ref mut model) => { model .text_model .forward_with_img(bos_token, &input, image_embeddings)? } SelectedModel::Quantized(ref mut model) => { model .text_model .forward_with_img(bos_token, &input, image_embeddings)? } } }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. 
{ logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; self.tokens.push(next_token); let token = match self.tokenizer.decode(&[next_token], true) { Ok(token) => token, Err(e) => { console_log!("error decoding token: {:?}", e); "".to_string() } }; self.index += 1; Ok(Output { token, token_id: next_token, }) } } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/moondream/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/moondream/src/bin/m.rs", "repo_id": "candle", "token_count": 4975 }
66
use crate::console_log; use crate::worker::{ModelData, Segment, Worker, WorkerInput, WorkerOutput}; use js_sys::Date; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use yew::{html, Component, Context, Html}; use yew_agent::{Bridge, Bridged}; const SAMPLE_NAMES: [&str; 6] = [ "audios/samples_jfk.wav", "audios/samples_a13.wav", "audios/samples_gb0.wav", "audios/samples_gb1.wav", "audios/samples_hp0.wav", "audios/samples_mm0.wav", ]; async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> { use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response}; let window = web_sys::window().ok_or("window")?; let opts = RequestInit::new(); opts.set_method("GET"); opts.set_mode(RequestMode::Cors); opts.set_cache(RequestCache::NoCache); let request = Request::new_with_str_and_init(url, &opts)?; let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?; // `resp_value` is a `Response` object. assert!(resp_value.is_instance_of::<Response>()); let resp: Response = resp_value.dyn_into()?; let data = JsFuture::from(resp.blob()?).await?; let blob = web_sys::Blob::from(data); let array_buffer = JsFuture::from(blob.array_buffer()).await?; let data = js_sys::Uint8Array::new(&array_buffer).to_vec(); Ok(data) } pub enum Msg { Run(usize), UpdateStatus(String), SetDecoder(ModelData), WorkerIn(WorkerInput), WorkerOut(Result<WorkerOutput, String>), } pub struct CurrentDecode { start_time: Option<f64>, } pub struct App { status: String, loaded: bool, segments: Vec<Segment>, current_decode: Option<CurrentDecode>, worker: Box<dyn Bridge<Worker>>, } async fn model_data_load() -> Result<ModelData, JsValue> { let quantized = false; let is_multilingual = false; let (tokenizer, mel_filters, weights, config) = if quantized { console_log!("loading quantized weights"); let tokenizer = fetch_url("quantized/tokenizer-tiny-en.json").await?; let mel_filters = fetch_url("mel_filters.safetensors").await?; let weights = fetch_url("quantized/model-tiny-en-q80.gguf").await?; let config = fetch_url("quantized/config-tiny-en.json").await?; (tokenizer, mel_filters, weights, config) } else { console_log!("loading float weights"); if is_multilingual { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny/tokenizer.json").await?; let weights = fetch_url("whisper-tiny/model.safetensors").await?; let config = fetch_url("whisper-tiny/config.json").await?; (tokenizer, mel_filters, weights, config) } else { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny.en/tokenizer.json").await?; let weights = fetch_url("whisper-tiny.en/model.safetensors").await?; let config = fetch_url("whisper-tiny.en/config.json").await?; (tokenizer, mel_filters, weights, config) } }; let timestamps = true; let _task = Some("transcribe".to_string()); console_log!("{}", weights.len()); Ok(ModelData { tokenizer, mel_filters, weights, config, quantized, timestamps, task: None, is_multilingual, language: None, }) } fn performance_now() -> Option<f64> { let window = web_sys::window()?; let performance = window.performance()?; Some(performance.now() / 1000.) 
} impl Component for App { type Message = Msg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let status = "loading weights".to_string(); let cb = { let link = ctx.link().clone(); move |e| link.send_message(Self::Message::WorkerOut(e)) }; let worker = Worker::bridge(std::rc::Rc::new(cb)); Self { status, segments: vec![], current_decode: None, worker, loaded: false, } } fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) { if first_render { ctx.link().send_future(async { match model_data_load().await { Err(err) => { let status = format!("{err:?}"); Msg::UpdateStatus(status) } Ok(model_data) => Msg::SetDecoder(model_data), } }); } } fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool { match msg { Msg::SetDecoder(md) => { self.status = "weights loaded successfully!".to_string(); self.loaded = true; console_log!("loaded weights"); self.worker.send(WorkerInput::ModelData(md)); true } Msg::Run(sample_index) => { let sample = SAMPLE_NAMES[sample_index]; if self.current_decode.is_some() { self.status = "already decoding some sample at the moment".to_string() } else { let start_time = performance_now(); self.current_decode = Some(CurrentDecode { start_time }); self.status = format!("decoding {sample}"); self.segments.clear(); ctx.link().send_future(async move { match fetch_url(sample).await { Err(err) => { let output = Err(format!("decoding error: {err:?}")); // Mimic a worker output to so as to release current_decode Msg::WorkerOut(output) } Ok(wav_bytes) => Msg::WorkerIn(WorkerInput::DecodeTask { wav_bytes }), } }) } // true } Msg::WorkerOut(output) => { let dt = self.current_decode.as_ref().and_then(|current_decode| { current_decode.start_time.and_then(|start_time| { performance_now().map(|stop_time| stop_time - start_time) }) }); self.current_decode = None; match output { Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(), Ok(WorkerOutput::Decoded(segments)) => { self.status = match dt { None => "decoding succeeded!".to_string(), Some(dt) => format!("decoding succeeded in {dt:.2}s"), }; self.segments = segments; } Err(err) => { self.status = format!("decoding error {err:?}"); } } true } Msg::WorkerIn(inp) => { self.worker.send(inp); true } Msg::UpdateStatus(status) => { self.status = status; true } } } fn view(&self, ctx: &Context<Self>) -> Html { html! { <div> <table> <thead> <tr> <th>{"Sample"}</th> <th></th> <th></th> </tr> </thead> <tbody> { SAMPLE_NAMES.iter().enumerate().map(|(i, name)| { html! { <tr> <th>{name}</th> <th><audio controls=true src={format!("./{name}")}></audio></th> { if self.loaded { html!(<th><button class="button" onclick={ctx.link().callback(move |_| Msg::Run(i))}> { "run" }</button></th>) }else{html!()} } </tr> } }).collect::<Html>() } </tbody> </table> <h2> {&self.status} </h2> { if !self.loaded{ html! { <progress id="progress-bar" aria-label="loading weights…"></progress> } } else if self.current_decode.is_some() { html! { <progress id="progress-bar" aria-label="decoding…"></progress> } } else { html!{ <blockquote> <p> { self.segments.iter().map(|segment| { html! { <> <i> { format!("{:.2}s-{:.2}s: (avg-logprob: {:.4}, no-speech-prob: {:.4})", segment.start, segment.start + segment.duration, segment.dr.avg_logprob, segment.dr.no_speech_prob, ) } </i> <br/ > {&segment.dr.text} <br/ > </> } }).collect::<Html>() } </p> </blockquote> } } } // Display the current date and time the page was rendered <p class="footer"> { "Rendered: " } { String::from(Date::new_0().to_string()) } </p> </div> } } }
candle/candle-wasm-examples/whisper/src/app.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/app.rs", "repo_id": "candle", "token_count": 5668 }
67
use candle_wasm_example_yolo::coco_classes; use candle_wasm_example_yolo::model::Bbox; use candle_wasm_example_yolo::worker::Model as M; use candle_wasm_example_yolo::worker::ModelPose as P; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Model { inner: M, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(data: Vec<u8>, model_size: &str) -> Result<Model, JsError> { let inner = M::load_(data, model_size)?; Ok(Self { inner }) } #[wasm_bindgen] pub fn run( &self, image: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<String, JsError> { let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?; let mut detections: Vec<(String, Bbox)> = vec![]; for (class_index, bboxes_for_class) in bboxes.into_iter().enumerate() { for b in bboxes_for_class.into_iter() { detections.push((coco_classes::NAMES[class_index].to_string(), b)); } } let json = serde_json::to_string(&detections)?; Ok(json) } } #[wasm_bindgen] pub struct ModelPose { inner: P, } #[wasm_bindgen] impl ModelPose { #[wasm_bindgen(constructor)] pub fn new(data: Vec<u8>, model_size: &str) -> Result<ModelPose, JsError> { let inner = P::load_(data, model_size)?; Ok(Self { inner }) } #[wasm_bindgen] pub fn run( &self, image: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<String, JsError> { let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?; let json = serde_json::to_string(&bboxes)?; Ok(json) } } fn main() {}
candle/candle-wasm-examples/yolo/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/m.rs", "repo_id": "candle", "token_count": 840 }
68
# Use .env.local to change these variables # DO NOT EDIT THIS FILE WITH SENSITIVE DATA ### Config ### ENABLE_CONFIG_MANAGER=true ### MongoDB ### MONGODB_URL=#your mongodb URL here, use chat-ui-db image if you don't want to set this MONGODB_DB_NAME=chat-ui MONGODB_DIRECT_CONNECTION=false ### Local Storage ### MODELS_STORAGE_PATH= # where are .gguf for model inference stored MONGO_STORAGE_PATH= # where is the db folder stored ### Endpoints config ### HF_API_ROOT=https://api-inference.huggingface.co/models # HF_TOKEN is used for a lot of things, not only for inference but also fetching tokenizers, etc. # We recommend using an HF_TOKEN even if you use a local endpoint. HF_TOKEN= #get it from https://huggingface.co/settings/token # API Keys for providers, you will need to specify models in the MODELS section but these keys can be kept secret OPENAI_API_KEY=#your openai api key here ANTHROPIC_API_KEY=#your anthropic api key here CLOUDFLARE_ACCOUNT_ID=#your cloudflare account id here CLOUDFLARE_API_TOKEN=#your cloudflare api token here COHERE_API_TOKEN=#your cohere api token here GOOGLE_GENAI_API_KEY=#your google genai api token here ### Models ### ## Models can support many different endpoints, check the documentation for more details MODELS=`[ { "name": "NousResearch/Hermes-3-Llama-3.1-8B", "description": "Nous Research's latest Hermes 3 release in 8B size.", "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ] } ]` LOAD_GGUF_MODELS=true ## Text Embedding Models used for websearch # Default is a model that runs locally on CPU. TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "displayName": "Xenova/gte-small", "description": "Local embedding model running on the server.", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]` REASONING_SUMMARY=true # Change this to false to disable reasoning summary ## Removed models, useful for migrating conversations # { name: string, displayName?: string, id?: string, transferTo?: string }` OLD_MODELS=`[]` ## Task model # name of the model used for tasks such as summarizing title, creating query, etc. # if not set, the first model in MODELS will be used TASK_MODEL= ### Authentication ### # Parameters to enable open id login OPENID_CONFIG= MESSAGES_BEFORE_LOGIN=# how many messages a user can send in a conversation before having to login. set to 0 to force login right away # if it's defined, only these emails will be allowed to use login ALLOWED_USER_EMAILS=`[]` # If it's defined, users with emails matching these domains will also be allowed to use login ALLOWED_USER_DOMAINS=`[]` # valid alternative redirect URLs for OAuth, used for HuggingChat apps ALTERNATIVE_REDIRECT_URLS=`[]` ### Cookies # name of the cookie used to store the session COOKIE_NAME=hf-chat # specify secure behaviour for cookies COOKIE_SAMESITE=# can be "lax", "strict", "none" or left empty COOKIE_SECURE=# set to true to only allow cookies over https TRUSTED_EMAIL_HEADER=# header to use to get the user email, only use if you know what you are doing ### Admin stuff ### ADMIN_CLI_LOGIN=true # set to false to disable the CLI login ADMIN_TOKEN=#We recommend leaving this empty, you can get the token from the terminal. 
### Websearch ### ## API Keys used to activate search with web functionality. websearch is disabled if none are defined. choose one of the following: YDC_API_KEY=#your docs.you.com api key here SERPER_API_KEY=#your serper.dev api key here SERPAPI_KEY=#your serpapi key here SERPSTACK_API_KEY=#your serpstack api key here SEARCHAPI_KEY=#your searchapi api key here USE_LOCAL_WEBSEARCH=#set to true to parse google results yourself, overrides other API keys SEARXNG_QUERY_URL=# where '<query>' will be replaced with query keywords see https://docs.searxng.org/dev/search_api.html eg https://searxng.yourdomain.com/search?q=<query>&engines=duckduckgo,google&format=json BING_SUBSCRIPTION_KEY=#your key ## Websearch configuration PLAYWRIGHT_ADBLOCKER=true WEBSEARCH_ALLOWLIST=`[]` # if it's defined, allow websites from only this list. WEBSEARCH_BLOCKLIST=`[]` # if it's defined, block websites from this list. WEBSEARCH_JAVASCRIPT=true # CPU usage reduces by 60% on average by disabling javascript. Enable to improve website compatibility WEBSEARCH_TIMEOUT = 3500 # in milliseconds, determines how long to wait to load a page before timing out ENABLE_LOCAL_FETCH=false #set to true to allow fetches on the local network. /!\ Only enable this if you have the proper firewall rules to prevent SSRF attacks and understand the implications. ## Public app configuration ## PUBLIC_APP_GUEST_MESSAGE=# a message to the guest user. If not set, no message will be shown. Only used if you have authentication enabled. PUBLIC_APP_NAME=ChatUI # name used as title throughout the app PUBLIC_APP_ASSETS=chatui # used to find logos & favicons in static/$PUBLIC_APP_ASSETS PUBLIC_APP_DESCRIPTION=# description used throughout the app PUBLIC_APP_DATA_SHARING=# Set to 1 to enable an option in the user settings to share conversations with model authors PUBLIC_APP_DISCLAIMER=# Set to 1 to show a disclaimer on login page PUBLIC_APP_DISCLAIMER_MESSAGE=# Message to show on the login page PUBLIC_ANNOUNCEMENT_BANNERS=`[ { "title": "chat-ui is now open source!", "linkTitle": "check it out", "linkHref": "https://github.com/huggingface/chat-ui" } ]` PUBLIC_SMOOTH_UPDATES=false # set to true to enable smoothing of messages client-side, can be CPU intensive PUBLIC_ORIGIN=#https://huggingface.co PUBLIC_SHARE_PREFIX=#https://hf.co/chat # mostly huggingchat specific PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable PUBLIC_PLAUSIBLE_SCRIPT_URL=#/js/script.js / Leave empty to disable PUBLIC_APPLE_APP_ID=#1234567890 / Leave empty to disable ### Feature Flags ### LLM_SUMMARIZATION=true # generate conversation titles with LLMs ENABLE_ASSISTANTS=false #set to true to enable assistants feature ENABLE_ASSISTANTS_RAG=false # /!\ This will let users specify arbitrary URLs that the server will then request. Make sure you have the proper firewall rules in place. 
REQUIRE_FEATURED_ASSISTANTS=false # require featured assistants to show in the list COMMUNITY_TOOLS=false # set to true to enable community tools ALLOW_IFRAME=true # Allow the app to be embedded in an iframe ENABLE_DATA_EXPORT=true ### Tools ### # Check out public config in `chart/env/prod.yaml` for more details TOOLS=`[]` ### Rate limits ### # See `src/lib/server/usageLimits.ts` # { # conversations: number, # how many conversations # messages: number, # how many messages in a conversation # assistants: number, # how many assistants # messageLength: number, # how long can a message be before we cut it off # messagesPerMinute: number, # how many messages per minute # tools: number # how many tools # } USAGE_LIMITS=`{}` ### HuggingFace specific ### # Let user authenticate with their HF token in the /api routes. This is only useful if you have OAuth configured with huggingface. USE_HF_TOKEN_IN_API=false ## Feature flag & admin settings # Used for setting early access & admin flags to users HF_ORG_ADMIN= HF_ORG_EARLY_ACCESS= WEBHOOK_URL_REPORT_ASSISTANT=#provide slack webhook url to get notified for reports/feature requests IP_TOKEN_SECRET= ### Metrics ### METRICS_ENABLED=false METRICS_PORT=5565 LOG_LEVEL=info ### Parquet export ### # Not in use anymore but useful to export conversations to a parquet file as a HuggingFace dataset PARQUET_EXPORT_DATASET= PARQUET_EXPORT_HF_TOKEN= ADMIN_API_SECRET=# secret to admin API calls, like computing usage stats or exporting parquet data ### Docker build variables ### # These values cannot be updated at runtime # They need to be passed when building the docker image # See https://github.com/huggingface/chat-ui/main/.github/workflows/deploy-prod.yml#L44-L47 APP_BASE="" # base path of the app, e.g. /chat, left blank as default PUBLIC_APP_COLOR=blue # can be any of tailwind colors: https://tailwindcss.com/docs/customizing-colors#default-color-palette ### Body size limit for SvelteKit https://svelte.dev/docs/kit/adapter-node#Environment-variables-BODY_SIZE_LIMIT BODY_SIZE_LIMIT=15728640 PUBLIC_COMMIT_SHA= ### LEGACY parameters HF_ACCESS_TOKEN=#LEGACY! Use HF_TOKEN instead ALLOW_INSECURE_COOKIES=false # LEGACY! Use COOKIE_SECURE and COOKIE_SAMESITE instead PARQUET_EXPORT_SECRET=#DEPRECATED, use ADMIN_API_SECRET instead RATE_LIMIT= # /!\ DEPRECATED definition of messages per minute. Use USAGE_LIMITS.messagesPerMinute instead OPENID_CLIENT_ID= OPENID_CLIENT_SECRET= OPENID_SCOPES="openid profile" # Add "email" for some providers like Google that do not provide preferred_username OPENID_NAME_CLAIM="name" # Change to "username" for some providers that do not provide name OPENID_PROVIDER_URL=https://huggingface.co # for Google, use https://accounts.google.com OPENID_TOLERANCE= OPENID_RESOURCE= EXPOSE_API=# deprecated, API is now always exposed
chat-ui/.env/0
{ "file_path": "chat-ui/.env", "repo_id": "chat-ui", "token_count": 3062 }
69
.DS_Store node_modules /build /.svelte-kit /package /chart .env .env.* !.env.example # Ignore files for PNPM, NPM and YARN pnpm-lock.yaml package-lock.json yarn.lock
chat-ui/.prettierignore/0
{ "file_path": "chat-ui/.prettierignore", "repo_id": "chat-ui", "token_count": 72 }
70
{{- if $.Values.ingressInternal.enabled }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: {{ toYaml .Values.ingressInternal.annotations | nindent 4 }} labels: {{ include "labels.standard" . | nindent 4 }} name: {{ include "name" . }}-internal namespace: {{ .Release.Namespace }} spec: {{ if $.Values.ingressInternal.className }} ingressClassName: {{ .Values.ingressInternal.className }} {{ end }} {{- with .Values.ingressInternal.tls }} tls: - hosts: - {{ $.Values.domain | quote }} {{- with .secretName }} secretName: {{ . }} {{- end }} {{- end }} rules: - host: {{ .Values.domain }} http: paths: - backend: service: name: {{ include "name" . }} port: name: http path: {{ $.Values.ingressInternal.path | default "/" }} pathType: Prefix {{- end }}
chat-ui/chart/templates/ingress-internal.yaml/0
{ "file_path": "chat-ui/chart/templates/ingress-internal.yaml", "repo_id": "chat-ui", "token_count": 408 }
71
# Cohere | Feature | Available | | --------------------------- | --------- | | [Tools](../tools) | Yes | | [Multimodal](../multimodal) | No | You may use Cohere to run their models directly from Chat UI. You will need to have a Cohere account, then get your [API token](https://dashboard.cohere.com/api-keys). You can either specify it directly in your `.env.local` using the `COHERE_API_TOKEN` variable, or you can set it in the endpoint config. Here is an example of a Cohere model config. You can set which model you want to use by setting the `id` field to the model name. ```ini MODELS=`[ { "name": "command-r-plus", "displayName": "Command R+", "tools": true, "endpoints": [{ "type": "cohere", <!-- optionally specify these, or use COHERE_API_TOKEN "apiKey": "your-api-token" --> }] } ]` ```
chat-ui/docs/source/configuration/models/providers/cohere.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/cohere.md", "repo_id": "chat-ui", "token_count": 342 }
72
# Helm <Tip warning={true}> **We highly discourage using the chart**. The Helm chart is a work in progress and should be considered unstable. Breaking changes to the chart may be pushed without migration guides or notice. Contributions welcome! </Tip> For installation on Kubernetes, you may use the helm chart in `/chart`. Please note that no chart repository has been setup, so you'll need to clone the repository and install the chart by path. The production values may be found at `chart/env/prod.yaml`. **Example values.yaml** ```yaml replicas: 1 domain: example.com service: type: ClusterIP resources: requests: cpu: 100m memory: 2Gi limits: # Recommended to use large limits when web search is enabled cpu: "4" memory: 6Gi envVars: MONGODB_URL: mongodb://chat-ui-mongo:27017 # Ensure that your values.yaml will not leak anywhere # PRs welcome for a chart rework with envFrom support! HF_TOKEN: secret_token ```
chat-ui/docs/source/installation/helm.md/0
{ "file_path": "chat-ui/docs/source/installation/helm.md", "repo_id": "chat-ui", "token_count": 292 }
73
<!doctype html> <html lang="en" class="h-full"> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="theme-color" content="rgb(249, 250, 251)" /> <script> if ( localStorage.theme === "dark" || (!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches) ) { document.documentElement.classList.add("dark"); document .querySelector('meta[name="theme-color"]') .setAttribute("content", "rgb(26, 36, 50)"); } // For some reason, Sveltekit doesn't let us load env variables from .env here, so we load it from hooks.server.ts window.gaId = "%gaId%"; </script> %sveltekit.head% </head> <body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900"> <div id="app" class="contents h-full">%sveltekit.body%</div> <!-- Google Tag Manager --> <script> if (window.gaId) { const script = document.createElement("script"); script.src = "https://www.googletagmanager.com/gtag/js?id=" + window.gaId; script.async = true; document.head.appendChild(script); window.dataLayer = window.dataLayer || []; function gtag() { dataLayer.push(arguments); } gtag("js", new Date()); /// ^ See https://developers.google.com/tag-platform/gtagjs/install gtag("config", window.gaId); gtag("consent", "default", { ad_storage: "denied", analytics_storage: "denied" }); /// ^ See https://developers.google.com/tag-platform/gtagjs/reference#consent /// TODO: ask the user for their consent and update this with gtag('consent', 'update') } </script> </body> </html>
chat-ui/src/app.html/0
{ "file_path": "chat-ui/src/app.html", "repo_id": "chat-ui", "token_count": 668 }
74
<script lang="ts"> import { onMount, createEventDispatcher } from "svelte"; const dispatch = createEventDispatcher(); let loader: HTMLDivElement | undefined = $state(); let observer: IntersectionObserver; let intervalId: ReturnType<typeof setInterval> | undefined; onMount(() => { if (!loader) { return; } observer = new IntersectionObserver((entries) => { entries.forEach((entry) => { if (entry.isIntersecting) { // Clear any existing interval if (intervalId) { clearInterval(intervalId); } // Start new interval that dispatches every 250ms intervalId = setInterval(() => { dispatch("visible"); }, 250); } else { // Clear interval when not intersecting if (intervalId) { clearInterval(intervalId); intervalId = undefined; } } }); }); observer.observe(loader); return () => { observer.disconnect(); if (intervalId) { clearInterval(intervalId); } }; }); </script> <div bind:this={loader} class="flex animate-pulse flex-col gap-4"> <div class="w-5/5 ml-2 h-5 gap-5 rounded bg-gray-300 dark:bg-gray-700"></div> <div class="w-5/5 ml-2 h-5 gap-5 rounded bg-gray-300 dark:bg-gray-700"></div> <div class="w-5/5 ml-2 h-5 gap-5 rounded bg-gray-300 dark:bg-gray-700"></div> </div>
chat-ui/src/lib/components/InfiniteScroll.svelte/0
{ "file_path": "chat-ui/src/lib/components/InfiniteScroll.svelte", "repo_id": "chat-ui", "token_count": 543 }
75
<script lang="ts"> interface Props { checked: boolean; name: string; } let { checked = $bindable(), name }: Props = $props(); </script> <input bind:checked type="checkbox" {name} class="peer pointer-events-none absolute opacity-0" /> <div aria-checked={checked} aria-roledescription="switch" aria-label="switch" role="switch" tabindex="0" class="relative inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full bg-gray-300 p-1 shadow-inner ring-gray-400 transition-all peer-checked:bg-blue-600 peer-focus-visible:ring peer-focus-visible:ring-offset-1 hover:bg-gray-400 dark:bg-gray-600 peer-checked:[&>div]:translate-x-3.5" > <div class="h-3.5 w-3.5 rounded-full bg-white shadow-sm transition-all"></div> </div>
chat-ui/src/lib/components/Switch.svelte/0
{ "file_path": "chat-ui/src/lib/components/Switch.svelte", "repo_id": "chat-ui", "token_count": 267 }
76
<script lang="ts"> import { createBubbler } from "svelte/legacy"; const bubble = createBubbler(); import { useSettingsStore } from "$lib/stores/settings"; import { documentParserToolId } from "$lib/utils/toolIds"; import CarbonImage from "~icons/carbon/image"; interface Props { // import EosIconsLoading from "~icons/eos-icons/loading"; files: File[]; mimeTypes?: string[]; onDrag?: boolean; onDragInner?: boolean; } let { files = $bindable(), mimeTypes = [], onDrag = $bindable(false), onDragInner = $bindable(false), }: Props = $props(); const settings = useSettingsStore(); async function dropHandle(event: DragEvent) { event.preventDefault(); if (event.dataTransfer && event.dataTransfer.items) { // Use DataTransferItemList interface to access the file(s) if (files.length > 0) { files = []; } if (event.dataTransfer.items[0].kind === "file") { for (let i = 0; i < event.dataTransfer.items.length; i++) { const file = event.dataTransfer.items[i].getAsFile(); if (file) { // check if the file matches the mimeTypes // else abort if ( !mimeTypes.some((mimeType: string) => { const [type, subtype] = mimeType.split("/"); const [fileType, fileSubtype] = file.type.split("/"); return ( (type === "*" || type === fileType) && (subtype === "*" || subtype === fileSubtype) ); }) ) { setErrorMsg( `Some file type not supported. Only allowed: ${mimeTypes.join( ", " )}. Uploaded document is of type ${file.type}` ); files = []; return; } // if file is bigger than 10MB abort if (file.size > 10 * 1024 * 1024) { setErrorMsg("Some file is too big. (10MB max)"); files = []; return; } // add the file to the files array files = [...files, file]; settings.instantSet({ tools: [...($settings.tools ?? []), documentParserToolId], }); } } onDrag = false; } } } function setErrorMsg(errorMsg: string) { onDrag = false; alert(errorMsg); } </script> <div id="dropzone" role="form" ondrop={dropHandle} ondragenter={() => (onDragInner = true)} ondragleave={() => (onDragInner = false)} ondragover={(e) => { e.preventDefault(); bubble("dragover"); }} class="relative flex h-28 w-full max-w-4xl flex-col items-center justify-center gap-1 rounded-xl border-2 border-dotted {onDragInner ? 'border-blue-200 !bg-blue-500/10 text-blue-600 *:pointer-events-none dark:border-blue-600 dark:bg-blue-500/20 dark:text-blue-500' : 'bg-gray-100 text-gray-500 dark:border-gray-500 dark:bg-gray-700 dark:text-gray-400'}" > <CarbonImage class="text-xl" /> <p>Drop File to add to chat</p> </div>
chat-ui/src/lib/components/chat/FileDropzone.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/FileDropzone.svelte", "repo_id": "chat-ui", "token_count": 1191 }
77
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" fill="currentColor" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" ><path d="M19.02 5.57a5.77 5.77 0 1 1 8.56 7.74L16.6 25.45l-.02.01v.01A7.87 7.87 0 0 1 4.92 14.9L12.95 6A1.18 1.18 0 0 1 14.7 7.6l-8.03 8.87a5.51 5.51 0 1 0 8.19 7.4l10.97-12.14a3.41 3.41 0 1 0-5.06-4.58l-9.32 10.3a1.27 1.27 0 1 0 1.88 1.7l6.28-6.94a1.18 1.18 0 0 1 1.75 1.59l-6.28 6.94a3.63 3.63 0 0 1-5.41-4.83l.02-.02 9.33-10.32Z" fill="currentColor" /></svg >
chat-ui/src/lib/components/icons/IconPaperclip.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconPaperclip.svelte", "repo_id": "chat-ui", "token_count": 381 }
78
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId, type WithId } from "mongodb"; import type { Conversation } from "$lib/types/Conversation"; import type { WebSearchSource } from "$lib/types/WebSearch"; import { MessageUpdateStatus, MessageUpdateType, MessageWebSearchUpdateType, type MessageUpdate, type MessageWebSearchFinishedUpdate, } from "$lib/types/MessageUpdate"; import type { Message } from "$lib/types/Message"; import { isMessageWebSearchSourcesUpdate } from "$lib/utils/messageUpdates"; // ----------- // Copy of the previous message update types export type FinalAnswer = { type: "finalAnswer"; text: string; }; export type TextStreamUpdate = { type: "stream"; token: string; }; type WebSearchUpdate = { type: "webSearch"; messageType: "update" | "error" | "sources"; message: string; args?: string[]; sources?: WebSearchSource[]; }; type StatusUpdate = { type: "status"; status: "started" | "pending" | "finished" | "error" | "title"; message?: string; }; type ErrorUpdate = { type: "error"; message: string; name: string; }; type FileUpdate = { type: "file"; sha: string; }; type OldMessageUpdate = | FinalAnswer | TextStreamUpdate | WebSearchUpdate | StatusUpdate | ErrorUpdate | FileUpdate; /** Converts the old message update to the new schema */ function convertMessageUpdate(message: Message, update: OldMessageUpdate): MessageUpdate | null { try { // Text and files if (update.type === "finalAnswer") { return { type: MessageUpdateType.FinalAnswer, text: update.text, interrupted: message.interrupted ?? false, }; } else if (update.type === "stream") { return { type: MessageUpdateType.Stream, token: update.token, }; } else if (update.type === "file") { return { type: MessageUpdateType.File, name: "Unknown", sha: update.sha, // assume jpeg but could be any image. should be harmless mime: "image/jpeg", }; } // Status else if (update.type === "status") { if (update.status === "title") { return { type: MessageUpdateType.Title, title: update.message ?? "New Chat", }; } if (update.status === "pending") return null; const status = update.status === "started" ? MessageUpdateStatus.Started : update.status === "finished" ? MessageUpdateStatus.Finished : MessageUpdateStatus.Error; return { type: MessageUpdateType.Status, status, message: update.message, }; } else if (update.type === "error") { // Treat it as an error status update return { type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: update.message, }; } // Web Search else if (update.type === "webSearch") { if (update.messageType === "update") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Update, message: update.message, args: update.args, }; } else if (update.messageType === "error") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Error, message: update.message, args: update.args, }; } else if (update.messageType === "sources") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Sources, message: update.message, sources: update.sources ?? [], }; } } console.warn("Unknown message update during migration:", update); return null; } catch (error) { console.error("Error converting message update during migration. Skipping it... 
Error:", error); return null; } } const updateMessageUpdates: Migration = { _id: new ObjectId("5f9f7f7f7f7f7f7f7f7f7f7f"), name: "Convert message updates to the new schema", up: async () => { const allConversations = collections.conversations.find({}); let conversation: WithId<Pick<Conversation, "messages">> | null = null; while ((conversation = await allConversations.tryNext())) { const messages = conversation.messages.map((message) => { // Convert all of the existing updates to the new schema const updates = message.updates ?.map((update) => convertMessageUpdate(message, update as OldMessageUpdate)) .filter((update): update is MessageUpdate => Boolean(update)); // Add the new web search finished update if the sources update exists and webSearch is defined const webSearchSourcesUpdateIndex = updates?.findIndex(isMessageWebSearchSourcesUpdate); if ( message.webSearch && updates && webSearchSourcesUpdateIndex && webSearchSourcesUpdateIndex !== -1 ) { const webSearchFinishedUpdate: MessageWebSearchFinishedUpdate = { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Finished, }; updates.splice(webSearchSourcesUpdateIndex + 1, 0, webSearchFinishedUpdate); } return { ...message, updates }; }); // Set the new messages array await collections.conversations.updateOne({ _id: conversation._id }, { $set: { messages } }); } return true; }, runEveryTime: false, }; export default updateMessageUpdates;
chat-ui/src/lib/migrations/routines/04-update-message-updates.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/04-update-message-updates.ts", "repo_id": "chat-ui", "token_count": 1830 }
79
import { Elysia } from "elysia"; import { models, oldModels, type BackendModel } from "$lib/server/models"; import { authPlugin } from "../../authPlugin"; import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; export type GETModelsResponse = Array<{ id: string; name: string; websiteUrl?: string; modelUrl?: string; tokenizer?: string | { tokenizerUrl: string; tokenizerConfigUrl: string }; datasetName?: string; datasetUrl?: string; displayName: string; description?: string; reasoning: boolean; logoUrl?: string; promptExamples?: { title: string; prompt: string }[]; parameters: BackendModel["parameters"]; preprompt?: string; multimodal: boolean; multimodalAcceptedMimetypes?: string[]; tools: boolean; unlisted: boolean; hasInferenceAPI: boolean; }>; export type GETOldModelsResponse = Array<{ id: string; name: string; displayName: string; transferTo?: string; }>; export const modelGroup = new Elysia().group("/models", (app) => app .get("/", () => { return models .filter((m) => m.unlisted == false) .map((model) => ({ id: model.id, name: model.name, websiteUrl: model.websiteUrl, modelUrl: model.modelUrl, tokenizer: model.tokenizer, datasetName: model.datasetName, datasetUrl: model.datasetUrl, displayName: model.displayName, description: model.description, reasoning: !!model.reasoning, logoUrl: model.logoUrl, promptExamples: model.promptExamples, parameters: model.parameters, preprompt: model.preprompt, multimodal: model.multimodal, multimodalAcceptedMimetypes: model.multimodalAcceptedMimetypes, tools: model.tools, unlisted: model.unlisted, hasInferenceAPI: model.hasInferenceAPI, })) satisfies GETModelsResponse; }) .get("/old", () => { return oldModels satisfies GETOldModelsResponse; }) .group("/:namespace/:model?", (app) => app .derive(async ({ params, error }) => { let modelId: string = params.namespace; if (params.model) { modelId += "/" + params.model; } const model = models.find((m) => m.id === modelId); if (!model || model.unlisted) { return error(404, "Model not found"); } return { model }; }) .get("/", ({ model }) => { return model; }) .use(authPlugin) .post("/subscribe", async ({ locals, model, error }) => { if (!locals.sessionId) { return error(401, "Unauthorized"); } await collections.settings.updateOne( authCondition(locals), { $set: { activeModel: model.id, updatedAt: new Date(), }, $setOnInsert: { createdAt: new Date(), }, }, { upsert: true, } ); return new Response(); }) ) );
chat-ui/src/lib/server/api/routes/groups/models.ts/0
{ "file_path": "chat-ui/src/lib/server/api/routes/groups/models.ts", "repo_id": "chat-ui", "token_count": 1198 }
80
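For reference, a client could consume the model-listing route above roughly as follows. This is a hedged sketch: the `/api/v2` mount point and the use of plain `fetch` are assumptions, not part of the route definition itself.

```ts
import type { GETModelsResponse } from "$lib/server/api/routes/groups/models";

// Assumed base path for the Elysia app; adjust to wherever the API group is mounted.
const API_BASE = "/api/v2";

export async function listModels(): Promise<GETModelsResponse> {
	const res = await fetch(`${API_BASE}/models`);
	if (!res.ok) {
		throw new Error(`Failed to fetch models: ${res.status}`);
	}
	return (await res.json()) as GETModelsResponse;
}

listModels().then((models) => {
	for (const m of models) console.log(m.id, m.multimodal ? "(multimodal)" : "");
});
```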
import { z } from "zod"; import type { Endpoint } from "../endpoints"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { EndpointMessage } from "../endpoints"; import type { MessageFile } from "$lib/types/Message"; export const endpointBedrockParametersSchema = z.object({ weight: z.number().int().positive().default(1), type: z.literal("bedrock"), region: z.string().default("us-east-1"), model: z.any(), anthropicVersion: z.string().default("bedrock-2023-05-31"), isNova: z.boolean().default(false), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: Infinity, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export async function endpointBedrock( input: z.input<typeof endpointBedrockParametersSchema> ): Promise<Endpoint> { const { region, model, anthropicVersion, multimodal, isNova } = endpointBedrockParametersSchema.parse(input); let BedrockRuntimeClient, InvokeModelWithResponseStreamCommand; try { ({ BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } = await import( "@aws-sdk/client-bedrock-runtime" )); } catch (error) { throw new Error("Failed to import @aws-sdk/client-bedrock-runtime. Make sure it's installed."); } const client = new BedrockRuntimeClient({ region, }); const imageProcessor = makeImageProcessor(multimodal.image); return async ({ messages, preprompt, generateSettings }) => { let system = preprompt; // Use the first message as the system prompt if it's of type "system" if (messages?.[0]?.from === "system") { system = messages[0].content; messages = messages.slice(1); // Remove the first system message from the array } const formattedMessages = await prepareMessages(messages, isNova, imageProcessor); let tokenId = 0; const parameters = { ...model.parameters, ...generateSettings }; return (async function* () { const baseCommandParams = { contentType: "application/json", accept: "application/json", modelId: model.id, }; const maxTokens = parameters.max_new_tokens || 4096; let bodyContent; if (isNova) { bodyContent = { messages: formattedMessages, inferenceConfig: { maxTokens, topP: 0.1, temperature: 1.0, }, system: [{ text: system }], }; } else { bodyContent = { anthropic_version: anthropicVersion, max_tokens: maxTokens, messages: formattedMessages, system, }; } const command = new InvokeModelWithResponseStreamCommand({ ...baseCommandParams, body: Buffer.from(JSON.stringify(bodyContent), "utf-8"), trace: "DISABLED", }); const response = await client.send(command); let text = ""; for await (const item of response.body ?? 
[]) { const chunk = JSON.parse(new TextDecoder().decode(item.chunk?.bytes)); if ("contentBlockDelta" in chunk || chunk.type === "content_block_delta") { const chunkText = chunk.contentBlockDelta?.delta?.text || chunk.delta?.text || ""; text += chunkText; yield { token: { id: tokenId++, text: chunkText, logprob: 0, special: false, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } else if ("messageStop" in chunk || chunk.type === "message_stop") { yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: text, details: null, } satisfies TextGenerationStreamOutput; } } })(); }; } // Prepare the messages excluding system prompts async function prepareMessages( messages: EndpointMessage[], isNova: boolean, imageProcessor: ReturnType<typeof makeImageProcessor> ) { const formattedMessages = []; for (const message of messages) { const content = []; if (message.files?.length) { content.push(...(await prepareFiles(imageProcessor, isNova, message.files))); } if (isNova) { content.push({ text: message.content }); } else { content.push({ type: "text", text: message.content }); } const lastMessage = formattedMessages[formattedMessages.length - 1]; if (lastMessage && lastMessage.role === message.from) { // If the last message has the same role, merge the content lastMessage.content.push(...content); } else { formattedMessages.push({ role: message.from, content }); } } return formattedMessages; } // Process files and convert them to base64 encoded strings async function prepareFiles( imageProcessor: ReturnType<typeof makeImageProcessor>, isNova: boolean, files: MessageFile[] ) { const processedFiles = await Promise.all(files.map(imageProcessor)); if (isNova) { return processedFiles.map((file) => ({ image: { format: file.mime.substring("image/".length), source: { bytes: file.image.toString("base64") }, }, })); } else { return processedFiles.map((file) => ({ type: "image", source: { type: "base64", media_type: file.mime, data: file.image.toString("base64") }, })); } }
chat-ui/src/lib/server/endpoints/aws/endpointBedrock.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/aws/endpointBedrock.ts", "repo_id": "chat-ui", "token_count": 2027 }
81
import type { Message } from "$lib/types/Message"; import { format } from "date-fns"; import type { EndpointMessage } from "./endpoints"; import { downloadFile } from "../files/downloadFile"; import type { ObjectId } from "mongodb"; export async function preprocessMessages( messages: Message[], webSearch: Message["webSearch"], convId: ObjectId ): Promise<EndpointMessage[]> { return Promise.resolve(messages) .then((msgs) => addWebSearchContext(msgs, webSearch)) .then((msgs) => downloadFiles(msgs, convId)) .then((msgs) => injectClipboardFiles(msgs)); } function addWebSearchContext(messages: Message[], webSearch: Message["webSearch"]) { const webSearchContext = webSearch?.contextSources .map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`) .join("\n\n----------\n\n"); // No web search context available, skip if (!webSearch || !webSearchContext?.trim()) return messages; // No messages available, skip if (messages.length === 0) return messages; const lastQuestion = messages.findLast((el) => el.from === "user")?.content ?? ""; const previousQuestions = messages .filter((el) => el.from === "user") .slice(0, -1) .map((el) => el.content); const currentDate = format(new Date(), "MMMM d, yyyy"); const finalMessage = { ...messages[messages.length - 1], content: `I searched the web using the query: ${webSearch.searchQuery}. The query was generated by a tool and might not be relevant to the question. Today is ${currentDate} and here are the results. When answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3]. ===================== ${webSearchContext} ===================== ${previousQuestions.length > 0 ? `Previous questions: \n- ${previousQuestions.join("\n- ")}` : ""} Answer the question: ${lastQuestion}`, }; return [...messages.slice(0, -1), finalMessage]; } async function downloadFiles(messages: Message[], convId: ObjectId): Promise<EndpointMessage[]> { return Promise.all( messages.map<Promise<EndpointMessage>>((message) => Promise.all((message.files ?? []).map((file) => downloadFile(file.value, convId))).then( (files) => ({ ...message, files }) ) ) ); } async function injectClipboardFiles(messages: EndpointMessage[]) { return Promise.all( messages.map((message) => { const plaintextFiles = message.files ?.filter((file) => file.mime === "application/vnd.chatui.clipboard") .map((file) => Buffer.from(file.value, "base64").toString("utf-8")); if (!plaintextFiles || plaintextFiles.length === 0) return message; return { ...message, content: `${plaintextFiles.join("\n\n")}\n\n${message.content}`, files: message.files?.filter((file) => file.mime !== "application/vnd.chatui.clipboard"), }; }) ); }
chat-ui/src/lib/server/endpoints/preprocessMessages.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/preprocessMessages.ts", "repo_id": "chat-ui", "token_count": 960 }
82
import { isURLLocal } from "./isURLLocal"; import { describe, expect, it } from "vitest"; describe("isURLLocal", async () => { it("should return true for localhost", async () => { expect(await isURLLocal(new URL("http://localhost"))).toBe(true); }); it("should return true for 127.0.0.1", async () => { expect(await isURLLocal(new URL("http://127.0.0.1"))).toBe(true); }); it("should return true for 127.254.254.254", async () => { expect(await isURLLocal(new URL("http://127.254.254.254"))).toBe(true); }); it("should return false for huggingface.co", async () => { expect(await isURLLocal(new URL("https://huggingface.co/"))).toBe(false); }); it("should return true for 127.0.0.1.nip.io", async () => { expect(await isURLLocal(new URL("http://127.0.0.1.nip.io"))).toBe(true); }); it("should fail on ipv6", async () => { await expect(isURLLocal(new URL("http://[::1]"))).rejects.toThrow(); }); it("should fail on ipv6 --1.sslip.io", async () => { await expect(isURLLocal(new URL("http://--1.sslip.io"))).rejects.toThrow(); }); it("should fail on invalid domain names", async () => { await expect( isURLLocal(new URL("http://34329487239847329874923948732984.com/")) ).rejects.toThrow(); }); });
chat-ui/src/lib/server/isURLLocal.spec.ts/0
{ "file_path": "chat-ui/src/lib/server/isURLLocal.spec.ts", "repo_id": "chat-ui", "token_count": 492 }
83
import type { Tool } from "$lib/types/Tool"; import { extractJson } from "./utils"; import { externalToToolCall } from "../textGeneration/tools"; import { logger } from "../logger"; import type { Endpoint, EndpointMessage } from "../endpoints/endpoints"; interface GetToolOutputOptions { messages: EndpointMessage[]; tool: Tool; preprompt?: string; endpoint: Endpoint; generateSettings?: { max_new_tokens?: number; [key: string]: unknown; }; } export async function getToolOutput<T = string>({ messages, preprompt, tool, endpoint, generateSettings = { max_new_tokens: 64 }, }: GetToolOutputOptions): Promise<T | undefined> { try { const stream = await endpoint({ messages, preprompt: preprompt + `\n\n Only use tool ${tool.name}.`, tools: [tool], generateSettings, }); const calls = []; for await (const output of stream) { if (output.token.toolCalls) { calls.push(...output.token.toolCalls); } if (output.generated_text) { const extractedCalls = await extractJson(output.generated_text).then((calls) => calls.map((call) => externalToToolCall(call, [tool])).filter((call) => call !== undefined) ); calls.push(...extractedCalls); } if (calls.length > 0) { break; } } if (calls.length > 0) { // Find the tool call matching our tool const toolCall = calls.find((call) => call.name === tool.name); // If we found a matching call and it has parameters if (toolCall?.parameters) { // Get the first parameter value since most tools have a single main parameter const firstParamValue = Object.values(toolCall.parameters)[0]; if (typeof firstParamValue === "string") { return firstParamValue as T; } } } return undefined; } catch (error) { logger.warn(error, "Error getting tool output"); return undefined; } }
chat-ui/src/lib/server/tools/getToolOutput.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/getToolOutput.ts", "repo_id": "chat-ui", "token_count": 672 }
84
import { defaultEmbeddingModel, embeddingModels } from "$lib/server/embeddingModels"; import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { WebSearch, WebSearchScrapedSource } from "$lib/types/WebSearch"; import type { Assistant } from "$lib/types/Assistant"; import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate"; import { search } from "./search/search"; import { scrape } from "./scrape/scrape"; import { findContextSources } from "./embed/embed"; import { removeParents } from "./markdown/tree"; import { makeErrorUpdate, makeFinalAnswerUpdate, makeGeneralUpdate, makeSourcesUpdate, } from "./update"; import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators"; import { MetricsServer } from "../metrics"; import { logger } from "$lib/server/logger"; const MAX_N_PAGES_TO_SCRAPE = 8 as const; const MAX_N_PAGES_TO_EMBED = 5 as const; export async function* runWebSearch( conv: Conversation, messages: Message[], ragSettings?: Assistant["rag"], query?: string ): AsyncGenerator<MessageWebSearchUpdate, WebSearch, undefined> { const prompt = messages[messages.length - 1].content; const createdAt = new Date(); const updatedAt = new Date(); MetricsServer.getMetrics().webSearch.requestCount.inc(); try { const embeddingModel = embeddingModels.find((m) => m.id === conv.embeddingModel) ?? defaultEmbeddingModel; if (!embeddingModel) { throw Error(`Embedding model ${conv.embeddingModel} not available anymore`); } // Search the web const { searchQuery, pages } = yield* search(messages, ragSettings, query); if (pages.length === 0) throw Error("No results found for this search query"); // Scrape pages yield makeGeneralUpdate({ message: "Browsing search results" }); const allScrapedPages = yield* mergeAsyncGenerators( pages.slice(0, MAX_N_PAGES_TO_SCRAPE).map(scrape(embeddingModel.chunkCharLength)) ); const scrapedPages = allScrapedPages .filter((p): p is WebSearchScrapedSource => Boolean(p)) .filter((p) => p.page.markdownTree.children.length > 0) .slice(0, MAX_N_PAGES_TO_EMBED); if (!scrapedPages.length) { throw Error(`No text found in the first ${MAX_N_PAGES_TO_SCRAPE} results`); } // Chunk the text of each of the elements and find the most similar chunks to the prompt yield makeGeneralUpdate({ message: "Extracting relevant information" }); const contextSources = await findContextSources(scrapedPages, prompt, embeddingModel).then( (ctxSources) => ctxSources.map((source) => ({ ...source, page: { ...source.page, markdownTree: removeParents(source.page.markdownTree) }, })) ); yield makeSourcesUpdate(contextSources); const webSearch: WebSearch = { prompt, searchQuery, results: scrapedPages.map(({ page, ...source }) => ({ ...source, page: { ...page, markdownTree: removeParents(page.markdownTree) }, })), contextSources, createdAt, updatedAt, }; yield makeFinalAnswerUpdate(); return webSearch; } catch (searchError) { const message = searchError instanceof Error ? searchError.message : String(searchError); logger.error(message); yield makeErrorUpdate({ message: "An error occurred", args: [message] }); const webSearch: WebSearch = { prompt, searchQuery: "", results: [], contextSources: [], createdAt, updatedAt, }; yield makeFinalAnswerUpdate(); return webSearch; } }
chat-ui/src/lib/server/websearch/runWebSearch.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/runWebSearch.ts", "repo_id": "chat-ui", "token_count": 1159 }
85
import type { WebSearchSource } from "$lib/types/WebSearch";
import {
	MessageUpdateType,
	MessageWebSearchUpdateType,
	type MessageWebSearchErrorUpdate,
	type MessageWebSearchFinishedUpdate,
	type MessageWebSearchGeneralUpdate,
	type MessageWebSearchSourcesUpdate,
} from "$lib/types/MessageUpdate";

export function makeGeneralUpdate(
	update: Pick<MessageWebSearchGeneralUpdate, "message" | "args">
): MessageWebSearchGeneralUpdate {
	return {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Update,
		...update,
	};
}

export function makeErrorUpdate(
	update: Pick<MessageWebSearchErrorUpdate, "message" | "args">
): MessageWebSearchErrorUpdate {
	return {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Error,
		...update,
	};
}

export function makeSourcesUpdate(sources: WebSearchSource[]): MessageWebSearchSourcesUpdate {
	return {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Sources,
		message: "sources",
		sources: sources.map(({ link, title }) => ({ link, title })),
	};
}

export function makeFinalAnswerUpdate(): MessageWebSearchFinishedUpdate {
	return {
		type: MessageUpdateType.WebSearch,
		subtype: MessageWebSearchUpdateType.Finished,
	};
}
chat-ui/src/lib/server/websearch/update.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/update.ts", "repo_id": "chat-ui", "token_count": 374 }
86
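The helpers above are the only way web-search progress is surfaced to the UI. A short illustrative sketch of producing two updates, mirroring how `runWebSearch` calls them; the argument values here are examples, not fixed strings.

```ts
import { makeGeneralUpdate, makeErrorUpdate } from "$lib/server/websearch/update";

// Two illustrative updates, as a generator like runWebSearch would yield them.
const progress = makeGeneralUpdate({ message: "Browsing search results" });
const failure = makeErrorUpdate({ message: "An error occurred", args: ["timeout"] });

console.log(progress.subtype); // MessageWebSearchUpdateType.Update
console.log(failure.subtype); // MessageWebSearchUpdateType.Error
```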
import type { Timestamps } from "./Timestamps";

export interface ConversationStats extends Timestamps {
	date: {
		at: Date;
		span: "day" | "week" | "month";
		field: "updatedAt" | "createdAt";
	};
	type: "conversation" | "message";
	/** _id => number of conversations/messages in the month */
	distinct: "sessionId" | "userId" | "userOrSessionId" | "_id";
	count: number;
}
chat-ui/src/lib/types/ConversationStats.ts/0
{ "file_path": "chat-ui/src/lib/types/ConversationStats.ts", "repo_id": "chat-ui", "token_count": 134 }
87
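As a quick illustration of the shape above, here is a hypothetical document satisfying the interface, assuming `Timestamps` contributes `createdAt` and `updatedAt` fields; all values are made up.

```ts
import type { ConversationStats } from "$lib/types/ConversationStats";

// Assumed: Timestamps provides createdAt/updatedAt.
const sample: ConversationStats = {
	createdAt: new Date(),
	updatedAt: new Date(),
	date: { at: new Date("2024-06-01"), span: "day", field: "updatedAt" },
	type: "conversation",
	distinct: "userId",
	count: 42,
};

console.log(sample.type, sample.count);
```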
/* eslint-disable no-shadow */
export enum UrlDependency {
	ConversationList = "conversation:list",
	Conversation = "conversation:id",
}
chat-ui/src/lib/types/UrlDependency.ts/0
{ "file_path": "chat-ui/src/lib/types/UrlDependency.ts", "repo_id": "chat-ui", "token_count": 49 }
88
export function isURL(url: string) {
	try {
		new URL(url);
		return true;
	} catch (e) {
		return false;
	}
}
chat-ui/src/lib/utils/isUrl.ts/0
{ "file_path": "chat-ui/src/lib/utils/isUrl.ts", "repo_id": "chat-ui", "token_count": 48 }
89
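A couple of illustrative calls to the helper above, with expected results shown as comments.

```ts
import { isURL } from "$lib/utils/isUrl";

console.log(isURL("https://huggingface.co/")); // true
console.log(isURL("not a url")); // false
console.log(isURL("mailto:hello@example.com")); // true: any string the URL constructor accepts passes
```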
export const timeout = <T>(prom: Promise<T>, time: number): Promise<T> => {
	let timer: NodeJS.Timeout;
	return Promise.race([
		prom,
		new Promise<T>((_, reject) => {
			timer = setTimeout(() => reject(new Error(`Timeout after ${time / 1000} seconds`)), time);
		}),
	]).finally(() => clearTimeout(timer));
};
chat-ui/src/lib/utils/timeout.ts/0
{ "file_path": "chat-ui/src/lib/utils/timeout.ts", "repo_id": "chat-ui", "token_count": 110 }
90
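A sketch of how the helper above might be used to bound a slow network call; the URL and the 5-second budget are illustrative only.

```ts
import { timeout } from "$lib/utils/timeout";

async function fetchWithDeadline(url: string): Promise<string> {
	// Rejects with "Timeout after 5 seconds" if the request takes too long.
	const res = await timeout(fetch(url), 5000);
	return res.text();
}

fetchWithDeadline("https://example.com")
	.then((body) => console.log(body.length))
	.catch((err) => console.error(err));
```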
import type { WebSearchSource } from "$lib/types/WebSearch";
import { processTokens, type Token } from "$lib/utils/marked";

export type IncomingMessage = {
	type: "process";
	content: string;
	sources: WebSearchSource[];
};

export type OutgoingMessage = {
	type: "processed";
	tokens: Token[];
};

// Flag to track if the worker is currently processing a message
let isProcessing = false;
// Buffer to store the latest incoming message
let latestMessage: IncomingMessage | null = null;

// Helper function to safely handle the latest message
async function processMessage() {
	if (latestMessage) {
		const nextMessage = latestMessage;
		latestMessage = null;
		isProcessing = true;

		try {
			const { content, sources } = nextMessage;
			const processedTokens = await processTokens(content, sources);
			postMessage(JSON.parse(JSON.stringify({ type: "processed", tokens: processedTokens })));
		} finally {
			isProcessing = false;
			// After processing, check if a new message was buffered
			await new Promise((resolve) => setTimeout(resolve, 100));
			processMessage();
		}
	}
}

onmessage = (event) => {
	if (event.data.type !== "process") {
		return;
	}

	latestMessage = event.data as IncomingMessage;

	if (!isProcessing && latestMessage) {
		processMessage();
	}
};
chat-ui/src/lib/workers/markdownWorker.ts/0
{ "file_path": "chat-ui/src/lib/workers/markdownWorker.ts", "repo_id": "chat-ui", "token_count": 398 }
91
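On the main thread, a component might drive this worker roughly as follows. This is a hedged sketch: the `?worker` import suffix is a Vite bundler convention and the actual integration in the app may differ; only `IncomingMessage` and `OutgoingMessage` come from the worker module above.

```ts
// Assumed Vite-style worker import; not part of the worker file itself.
import MarkdownWorkerCtor from "$lib/workers/markdownWorker?worker";
import type { IncomingMessage, OutgoingMessage } from "$lib/workers/markdownWorker";

const worker = new MarkdownWorkerCtor();

worker.onmessage = (event: MessageEvent<OutgoingMessage>) => {
	if (event.data.type === "processed") {
		// Hand the marked tokens to whatever renders the message body.
		console.log("received", event.data.tokens.length, "tokens");
	}
};

// Only the most recent message is processed; intermediate ones are coalesced by the worker.
const msg: IncomingMessage = { type: "process", content: "# Hello **world**", sources: [] };
worker.postMessage(msg);
```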
import { collections } from "$lib/server/database"; import { models } from "$lib/server/models"; import { authCondition } from "$lib/server/auth"; import type { Conversation } from "$lib/types/Conversation"; import { CONV_NUM_PER_PAGE } from "$lib/constants/pagination"; export async function GET({ locals, url }) { const p = parseInt(url.searchParams.get("p") ?? "0"); if (locals.user?._id || locals.sessionId) { const convs = await collections.conversations .find({ ...authCondition(locals), }) .project<Pick<Conversation, "_id" | "title" | "updatedAt" | "model" | "assistantId">>({ title: 1, updatedAt: 1, model: 1, assistantId: 1, }) .sort({ updatedAt: -1 }) .skip(p * CONV_NUM_PER_PAGE) .limit(CONV_NUM_PER_PAGE) .toArray(); if (convs.length === 0) { return Response.json([]); } const res = convs.map((conv) => ({ _id: conv._id, id: conv._id, // legacy param iOS title: conv.title, updatedAt: conv.updatedAt, model: conv.model, modelId: conv.model, // legacy param iOS assistantId: conv.assistantId, modelTools: models.find((m) => m.id == conv.model)?.tools ?? false, })); return Response.json(res); } else { return Response.json({ message: "Must have session cookie" }, { status: 401 }); } } export async function DELETE({ locals }) { if (locals.user?._id || locals.sessionId) { await collections.conversations.deleteMany({ ...authCondition(locals), }); } return new Response(); }
chat-ui/src/routes/api/conversations/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/conversations/+server.ts", "repo_id": "chat-ui", "token_count": 580 }
92
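A minimal client-side sketch for paging through the endpoint above. The response type here covers only a subset of the fields the route returns, and the session cookie is assumed to be sent automatically by the browser.

```ts
// Partial typing of the route's response; see the +server.ts above for the full shape.
type ConversationSummary = {
	_id: string;
	title: string;
	updatedAt: string;
	model: string;
	assistantId?: string;
	modelTools: boolean;
};

export async function fetchConversationPage(page = 0): Promise<ConversationSummary[]> {
	const res = await fetch(`/api/conversations?p=${page}`, { credentials: "include" });
	if (res.status === 401) {
		throw new Error("Must have session cookie");
	}
	return (await res.json()) as ConversationSummary[];
}
```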
import { useAPIClient, handleResponse } from "$lib/APIClient";

export const load = async ({ url, fetch }) => {
	const client = useAPIClient({ fetch });

	const data = client.assistants.search
		.get({ query: Object.fromEntries(url.searchParams.entries()) })
		.then(handleResponse);

	return data;
};
chat-ui/src/routes/assistants/+page.ts/0
{ "file_path": "chat-ui/src/routes/assistants/+page.ts", "repo_id": "chat-ui", "token_count": 100 }
93
<script lang="ts"> import type { PageData } from "./$types"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; import { base } from "$app/paths"; import { page } from "$app/state"; import CarbonHelpFilled from "~icons/carbon/help-filled"; import CarbonTools from "~icons/carbon/tools"; import CarbonImage from "~icons/carbon/image"; import { useSettingsStore } from "$lib/stores/settings"; interface Props { data: PageData; } let { data }: Props = $props(); const settings = useSettingsStore(); const publicConfig = usePublicConfig(); </script> <svelte:head> {#if publicConfig.isHuggingChat} <title>HuggingChat - Models</title> <meta property="og:title" content="HuggingChat - Models" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat available models" /> <meta property="og:url" content={page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Models</h1> {#if publicConfig.isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/372" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" aria-label="Hub discussion about models" > <CarbonHelpFilled /> </a> {/if} </div> <h2 class="text-gray-500">All models available on {publicConfig.PUBLIC_APP_NAME}</h2> <div class="mt-8 grid grid-cols-1 gap-3 sm:gap-5 xl:grid-cols-2"> {#each data.models.filter((el) => !el.unlisted) as model, index (model.id)} <div aria-label="Model card" role="region" class="relative flex flex-col gap-2 overflow-hidden rounded-xl border bg-gray-50/50 px-6 py-5 shadow hover:bg-gray-50 hover:shadow-inner dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40" class:active-model={model.id === $settings.activeModel} > <a href="{base}/models/{model.id}" class="absolute inset-0 z-10" aria-label="View details for {model.displayName}" ></a> <div class="flex items-center justify-between gap-1"> {#if model.logoUrl} <img class="overflown aspect-square size-6 rounded border dark:border-gray-700" src={model.logoUrl} alt="{model.displayName} logo" /> {:else} <div class="size-6 rounded border border-transparent bg-gray-300 dark:bg-gray-800" aria-hidden="true" ></div> {/if} <div class="flex items-center gap-1"> {#if model.tools} <span title="This model supports tools." class="ml-auto grid size-[21px] place-items-center rounded-lg border border-purple-300 dark:border-purple-700" aria-label="Model supports tools" role="img" > <CarbonTools class="text-xxs text-purple-700 dark:text-purple-500" /> </span> {/if} {#if model.multimodal} <span title="This model is multimodal and supports image inputs natively." class="ml-auto flex size-[21px] items-center justify-center rounded-lg border border-blue-700 dark:border-blue-500" aria-label="Model is multimodal" role="img" > <CarbonImage class="text-xxs text-blue-700 dark:text-blue-500" /> </span> {/if} {#if model.reasoning} <span title="This model supports reasoning." 
class="ml-auto grid size-[21px] place-items-center rounded-lg border border-purple-300 dark:border-purple-700" aria-label="Model supports reasoning" role="img" > <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 32 32" > <path class="stroke-purple-700" style="stroke-width: 2; fill: none; stroke-linecap: round; stroke-linejoin: round; stroke-dasharray: 50;" d="M16 6v3.33M16 6c0-2.65 3.25-4.3 5.4-2.62 1.2.95 1.6 2.65.95 4.04a3.63 3.63 0 0 1 4.61.16 3.45 3.45 0 0 1 .46 4.37 5.32 5.32 0 0 1 1.87 4.75c-.22 1.66-1.39 3.6-3.07 4.14M16 6c0-2.65-3.25-4.3-5.4-2.62a3.37 3.37 0 0 0-.95 4.04 3.65 3.65 0 0 0-4.6.16 3.37 3.37 0 0 0-.49 4.27 5.57 5.57 0 0 0-1.85 4.85 5.3 5.3 0 0 0 3.07 4.15M16 9.33v17.34m0-17.34c0 2.18 1.82 4 4 4m6.22 7.5c.67 1.3.56 2.91-.27 4.11a4.05 4.05 0 0 1-4.62 1.5c0 1.53-1.05 2.9-2.66 2.9A2.7 2.7 0 0 1 16 26.66m10.22-5.83a4.05 4.05 0 0 0-3.55-2.17m-16.9 2.18a4.05 4.05 0 0 0 .28 4.1c1 1.44 2.92 2.09 4.59 1.5 0 1.52 1.12 2.88 2.7 2.88A2.7 2.7 0 0 0 16 26.67M5.78 20.85a4.04 4.04 0 0 1 3.55-2.18" /> </svg> </span> {/if} {#if model.id === $settings.activeModel} <span class="rounded-full border border-blue-500 bg-blue-500/5 px-2 py-0.5 text-xs text-blue-500 dark:border-blue-500 dark:bg-blue-500/10" > Active </span> {:else if index === 0} <span class="rounded-full border border-gray-300 px-2 py-0.5 text-xs text-gray-500 dark:border-gray-500 dark:text-gray-400" > Default </span> {/if} </div> </div> <span class="flex items-center gap-2 font-semibold"> {model.displayName} </span> <span class="whitespace-pre-wrap text-sm text-gray-500 dark:text-gray-400"> {model.description || "-"} </span> </div> {/each} </div> </div> </div>
chat-ui/src/routes/models/+page.svelte/0
{ "file_path": "chat-ui/src/routes/models/+page.svelte", "repo_id": "chat-ui", "token_count": 2720 }
94
import { collections } from "$lib/server/database"; import { error, type RequestHandler } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; export const GET: RequestHandler = async ({ params }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { error(404, "No assistant found"); } if (!assistant.avatar) { error(404, "No avatar found"); } const fileId = collections.bucket.find({ filename: assistant._id.toString() }); const content = await fileId.next().then(async (file) => { if (!file?._id) { error(404, "Avatar not found"); } const fileStream = collections.bucket.openDownloadStream(file?._id); const fileBuffer = await new Promise<Buffer>((resolve, reject) => { const chunks: Uint8Array[] = []; fileStream.on("data", (chunk) => chunks.push(chunk)); fileStream.on("error", reject); fileStream.on("end", () => resolve(Buffer.concat(chunks))); }); return fileBuffer; }); return new Response(content, { headers: { "Content-Type": "image/jpeg", "Content-Security-Policy": "default-src 'none'; script-src 'none'; style-src 'none'; sandbox;", }, }); };
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts", "repo_id": "chat-ui", "token_count": 420 }
95
@import "./highlight-js.css"; @tailwind base; @tailwind components; @tailwind utilities; @layer components { .btn { @apply inline-flex flex-shrink-0 cursor-pointer select-none items-center justify-center whitespace-nowrap outline-none transition-all focus:ring disabled:cursor-default; } .active-model { @apply border-blue-500 bg-blue-500/5 hover:bg-blue-500/10; } .file-hoverable { @apply hover:bg-gray-500/10; } .base-tool { @apply flex h-[1.6rem] items-center gap-[.2rem] whitespace-nowrap border border-transparent text-xs outline-none transition-all focus:outline-none active:outline-none dark:hover:text-gray-300 sm:hover:text-purple-600; } .active-tool { @apply rounded-full !border-purple-200 bg-purple-100 pl-1 pr-2 text-purple-600 hover:text-purple-600 dark:!border-purple-700 dark:bg-purple-600/40 dark:text-purple-200; } } @layer utilities { .scrollbar-custom { @apply scrollbar-thin scrollbar-track-transparent scrollbar-thumb-black/10 scrollbar-thumb-rounded-full scrollbar-w-1 hover:scrollbar-thumb-black/20 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20; } } .katex-display { overflow: auto hidden; }
chat-ui/src/styles/main.css/0
{ "file_path": "chat-ui/src/styles/main.css", "repo_id": "chat-ui", "token_count": 435 }
96
{ "extends": "./.svelte-kit/tsconfig.json", "compilerOptions": { "allowJs": true, "checkJs": true, "esModuleInterop": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "skipLibCheck": true, "sourceMap": true, "strict": true, "target": "ES2018" }, "exclude": ["vite.config.ts"] // Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias // // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes // from the referenced tsconfig.json - TypeScript does not merge them in }
chat-ui/tsconfig.json/0
{ "file_path": "chat-ui/tsconfig.json", "repo_id": "chat-ui", "token_count": 211 }
97
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 50_000 SMALL_TEST = 5_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def read(dataset: datasets.Dataset, length): for i in range(length): _ = dataset[i] @get_duration def read_batch(dataset: datasets.Dataset, length, batch_size): for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_formatted(dataset: datasets.Dataset, length, type): with dataset.formatted_as(type=type): for i in range(length): _ = dataset[i] @get_duration def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type): with dataset.formatted_as(type=type): for i in range(0, length, batch_size): _ = dataset[i : i + batch_size] def benchmark_iterating(): times = {"num examples": SPEED_TEST_N_EXAMPLES} functions = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted, {"type": "pandas", "length": SMALL_TEST}), (read_formatted, {"type": "torch", "length": SMALL_TEST}), (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] functions_shuffled = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("generating dataset") features = datasets.Features( {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")} ) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, ) print("first set of iterations") for func, kwargs in functions: print(func.__name__, str(kwargs)) times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs) print("shuffling dataset") dataset = dataset.shuffle() print("Second set of iterations (after shuffling") for func, kwargs in functions_shuffled: print("shuffled ", func.__name__, str(kwargs)) times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func( dataset, **kwargs ) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
datasets/benchmarks/benchmark_iterating.py/0
{ "file_path": "datasets/benchmarks/benchmark_iterating.py", "repo_id": "datasets", "token_count": 1697 }
98
# Dataset features [`Features`] defines the internal structure of a dataset. It is used to specify the underlying serialization format. What's more interesting to you though is that [`Features`] contains high-level information about everything from the column names and types, to the [`ClassLabel`]. You can think of [`Features`] as the backbone of a dataset. The [`Features`] format is simple: `dict[column_name, column_type]`. It is a dictionary of column name and column type pairs. The column type provides a wide range of options for describing the type of data you have. Let's have a look at the features of the MRPC dataset from the GLUE benchmark: ```py >>> from datasets import load_dataset >>> dataset = load_dataset('nyu-mll/glue', 'mrpc', split='train') >>> dataset.features {'idx': Value('int32'), 'label': ClassLabel(names=['not_equivalent', 'equivalent']), 'sentence1': Value('string'), 'sentence2': Value('string'), } ``` The [`Value`] feature tells 🤗 Datasets: - The `idx` data type is `int32`. - The `sentence1` and `sentence2` data types are `string`. 🤗 Datasets supports many other data types such as `bool`, `float32` and `binary` to name just a few. <Tip> Refer to [`Value`] for a full list of supported data types. </Tip> The [`ClassLabel`] feature informs 🤗 Datasets the `label` column contains two classes. The classes are labeled `not_equivalent` and `equivalent`. Labels are stored as integers in the dataset. When you retrieve the labels, [`ClassLabel.int2str`] and [`ClassLabel.str2int`] carries out the conversion from integer value to label name, and vice versa. If your data type contains a list of objects, then you want to use the [`List`] feature. Remember the SQuAD dataset? ```py >>> from datasets import load_dataset >>> dataset = load_dataset('rajpurkar/squad', split='train') >>> dataset.features {'id': Value('string'), 'title': Value('string'), 'context': Value('string'), 'question': Value('string'), 'answers': {'text': List(Value('string')), 'answer_start': List(Value('int32'))}} ``` The `answers` field is constructed using the dict of features because and contains two subfields, `text` and `answer_start`, which are lists of `string` and `int32`, respectively. <Tip> See the [flatten](./process#flatten) section to learn how you can extract the nested subfields as their own independent columns. </Tip> The array feature type is useful for creating arrays of various sizes. You can create arrays with two dimensions using [`Array2D`], and even arrays with five dimensions using [`Array5D`]. ```py >>> features = Features({'a': Array2D(shape=(1, 3), dtype='int32')}) ``` The array type also allows the first dimension of the array to be dynamic. This is useful for handling sequences with variable lengths such as sentences, without having to pad or truncate the input to a uniform shape. ```py >>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')}) ``` ## Audio feature Audio datasets have a column with type [`Audio`], which contains three important fields: - `array`: the decoded audio data represented as a 1-dimensional array. - `path`: the path to the downloaded audio file. - `sampling_rate`: the sampling rate of the audio data. 
When you load an audio dataset and call the audio column, the [`Audio`] feature automatically decodes and resamples the audio file: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset[0]["audio"] <datasets.features._torchcodec.AudioDecoder object at 0x11642b6a0> ``` <Tip warning={true}> Index into an audio dataset using the row index first and then the `audio` column - `dataset[0]["audio"]` - to avoid decoding and resampling all the audio files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Audio`] type simply gives you the path or the bytes of the audio file, without decoding it into an torchcodec `AudioDecoder` object, ```py >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train").cast_column("audio", Audio(decode=False)) >>> dataset[0] {'audio': {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav'}, 'english_transcription': 'I would like to set up a joint account with my partner', 'intent_class': 11, 'lang_id': 4, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'transcription': 'I would like to set up a joint account with my partner'} ``` ## Image feature Image datasets have a column with type [`Image`], which loads `PIL.Image` objects from images stored as bytes: When you load an image dataset and call the image column, the [`Image`] feature automatically decodes the image file: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("AI-Lab-Makerere/beans", split="train") >>> dataset[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x125506CF8> ``` <Tip warning={true}> Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding all the image files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Image`] type simply gives you the path or the bytes of the image file, without decoding it into an `PIL.Image`, ```py >>> dataset = load_dataset("AI-Lab-Makerere/beans", split="train").cast_column("image", Image(decode=False)) >>> dataset[0]["image"] {'bytes': None, 'path': '/Users/username/.cache/huggingface/datasets/downloads/extracted/772e7c1fba622cff102b85dd74bcce46e8168634df4eaade7bedd3b8d91d3cd7/train/healthy/healthy_train.265.jpg'} ``` Depending on the dataset, you may get the path to the local downloaded image, or the content of the image as bytes if the dataset is not made of individual files. You can also define a dataset of images from numpy arrays: ```python >>> ds = Dataset.from_dict({"i": [np.zeros(shape=(16, 16, 3), dtype=np.uint8)]}, features=Features({"i": Image()})) ``` And in this case the numpy arrays are encoded into PNG (or TIFF if the pixels values precision is important). For multi-channels arrays like RGB or RGBA, only uint8 is supported. If you use a larger precision, you get a warning and the array is downcasted to uint8. For gray-scale images you can use the integer or float precision you want as long as it is compatible with `Pillow`. 
A warning is shown if your image integer or float precision is too high, and in this case the array is downcasted: an int64 array is downcasted to int32, and a float64 array is downcasted to float32.
datasets/docs/source/about_dataset_features.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_features.mdx", "repo_id": "datasets", "token_count": 2130 }
99
# Cloud storage ## Hugging Face Datasets The Hugging Face Dataset Hub is home to a growing collection of datasets that span a variety of domains and tasks. It's more than a cloud storage: the Dataset Hub is a platform that provides data versioning thanks to git, as well as a Dataset Viewer to explore the data, making it a great place to store AI-ready datasets. This guide shows how to import data from other cloud storage using the filesystems implementations from `fsspec`. ## Import data from a cloud storage Most cloud storage providers have a `fsspec` FileSystem implementation, which is useful to import data from any cloud provider with the same code. This is especially useful to publish datasets on Hugging Face. Take a look at the following table for some example of supported cloud storage providers: | Storage provider | Filesystem implementation | |----------------------|---------------------------------------------------------------| | Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) | | Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) | | Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) | | Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) | This guide will show you how to import data files from any cloud storage and save a dataset on Hugging Face. Let's say we want to publish a dataset on Hugging Face from Parquet files from a cloud storage. First, instantiate your cloud storage filesystem and list the files you'd like to import: ```python >>> import fsspec >>> fs = fsspec.filesystem("...") # s3 / gcs / abfs / adl / oci / ... >>> data_dir = "path/to/my/data/" >>> pattern = "*.parquet" >>> data_files = fs.glob(data_dir + pattern) ["path/to/my/data/0001.parquet", "path/to/my/data/0001.parquet", ...] ``` Then you can create a dataset on Hugging Face and import the data files, using for example: ```python >>> from huggingface_hub import create_repo, upload_file >>> from tqdm.auto import tqdm >>> destination_dataset = "username/my-dataset" >>> create_repo(destination_dataset, repo_type="dataset") >>> for data_file in tqdm(fs.glob(data_dir + pattern)): ... with fs.open(data_file) as fileobj: ... path_in_repo = data_file[len(data_dir):] ... upload_file( ... path_or_fileobj=fileobj, ... path_in_repo=path_in_repo, ... repo_id=destination_dataset, ... repo_type="dataset", ... ) ``` Check out the [huggingface_hub](https://huggingface.co/docs/huggingface_hub) documentation on files uploads [here](https://huggingface.co/docs/huggingface_hub/en/guides/upload) if you're looking for more upload options. Finally you can now load the dataset using 🤗 Datasets: ```python >>> from datasets import load_dataset >>> ds = load_dataset("username/my-dataset") ```
datasets/docs/source/filesystems.mdx/0
{ "file_path": "datasets/docs/source/filesystems.mdx", "repo_id": "datasets", "token_count": 1044 }
100
# Loading methods Methods for listing and loading datasets: ## Datasets [[autodoc]] datasets.load_dataset [[autodoc]] datasets.load_from_disk [[autodoc]] datasets.load_dataset_builder [[autodoc]] datasets.get_dataset_config_names [[autodoc]] datasets.get_dataset_infos [[autodoc]] datasets.get_dataset_split_names ## From files Configurations used to load data files. They are used when loading local files or a dataset repository: - local files: `load_dataset("parquet", data_dir="path/to/data/dir")` - dataset repository: `load_dataset("allenai/c4")` You can pass arguments to `load_dataset` to configure data loading. For example you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data: ```python load_dataset("csv", data_dir="path/to/data/dir", sep="\t") ``` ### Text [[autodoc]] datasets.packaged_modules.text.TextConfig [[autodoc]] datasets.packaged_modules.text.Text ### CSV [[autodoc]] datasets.packaged_modules.csv.CsvConfig [[autodoc]] datasets.packaged_modules.csv.Csv ### JSON [[autodoc]] datasets.packaged_modules.json.JsonConfig [[autodoc]] datasets.packaged_modules.json.Json ### XML [[autodoc]] datasets.packaged_modules.xml.XmlConfig [[autodoc]] datasets.packaged_modules.xml.Xml ### Parquet [[autodoc]] datasets.packaged_modules.parquet.ParquetConfig [[autodoc]] datasets.packaged_modules.parquet.Parquet ### Arrow [[autodoc]] datasets.packaged_modules.arrow.ArrowConfig [[autodoc]] datasets.packaged_modules.arrow.Arrow ### SQL [[autodoc]] datasets.packaged_modules.sql.SqlConfig [[autodoc]] datasets.packaged_modules.sql.Sql ### Images [[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig [[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder ### Audio [[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig [[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder ### Videos [[autodoc]] datasets.packaged_modules.videofolder.VideoFolderConfig [[autodoc]] datasets.packaged_modules.videofolder.VideoFolder ### Pdf [[autodoc]] datasets.packaged_modules.pdffolder.PdfFolderConfig [[autodoc]] datasets.packaged_modules.pdffolder.PdfFolder ### WebDataset [[autodoc]] datasets.packaged_modules.webdataset.WebDataset
datasets/docs/source/package_reference/loading_methods.mdx/0
{ "file_path": "datasets/docs/source/package_reference/loading_methods.mdx", "repo_id": "datasets", "token_count": 783 }
101
# Use with NumPy This document is a quick introduction to using `datasets` with NumPy, with a particular focus on how to get `numpy.ndarray` objects out of our datasets, and how to use them to train models based on NumPy such as `scikit-learn` models. ## Dataset format By default, datasets return regular Python objects: integers, floats, strings, lists, etc.. To get NumPy arrays instead, you can set the format of the dataset to `numpy`: ```py >>> from datasets import Dataset >>> data = [[1, 2], [3, 4]] >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("numpy") >>> ds[0] {'data': array([1, 2])} >>> ds[:2] {'data': array([ [1, 2], [3, 4]])} ``` <Tip> A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to NumPy arrays. </Tip> Note that the exact same procedure applies to `DatasetDict` objects, so that when setting the format of a `DatasetDict` to `numpy`, all the `Dataset`s there will be formatted as `numpy`: ```py >>> from datasets import DatasetDict >>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}} >>> dds = DatasetDict.from_dict(data) >>> dds = dds.with_format("numpy") >>> dds["train"][:2] {'data': array([ [1, 2], [3, 4]])} ``` ### N-dimensional arrays If your dataset consists of N-dimensional arrays, you will see that by default they are considered as the same array if the shape is fixed: ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]] # fixed shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("numpy") >>> ds[0] {'data': array([[1, 2], [3, 4]])} ``` ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3]], [[4, 5, 6],[7, 8]]] # varying shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("numpy") >>> ds[0] {'data': array([array([1, 2]), array([3])], dtype=object)} ``` However this logic often requires slow shape comparisons and data copies. To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors: ```py >>> from datasets import Dataset, Features, Array2D >>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] >>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')}) >>> ds = Dataset.from_dict({"data": data}, features=features) >>> ds = ds.with_format("numpy") >>> ds[0] {'data': array([[1, 2], [3, 4]])} >>> ds[:2] {'data': array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])} ``` ### Other feature types [`ClassLabel`] data is properly converted to arrays: ```py >>> from datasets import Dataset, Features, ClassLabel >>> labels = [0, 0, 1] >>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) >>> ds = Dataset.from_dict({"label": labels}, features=features) >>> ds = ds.with_format("numpy") >>> ds[:3] {'label': array([0, 0, 1])} ``` String and binary objects are unchanged, since NumPy only supports numbers. The [`Image`] and [`Audio`] feature types are also supported. <Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. 
</Tip> ```py >>> from datasets import Dataset, Features, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("numpy") >>> ds[0]["image"].shape (512, 512, 3) >>> ds[0] {'image': array([[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]], dtype=uint8)} >>> ds[:2]["image"].shape (2, 512, 512, 3) >>> ds[:2] {'image': array([[[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]]], dtype=uint8)} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("numpy") >>> ds[0]["audio"]["array"] array([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 , 0.01809692, 0.00268555], dtype=float32) >>> ds[0]["audio"]["sampling_rate"] array(44100, weak_type=True) ``` ## Data loading NumPy doesn't have any built-in data loading capabilities, so you'll either need to materialize the NumPy arrays like `X, y` to use in `scikit-learn` or use a library such as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader`. ### Using `with_format('numpy')` The easiest way to get NumPy arrays out of a dataset is to use the `with_format('numpy')` method. Lets assume that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available at the HuggingFace Hub at https://huggingface.co/datasets/mnist. ```py >>> from datasets import load_dataset >>> ds = load_dataset("mnist") >>> ds = ds.with_format("numpy") >>> ds["train"][0] {'image': array([[ 0, 0, 0, ...], [ 0, 0, 0, ...], ..., [ 0, 0, 0, ...], [ 0, 0, 0, ...]], dtype=uint8), 'label': array(5)} ``` Once the format is set we can feed the dataset to the model based on NumPy in batches using the `Dataset.iter()` method: ```py >>> for epoch in range(epochs): ... for batch in ds["train"].iter(batch_size=32): ... x, y = batch["image"], batch["label"] ... ... ```
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """DatasetBuilder base class.""" import abc import contextlib import copy import inspect import os import posixpath import shutil import textwrap import time import urllib from collections.abc import Iterable, Mapping from dataclasses import dataclass from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from unittest.mock import patch import fsspec from fsspec.core import url_to_fs from multiprocess import Pool from tqdm.contrib.concurrent import thread_map from . import config, utils from .arrow_dataset import Dataset from .arrow_reader import ( ArrowReader, ReadInstruction, ) from .arrow_writer import ArrowWriter, ParquetWriter, SchemaInferenceError from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns from .dataset_dict import DatasetDict, IterableDatasetDict from .download.download_config import DownloadConfig from .download.download_manager import DownloadManager, DownloadMode from .download.streaming_download_manager import StreamingDownloadManager, xjoin from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError from .features import Features from .filesystems import ( is_remote_filesystem, rename, ) from .fingerprint import Hasher from .info import DatasetInfo, PostProcessedInfo from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset from .keyhash import DuplicatedKeysError from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase from .splits import Split, SplitDict, SplitGenerator, SplitInfo from .streaming import extend_dataset_builder_for_streaming from .table import CastError from .utils import logging from .utils import tqdm as hf_tqdm from .utils._filelock import FileLock from .utils.file_utils import is_remote_url from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits from .utils.py_utils import ( classproperty, convert_file_size_to_int, has_sufficient_disk_space, iflatmap_unordered, map_nested, memoize, size_str, temporary_assignment, ) from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs from .utils.track import tracked_list if TYPE_CHECKING: from .load import DatasetModule logger = logging.get_logger(__name__) class InvalidConfigName(ValueError): pass @dataclass class BuilderConfig: """Base class for `DatasetBuilder` data configuration. `DatasetBuilder` subclasses with data configuration options should subclass `BuilderConfig` and add their own properties. Attributes: name (`str`, defaults to `default`): The name of the configuration. version (`Version` or `str`, defaults to `0.0.0`): The version of the configuration. data_dir (`str`, *optional*): Path to the directory containing the source data. 
data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). description (`str`, *optional*): A human description of the configuration. """ name: str = "default" version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0") data_dir: Optional[str] = None data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None description: Optional[str] = None def __post_init__(self): # The config name is used to name the cache directory. for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: if invalid_char in self.name: raise InvalidConfigName( f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. " f"They could create issues when creating a directory for this config on Windows filesystem." ) if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)): raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}") def __eq__(self, o): # we need to override the default dataclass __eq__ since it doesn't check for # other attributes that the ones of the signature. if set(self.__dict__.keys()) != set(o.__dict__.keys()): return False return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys()) def create_config_id( self, config_kwargs: dict, custom_features: Optional[Features] = None, ) -> str: """ The config id is used to build the cache directory. By default it is equal to the config name. However the name of a config is not sufficient to have a unique identifier for the dataset being generated since it doesn't take into account: - the config kwargs that can be used to overwrite attributes - the custom features used to write the dataset - the data_files for json/text/csv/pandas datasets Therefore the config id is just the config name with an optional suffix based on these. """ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs suffix: Optional[str] = None config_kwargs_to_add_to_suffix = config_kwargs.copy() # name and version are already used to build the cache directory config_kwargs_to_add_to_suffix.pop("name", None) config_kwargs_to_add_to_suffix.pop("version", None) # data dir handling (when specified it points to the manually downloaded data): # it was previously ignored before the introduction of config id because we didn't want # to change the config name. Now it's fine to take it into account for the config id. 
# config_kwargs_to_add_to_suffix.pop("data_dir", None) if "data_dir" in config_kwargs_to_add_to_suffix: if config_kwargs_to_add_to_suffix["data_dir"] is None: config_kwargs_to_add_to_suffix.pop("data_dir", None) else: # canonicalize the data dir to avoid two paths to the same location having different # hashes data_dir = config_kwargs_to_add_to_suffix["data_dir"] data_dir = os.path.normpath(data_dir) config_kwargs_to_add_to_suffix["data_dir"] = data_dir if config_kwargs_to_add_to_suffix: # we don't care about the order of the kwargs config_kwargs_to_add_to_suffix = { k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix) } if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()): suffix = ",".join( str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items() ) if len(suffix) > 32: # hash if too long suffix = Hasher.hash(config_kwargs_to_add_to_suffix) else: suffix = Hasher.hash(config_kwargs_to_add_to_suffix) if custom_features is not None: m = Hasher() if suffix: m.update(suffix) m.update(custom_features) suffix = m.hexdigest() if suffix: config_id = self.name + "-" + suffix if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH: config_id = self.name + "-" + Hasher.hash(suffix) return config_id else: return self.name def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None: if isinstance(self.data_files, DataFilesPatternsDict): base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path self.data_files = self.data_files.resolve(base_path, download_config) class DatasetBuilder: """Abstract base class for all datasets. `DatasetBuilder` has 3 key methods: - [`DatasetBuilder.info`]: Documents the dataset, including feature names, types, shapes, version, splits, citation, etc. - [`DatasetBuilder.download_and_prepare`]: Downloads the source data and writes it to disk. - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`]. Some `DatasetBuilder`s expose multiple variants of the dataset by defining a [`BuilderConfig`] subclass and accepting a config object (or name) on construction. Configurable datasets expose a pre-defined set of configurations in [`DatasetBuilder.builder_configs`]. Args: cache_dir (`str`, *optional*): Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`. dataset_name (`str`, *optional*): Name of the dataset, if different from the builder name. Useful for packaged builders like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets that use the same packaged builder. config_name (`str`, *optional*): Name of the dataset configuration. It affects the data generated on disk. Different configurations will have their own subdirectories and versions. If not provided, the default configuration is used (if it exists). <Added version="2.3.0"> Parameter `name` was renamed to `config_name`. </Added> hash (`str`, *optional*): Hash specific to the dataset builder code. Used to update the caching directory when the dataset builder code is updated (to avoid reusing old data). The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`. base_path (`str`, *optional*): Base path for relative paths that are used to download files. This can be a remote URL. features ([`Features`], *optional*): Features types to use with this dataset. It can be used to change the [`Features`] types of a dataset, for example. 
token (`str` or `bool`, *optional*): String or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, will get token from `"~/.huggingface"`. repo_id (`str`, *optional*): ID of the dataset repository. Used to distinguish builders with the same name but not coming from the same namespace, for example "rajpurkar/squad" and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad". data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). For builders like "csv" or "json" that need the user to specify data files. They can be either local or remote files. For convenience, you can use a `DataFilesDict`. data_dir (`str`, *optional*): Path to directory containing source data file(s). Use only if `data_files` is not passed, in which case it is equivalent to passing `os.path.join(data_dir, "**")` as `data_files`. For builders that require manual download, it must be the path to the local directory containing the manually downloaded data. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the dataset file-system backend, if any. writer_batch_size (`int`, *optional*): Batch size used by the ArrowWriter. It defines the number of samples that are kept in memory before writing them and also the length of the arrow chunks. None means that the ArrowWriter will use its default value. **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder configuration class is [`BuilderConfig`] or a subclass of it. """ # Default version VERSION = None # Default version set in BuilderConfig # Class for the builder config. BUILDER_CONFIG_CLASS = BuilderConfig # Named configurations that modify the data generated by download_and_prepare. 
BUILDER_CONFIGS = [] # Optional default config name to be used when name is None DEFAULT_CONFIG_NAME = None # Default batch size used by the ArrowWriter # It defines the number of samples that are kept in memory before writing them # and also the length of the arrow chunks # None means that the ArrowWriter will use its default value DEFAULT_WRITER_BATCH_SIZE = None def __init__( self, cache_dir: Optional[str] = None, dataset_name: Optional[str] = None, config_name: Optional[str] = None, hash: Optional[str] = None, base_path: Optional[str] = None, info: Optional[DatasetInfo] = None, features: Optional[Features] = None, token: Optional[Union[bool, str]] = None, repo_id: Optional[str] = None, data_files: Optional[Union[str, list, dict, DataFilesDict]] = None, data_dir: Optional[str] = None, storage_options: Optional[dict] = None, writer_batch_size: Optional[int] = None, **config_kwargs, ): # DatasetBuilder name self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1]) self.hash: Optional[str] = hash self.base_path = base_path self.token = token self.repo_id = repo_id self.storage_options = storage_options or {} self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE if data_files is not None and not isinstance(data_files, DataFilesDict): data_files = DataFilesDict.from_patterns( sanitize_patterns(data_files), base_path=base_path, download_config=DownloadConfig(token=token, storage_options=self.storage_options), ) # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None: config_kwargs["features"] = features if data_files is not None: config_kwargs["data_files"] = data_files if data_dir is not None: config_kwargs["data_dir"] = data_dir self.config_kwargs = config_kwargs self.config, self.config_id = self._create_builder_config( config_name=config_name, custom_features=features, **config_kwargs, ) # prepare info: DatasetInfo are a standardized dataclass across all datasets # Prefill datasetinfo if info is None: info = self._info() info.builder_name = self.name info.dataset_name = self.dataset_name info.config_name = self.config.name info.version = self.config.version self.info = info # update info with user specified infos if features is not None: self.info.features = features # Prepare data dirs: # cache_dir can be a remote bucket on GCS or S3 self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE) self._cache_dir_root = ( self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root) ) self._cache_downloaded_dir = ( posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR) if cache_dir else str(config.DOWNLOADED_DATASETS_PATH) ) self._cache_downloaded_dir = ( self._cache_downloaded_dir if is_remote_url(self._cache_downloaded_dir) else os.path.expanduser(self._cache_downloaded_dir) ) # In case there exists a legacy cache directory self._legacy_relative_data_dir = None self._cache_dir = self._build_cache_dir() if not is_remote_url(self._cache_dir_root): os.makedirs(self._cache_dir_root, exist_ok=True) lock_path = os.path.join( self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock" ) with FileLock(lock_path): if os.path.exists(self._cache_dir): # check if data exist if len(os.listdir(self._cache_dir)) > 0: if 
os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)): logger.debug("Overwrite dataset info from restored data version if exists.") self.info = DatasetInfo.from_directory(self._cache_dir) else: # dir exists but no data, remove the empty dir as data aren't available anymore logger.warning( f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. " ) os.rmdir(self._cache_dir) # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare self._output_dir = self._cache_dir self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file") # Set download manager self.dl_manager = None # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value. self._record_infos = False # Set in `.download_and_prepare` once the format of the generated dataset is known self._file_format = None # Enable streaming (e.g. it patches "open" to work with remote files) extend_dataset_builder_for_streaming(self) def __getstate__(self): return self.__dict__ def __setstate__(self, d): self.__dict__ = d # Re-enable streaming, since patched functions are not kept when pickling extend_dataset_builder_for_streaming(self) # Must be set for datasets that use 'data_dir' functionality - the ones # that require users to do additional steps to download the data # (this is usually due to some external regulations / rules). # This field should contain a string with user instructions, including # the list of files that should be present. It will be # displayed in the dataset documentation. @property def manual_download_instructions(self) -> Optional[str]: return None def _check_legacy_cache(self) -> Optional[str]: """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13""" if ( self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root) and self.config.name == "default" ): from .packaged_modules import _PACKAGED_DATASETS_MODULES namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name config_id = config_name + self.config_id[len(self.config.name) :] hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1] legacy_relative_data_dir = posixpath.join( self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}", config_id, "0.0.0", hash, ) legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir) if os.path.isdir(legacy_cache_dir): return legacy_relative_data_dir def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]: """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15""" if ( self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root) and not (set(self.config_kwargs) - {"data_files", "data_dir"}) ): from .packaged_modules import _PACKAGED_DATASETS_MODULES_2_15_HASHES from .utils._dill import Pickler def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str: """ Used to update hash of packaged modules which is used for creating unique cache directories to reflect different config parameters which are passed in metadata from readme. 
""" params_to_exclude = {"config_name", "version", "description"} params_to_add_to_hash = { param: value for param, value in sorted(config_parameters.items()) if param not in params_to_exclude } m = Hasher() m.update(hash) m.update(params_to_add_to_hash) return m.hexdigest() namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True): config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files}) hash = _PACKAGED_DATASETS_MODULES_2_15_HASHES.get(self.name, "missing") if ( dataset_module.builder_configs_parameters.metadata_configs and self.config.name in dataset_module.builder_configs_parameters.metadata_configs ): hash = update_hash_with_config_parameters( hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name] ) legacy_relative_data_dir = posixpath.join( self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}", config_id, "0.0.0", hash, ) legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir) if os.path.isdir(legacy_cache_dir): return legacy_relative_data_dir def _create_builder_config( self, config_name=None, custom_features=None, **config_kwargs ) -> tuple[BuilderConfig, str]: """Create and validate BuilderConfig object as well as a unique config id for this config. Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None. config_kwargs override the defaults kwargs in config """ builder_config = None # try default config if config_name is None and self.BUILDER_CONFIGS: if self.DEFAULT_CONFIG_NAME is not None: builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME) logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}") else: if len(self.BUILDER_CONFIGS) > 1: if not config_kwargs: example_of_usage = ( f"load_dataset('{self.repo_id or self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')" ) raise ValueError( "Config name is missing." f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}" + f"\nExample of usage:\n\t`{example_of_usage}`" ) else: builder_config = self.BUILDER_CONFIGS[0] logger.info( f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}" ) # try to get config by name if isinstance(config_name, str): builder_config = self.builder_configs.get(config_name) if builder_config is None and self.BUILDER_CONFIGS: raise ValueError( f"BuilderConfig '{config_name}' not found. 
Available: {list(self.builder_configs.keys())}" ) # if not using an existing config, then create a new config on the fly if not builder_config: if config_name is not None: config_kwargs["name"] = config_name elif self.DEFAULT_CONFIG_NAME and not config_kwargs: # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed config_kwargs["name"] = self.DEFAULT_CONFIG_NAME if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION: config_kwargs["version"] = self.VERSION builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs) # otherwise use the config_kwargs to overwrite the attributes else: builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config for key, value in config_kwargs.items(): if value is not None: if not hasattr(builder_config, key): raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.") setattr(builder_config, key, value) if not builder_config.name: raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}") # resolve data files if needed builder_config._resolve_data_files( base_path=self.base_path, download_config=DownloadConfig(token=self.token, storage_options=self.storage_options), ) # compute the config id that is going to be used for caching config_id = builder_config.create_config_id( config_kwargs, custom_features=custom_features, ) is_custom = (config_id not in self.builder_configs) and config_id != "default" if is_custom: logger.info(f"Using custom data configuration {config_id}") else: if ( builder_config.name in self.builder_configs and builder_config != self.builder_configs[builder_config.name] ): raise ValueError( "Cannot name a custom BuilderConfig the same as an available " f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}" ) if not builder_config.version: raise ValueError(f"BuilderConfig {builder_config.name} must have a version") return builder_config, config_id @classproperty @classmethod @memoize() def builder_configs(cls) -> dict[str, BuilderConfig]: """Dictionary of pre-defined configurations for this builder class.""" configs = {config.name: config for config in cls.BUILDER_CONFIGS} if len(configs) != len(cls.BUILDER_CONFIGS): names = [config.name for config in cls.BUILDER_CONFIGS] raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}") return configs @property def cache_dir(self): return self._cache_dir def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"): # Check for the legacy cache directory template (datasets<3.0.0) self._legacy_relative_data_dir = ( self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None ) self._cache_dir = self._build_cache_dir() self._output_dir = self._cache_dir def _relative_data_dir(self, with_version=True, with_hash=True) -> str: """Relative path of this dataset in cache_dir: Will be: self.dataset_name/self.config.version/self.hash/ or if a repo_id with a namespace has been specified: self.namespace___self.dataset_name/self.config.version/self.hash/ If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped. 
""" if self._legacy_relative_data_dir is not None and with_version and with_hash: return self._legacy_relative_data_dir namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}" builder_data_dir = posixpath.join(builder_data_dir, self.config_id) if with_version: builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version)) if with_hash and self.hash and isinstance(self.hash, str): builder_data_dir = posixpath.join(builder_data_dir, self.hash) return builder_data_dir def _build_cache_dir(self): """Return the data directory for the current version.""" builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False)) version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True)) def _other_versions_on_disk(): """Returns previous versions on disk.""" if not os.path.exists(builder_data_dir): return [] version_dirnames = [] for dir_name in os.listdir(builder_data_dir): try: version_dirnames.append((utils.Version(dir_name), dir_name)) except ValueError: # Invalid version (ex: incomplete data dir) pass version_dirnames.sort(reverse=True) return version_dirnames # Check and warn if other versions exist if not is_remote_url(builder_data_dir): version_dirs = _other_versions_on_disk() if version_dirs: other_version = version_dirs[0][0] if other_version != self.config.version: warn_msg = ( f"Found a different version {str(other_version)} of dataset {self.dataset_name} in " f"cache_dir {self._cache_dir_root}. Using currently defined version " f"{str(self.config.version)}." ) logger.warning(warn_msg) return version_data_dir @abc.abstractmethod def _info(self) -> DatasetInfo: """Construct the DatasetInfo object. See `DatasetInfo` for details. Warning: This function is only called once and the result is cached for all following .info() calls. Returns: info: (DatasetInfo) The dataset information """ raise NotImplementedError @classmethod def get_imported_module_dir(cls): """Return the path of the module of this class or subclass.""" return os.path.dirname(inspect.getfile(inspect.getmodule(cls))) def _rename(self, src: str, dst: str): rename(self._fs, src, dst) def download_and_prepare( self, output_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, verification_mode: Optional[Union[VerificationMode, str]] = None, dl_manager: Optional[DownloadManager] = None, base_path: Optional[str] = None, file_format: str = "arrow", max_shard_size: Optional[Union[int, str]] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **download_and_prepare_kwargs, ): """Downloads and prepares dataset for reading. Args: output_dir (`str`, *optional*): Output directory for the dataset. Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default. <Added version="2.5.0"/> download_config (`DownloadConfig`, *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, *optional*): Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). 
<Added version="2.9.1"/> dl_manager (`DownloadManager`, *optional*): Specific `DownloadManger` to use. base_path (`str`, *optional*): Base path for relative paths that are used to download files. This can be a remote url. If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead. file_format (`str`, *optional*): Format of the data files in which the dataset will be written. Supported formats: "arrow", "parquet". Default to "arrow" format. If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files. <Added version="2.5.0"/> max_shard_size (`Union[str, int]`, *optional*): Maximum number of bytes written per shard, default is "500MB". The size is based on uncompressed data size, so in practice your shard files may be smaller than `max_shard_size` thanks to Parquet compression for example. <Added version="2.5.0"/> num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the caching file-system backend, if any. <Added version="2.5.0"/> **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments. Example: Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes") >>> builder.download_and_prepare() ``` Download and prepare the dataset as sharded Parquet files locally: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes") >>> builder.download_and_prepare("./output_dir", file_format="parquet") ``` Download and prepare the dataset as sharded Parquet files in a cloud storage: ```py >>> from datasets import load_dataset_builder >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} >>> builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes") >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet") ``` """ output_dir = output_dir if output_dir is not None else self._cache_dir # output_dir can be a remote bucket on GCS or S3 fs, output_dir = url_to_fs(output_dir, **(storage_options or {})) self._fs = fs self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir) download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) base_path = base_path if base_path is not None else self.base_path if file_format is not None and file_format not in ["arrow", "parquet"]: raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'") self._file_format = file_format if self._fs._strip_protocol(self._output_dir) == "": # We don't support the root directory, because it has no dirname, # and we need a dirname to use a <dirname>.incomplete directory # when the dataset is being written raise RuntimeError( f"Unable to download and prepare the dataset at the root {self._output_dir}. " f"Please specify a subdirectory, e.g. 
'{self._output_dir + self.dataset_name}'" ) if dl_manager is None: if download_config is None: download_config = DownloadConfig( cache_dir=self._cache_downloaded_dir, force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD, force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD, use_etag=False, num_proc=num_proc, token=self.token, storage_options=self.storage_options, ) # We don't use etag for data files to speed up the process dl_manager = DownloadManager( dataset_name=self.dataset_name, download_config=download_config, data_dir=self.config.data_dir, base_path=base_path, record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS), ) is_local = not is_remote_filesystem(self._fs) self.dl_manager = dl_manager # Prevent parallel local disk operations if is_local: # Create parent directory of the output_dir to put the lock file in there Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True) lock_path = self._output_dir + "_builder.lock" # File locking only with local paths; no file locking on GCS or S3 with FileLock(lock_path) if is_local else contextlib.nullcontext(): # Check if the data already exists data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME)) if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS: logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})") # We need to update the info in case some splits were added in the meantime # for example when calling load_dataset from multiple workers. self.info = self._load_info() self.download_post_processing_resources(dl_manager) return logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})") if is_local: # if cache dir is local, check for available space if not has_sufficient_disk_space( self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent ): raise OSError( f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})" ) @contextlib.contextmanager def incomplete_dir(dirname): """Create temporary dir for dirname and rename on exit.""" if not is_local: self._fs.makedirs(dirname, exist_ok=True) yield dirname else: tmp_dir = dirname + ".incomplete" os.makedirs(tmp_dir, exist_ok=True) try: yield tmp_dir if os.path.isdir(dirname): shutil.rmtree(dirname) # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory shutil.move(tmp_dir, dirname) finally: if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) # Print is intentional: we want this to always go to stdout so user has # information needed to cancel download/preparation if needed. # This comes right before the progress bar. if self.info.size_in_bytes: logger.info( f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} " f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, " f"post-processed: {size_str(self.info.post_processing_size)}, " f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..." ) else: _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...") self._check_manual_download(dl_manager) # Create a tmp dir and rename to self._output_dir on successful exit. 
with incomplete_dir(self._output_dir) as tmp_output_dir: # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward # it to every sub function. with temporary_assignment(self, "_output_dir", tmp_output_dir): prepare_split_kwargs = {"file_format": file_format} if max_shard_size is not None: prepare_split_kwargs["max_shard_size"] = max_shard_size if num_proc is not None: prepare_split_kwargs["num_proc"] = num_proc self._download_and_prepare( dl_manager=dl_manager, verification_mode=verification_mode, **prepare_split_kwargs, **download_and_prepare_kwargs, ) # Sync info self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) self.info.download_checksums = dl_manager.get_recorded_sizes_checksums() if self.info.download_size is not None: self.info.size_in_bytes = self.info.dataset_size + self.info.download_size # Save info self._save_info() # Download post processing resources self.download_post_processing_resources(dl_manager) logger.info( f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. " f"Subsequent calls will reuse this data." ) def _check_manual_download(self, dl_manager): if self.manual_download_instructions is not None and dl_manager.manual_dir is None: raise ManualDownloadError( textwrap.dedent( f"""\ The dataset {self.dataset_name} with config {self.config.name} requires manual data. Please follow the manual download instructions: {self.manual_download_instructions} Manual data can be loaded with: datasets.load_dataset("{self.repo_id or self.dataset_name}", data_dir="<path/to/manual/data>")""" ) ) def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs): """Downloads and prepares dataset for reading. This is the internal implementation to overwrite called when user calls `download_and_prepare`. It should download all required data and generate the pre-processed datasets files. Args: dl_manager ([`DownloadManager`]): `DownloadManager` used to download and cache data. verification_mode ([`VerificationMode`]): if `ALL_CHECKS`, perform all the verifications including checksums. if `BASIC_CHECKS`, do not perform checksums, only perform split tests. if `NO_CHECKS`, do not perform any verification. prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size` """ # Generating data for all splits split_dict = SplitDict(dataset_name=self.dataset_name) split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) split_generators = self._split_generators(dl_manager, **split_generators_kwargs) # Checksums verification if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums: verify_checksums( self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files" ) # Build splits for split_generator in split_generators: if str(split_generator.split_info.name).lower() == "all": raise ValueError( "`all` is a special split keyword corresponding to the " "union of all splits, so cannot be used as key in " "._split_generator()." ) logger.info(f"Generating {split_generator.split_info.name} split") split_dict.add(split_generator.split_info) try: # Prepare split will record examples associated to the split self._prepare_split(split_generator, **prepare_split_kwargs) except OSError as e: raise OSError( "Cannot find data file. 
" + (self.manual_download_instructions or "") + "\nOriginal error:\n" + str(e) ) from None # If check_duplicates is set to True , then except DuplicatedKeysError except DuplicatedKeysError as e: raise DuplicatedKeysError( e.key, e.duplicate_key_indices, fix_msg=f"To avoid duplicate keys, please fix the dataset splits for {self.name}", ) from None dl_manager.manage_extracted_files() if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS: verify_splits(self.info.splits, split_dict) # Update the info object with the splits. self.info.splits = split_dict self.info.download_size = dl_manager.downloaded_size def download_post_processing_resources(self, dl_manager): for split in self.info.splits or []: for resource_name, resource_file_name in self._post_processing_resources(split).items(): if not not is_remote_filesystem(self._fs): raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}") if os.sep in resource_file_name: raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}") resource_path = os.path.join(self._output_dir, resource_file_name) if not os.path.exists(resource_path): downloaded_resource_path = self._download_post_processing_resources( split, resource_name, dl_manager ) if downloaded_resource_path: logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}") shutil.move(downloaded_resource_path, resource_path) def _load_info(self) -> DatasetInfo: return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options) def _save_info(self): file_lock = ( FileLock(self._output_dir + "_info.lock") if not is_remote_filesystem(self._fs) else contextlib.nullcontext() ) with file_lock: self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options) def _make_split_generators_kwargs(self, prepare_split_kwargs): """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`.""" del prepare_split_kwargs return {} def as_dataset( self, split: Optional[Union[str, Split, list[str], list[Split]]] = None, run_post_process=True, verification_mode: Optional[Union[VerificationMode, str]] = None, in_memory=False, ) -> Union[Dataset, DatasetDict]: """Return a Dataset for the specified split. Args: split (`datasets.Split`): Which subset of the data to return. run_post_process (`bool`, defaults to `True`): Whether to run post-processing dataset transforms and/or add indexes. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...). <Added version="2.9.1"/> in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. 
Returns: datasets.Dataset Example: ```py >>> from datasets import load_dataset_builder >>> builder = load_dataset_builder('cornell-movie-review-data/rotten_tomatoes') >>> builder.download_and_prepare() >>> ds = builder.as_dataset(split='train') >>> ds Dataset({ features: ['text', 'label'], num_rows: 8530 }) ``` """ if self._file_format is not None and self._file_format != "arrow": raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.') if is_remote_filesystem(self._fs): raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.") if not os.path.exists(self._output_dir): raise FileNotFoundError( f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call " "builder.download_and_prepare(), or use " "datasets.load_dataset() before trying to access the Dataset object." ) logger.debug(f"Constructing Dataset for split {split or ', '.join(self.info.splits)}, from {self._output_dir}") # By default, return all splits if split is None: split = {s: s for s in self.info.splits} verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) # Create a dataset for each of the given splits datasets = map_nested( partial( self._build_single_dataset, run_post_process=run_post_process, verification_mode=verification_mode, in_memory=in_memory, ), split, map_tuple=True, disable_tqdm=True, ) if isinstance(datasets, dict): datasets = DatasetDict(datasets) return datasets def _build_single_dataset( self, split: Union[str, ReadInstruction, Split], run_post_process: bool, verification_mode: VerificationMode, in_memory: bool = False, ): """as_dataset for a single split.""" if not isinstance(split, ReadInstruction): split = str(split) if split == "all": split = "+".join(self.info.splits.keys()) split = Split(split) # Build base dataset ds = self._as_dataset( split=split, in_memory=in_memory, ) if run_post_process: for resource_file_name in self._post_processing_resources(split).values(): if os.sep in resource_file_name: raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}") resources_paths = { resource_name: os.path.join(self._output_dir, resource_file_name) for resource_name, resource_file_name in self._post_processing_resources(split).items() } post_processed = self._post_process(ds, resources_paths) if post_processed is not None: ds = post_processed recorded_checksums = {} record_checksums = False for resource_name, resource_path in resources_paths.items(): size_checksum = get_size_checksum_dict(resource_path) recorded_checksums[resource_name] = size_checksum if verification_mode == VerificationMode.ALL_CHECKS and record_checksums: if self.info.post_processed is None or self.info.post_processed.resources_checksums is None: expected_checksums = None else: expected_checksums = self.info.post_processed.resources_checksums.get(split) verify_checksums(expected_checksums, recorded_checksums, "post processing resources") if self.info.post_processed is None: self.info.post_processed = PostProcessedInfo() if self.info.post_processed.resources_checksums is None: self.info.post_processed.resources_checksums = {} self.info.post_processed.resources_checksums[str(split)] = recorded_checksums self.info.post_processing_size = sum( checksums_dict["num_bytes"] for split_checksums_dicts in self.info.post_processed.resources_checksums.values() for checksums_dict in split_checksums_dicts.values() ) if self.info.dataset_size is not None and 
self.info.download_size is not None: self.info.size_in_bytes = ( self.info.dataset_size + self.info.download_size + self.info.post_processing_size ) self._save_info() ds._info.post_processed = self.info.post_processed ds._info.post_processing_size = self.info.post_processing_size ds._info.size_in_bytes = self.info.size_in_bytes if self.info.post_processed.features is not None: if self.info.post_processed.features.type != ds.features.type: raise ValueError( f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}" ) else: ds.info.features = self.info.post_processed.features return ds def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset: """Constructs a `Dataset`. This is the internal implementation to overwrite called when user calls `as_dataset`. It should read the pre-processed datasets files and generate the `Dataset` object. Args: split (`datasets.Split`): which subset of the data to read. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: `Dataset` """ cache_dir = self._fs._strip_protocol(self._output_dir) dataset_name = self.dataset_name if self._check_legacy_cache(): dataset_name = self.name dataset_kwargs = ArrowReader(cache_dir, self.info).read( name=dataset_name, instructions=split, split_infos=self.info.splits.values(), in_memory=in_memory, ) fingerprint = self._get_dataset_fingerprint(split) return Dataset(fingerprint=fingerprint, **dataset_kwargs) def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str: """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs.""" hasher = Hasher() hasher.update(Path(self._relative_data_dir()).as_posix()) hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder) fingerprint = hasher.hexdigest() return fingerprint def as_streaming_dataset( self, split: Optional[str] = None, base_path: Optional[str] = None, ) -> Union[dict[str, IterableDataset], IterableDataset]: if is_remote_filesystem(self._fs): raise NotImplementedError( f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet." ) dl_manager = StreamingDownloadManager( base_path=base_path or self.base_path, download_config=DownloadConfig(token=self.token, storage_options=self.storage_options), dataset_name=self.dataset_name, data_dir=self.config.data_dir, ) self._check_manual_download(dl_manager) splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)} # By default, return all splits if split is None: splits_generator = splits_generators elif split in splits_generators: splits_generator = splits_generators[split] else: raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}") # Create a dataset for each of the given splits datasets = map_nested( self._as_streaming_dataset_single, splits_generator, map_tuple=True, ) if isinstance(datasets, dict): datasets = IterableDatasetDict(datasets) return datasets def _as_streaming_dataset_single( self, splits_generator, ) -> IterableDataset: ex_iterable = self._get_examples_iterable_for_split(splits_generator) # add auth to be able to access and decode audio/image files from private repositories. 
token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {} return IterableDataset( ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id ) def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]: """Run dataset transforms or add indexes""" return None def _post_processing_resources(self, split: str) -> dict[str, str]: """Mapping resource_name -> resource_file_name""" return {} def _download_post_processing_resources( self, split: str, resource_name: str, dl_manager: DownloadManager ) -> Optional[str]: """Download the resource using the download manager and return the downloaded path.""" return None @abc.abstractmethod def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]): """Specify feature dictionary generators and dataset splits. This function returns a list of `SplitGenerator`s defining how to generate data and what splits to use. Example: return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={'file': 'train_data.zip'}, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={'file': 'test_data.zip'}, ), ] The above code will first call `_generate_examples(file='train_data.zip')` to write the train data, then `_generate_examples(file='test_data.zip')` to write the test data. Datasets are typically split into different subsets to be used at various stages of training and evaluation. Note that for datasets without a `VALIDATION` split, you can use a fraction of the `TRAIN` data for evaluation as you iterate on your model so as not to overfit to the `TEST` data. For downloads and extractions, use the given `download_manager`. Note that the `DownloadManager` caches downloads, so it is fine to have each generator attempt to download the source data. A good practice is to download all data in this function, and then distribute the relevant parts to each split with the `gen_kwargs` argument Args: dl_manager (`Union[DownloadManager, StreamingDownloadManager]`): Download manager to download the data Returns: `list<SplitGenerator>`. """ raise NotImplementedError() @abc.abstractmethod def _prepare_split( self, split_generator: SplitGenerator, file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): """Generate the examples and record them on disk. Args: split_generator (`SplitGenerator`): Split generator to process file_format (`str`, *optional*): format of the data files in which the dataset will be written. Supported formats: "arrow", "parquet". Default to "arrow" format. max_shard_size (`Union[str, int]`, *optional*): Maximum number of bytes written per shard, default is "500MB". The size is based on uncompressed data size, so in practice your shard files may be smaller than `max_shard_size` thanks to Parquet compression for example. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> **kwargs: Additional kwargs forwarded from _download_and_prepare """ raise NotImplementedError() def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: """Generate the examples on the fly. Args: split_generator (`SplitGenerator`): Split generator to process """ raise NotImplementedError() class GeneratorBasedBuilder(DatasetBuilder): """Base class for datasets with data generation based on dict generators. 
`GeneratorBasedBuilder` is a convenience class that abstracts away much of the data writing and reading of `DatasetBuilder`. It expects subclasses to implement generators of feature dictionaries across the dataset splits (`_split_generators`). See the method docstrings for details. """ @abc.abstractmethod def _generate_examples(self, **kwargs): """Default function generating examples for each `SplitGenerator`. This function preprocess the examples from the raw data to the preprocessed dataset files. This function is called once for each `SplitGenerator` defined in `_split_generators`. The examples yielded here will be written on disk. Args: **kwargs (additional keyword arguments): Arguments forwarded from the SplitGenerator.gen_kwargs Yields: key: `str` or `int`, a unique deterministic example identification key. * Unique: An error will be raised if two examples are yield with the same key. * Deterministic: When generating the dataset twice, the same example should have the same key. Good keys can be the image id, or line number if examples are extracted from a text file. The key will be hashed and sorted to shuffle examples deterministically, such as generating the dataset multiple times keep examples in the same order. example: `dict<str feature_name, feature_value>`, a feature dictionary ready to be encoded and written to disk. The example will be encoded with `self.info.features.encode_example({...})`. """ raise NotImplementedError() def _prepare_split( self, split_generator: SplitGenerator, check_duplicate_keys: bool, file_format="arrow", num_proc: Optional[int] = None, max_shard_size: Optional[Union[int, str]] = None, ): max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) if self.info.splits is not None: split_info = self.info.splits[split_generator.name] else: split_info = split_generator.split_info SUFFIX = "-JJJJJ-SSSSS-of-NNNNN" fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = posixpath.join(self._output_dir, fname) if num_proc and num_proc > 1: num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs) if num_input_shards <= 1: logger.warning( f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard." ) num_proc = 1 elif num_input_shards < num_proc: logger.warning( f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards." 
) num_proc = num_input_shards pbar = hf_tqdm( unit=" examples", total=split_info.num_examples, desc=f"Generating {split_info.name} split", ) _prepare_split_args = { "fpath": fpath, "file_format": file_format, "max_shard_size": max_shard_size, "split_info": split_info, "check_duplicate_keys": check_duplicate_keys, } if num_proc is None or num_proc == 1: result = None gen_kwargs = split_generator.gen_kwargs job_id = 0 with pbar: for job_id, done, content in self._prepare_split_single( gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args ): if done: result = content else: pbar.update(content) # wrapping everything into lists for consistency with the multiprocessed code path assert result is not None, "Failed to retrieve results from prepare_split" examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = ( [item] for item in result ) else: kwargs_per_job = [ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args} for job_id, gen_kwargs in enumerate( _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc) ) ] num_jobs = len(kwargs_per_job) examples_per_job = [None] * num_jobs bytes_per_job = [None] * num_jobs features_per_job = [None] * num_jobs shards_per_job = [None] * num_jobs shard_lengths_per_job = [None] * num_jobs with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job ): if done: # the content is the result of the job ( examples_per_job[job_id], bytes_per_job[job_id], features_per_job[job_id], shards_per_job[job_id], shard_lengths_per_job[job_id], ) = content else: # the content is the number of examples progress update pbar.update(content) assert None not in examples_per_job, ( f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results" ) total_shards = sum(shards_per_job) total_num_examples = sum(examples_per_job) total_num_bytes = sum(bytes_per_job) features = features_per_job[0] split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming {total_shards} shards.") if total_shards > 1: # use the -SSSSS-of-NNNNN pattern def _rename_shard(shard_and_job: tuple[int]): shard_id, job_id = shard_and_job global_shard_id = sum(shards_per_job[:job_id]) + shard_id self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) shards_and_jobs = [ (shard_id, job_id) for job_id, num_shards in enumerate(shards_per_job) for shard_id in range(num_shards) ] thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64) split_generator.split_info.shard_lengths = [ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths ] else: # don't use any pattern shard_id, job_id = 0, 0 self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), fpath.replace(SUFFIX, ""), ) if self.info.features is None: self.info.features = features def _prepare_split_single( self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, split_info: SplitInfo, check_duplicate_keys: bool, job_id: int, ) -> Iterable[tuple[int, bool, Union[int, tuple]]]: generator = self._generate_examples(**gen_kwargs) writer_class = ParquetWriter if file_format == "parquet" 
else ArrowWriter embed_local_files = file_format == "parquet" shard_lengths = [] total_num_examples, total_num_bytes = 0, 0 shard_id = 0 num_examples_progress_update = 0 try: writer = writer_class( features=self.info.features, path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), writer_batch_size=self._writer_batch_size, hash_salt=split_info.name, check_duplicates=check_duplicate_keys, storage_options=self._fs.storage_options, embed_local_files=embed_local_files, ) try: _time = time.time() for key, record in generator: if max_shard_size is not None and writer._num_bytes > max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() shard_lengths.append(num_examples) total_num_examples += num_examples total_num_bytes += num_bytes shard_id += 1 writer = writer_class( features=writer._features, path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), writer_batch_size=self._writer_batch_size, hash_salt=split_info.name, check_duplicates=check_duplicate_keys, storage_options=self._fs.storage_options, embed_local_files=embed_local_files, ) example = self.info.features.encode_example(record) if self.info.features is not None else record writer.write(example, key) num_examples_progress_update += 1 if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_shards = shard_id + 1 num_examples, num_bytes = writer.finalize() writer.close() shard_lengths.append(num_examples) total_num_examples += num_examples total_num_bytes += num_bytes except Exception as e: # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded if isinstance(e, SchemaInferenceError) and e.__context__ is not None: e = e.__context__ raise DatasetGenerationError("An error occurred while generating the dataset") from e yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths) def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs): super()._download_and_prepare( dl_manager, verification_mode, check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS, **prepare_splits_kwargs, ) def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs) class ArrowBasedBuilder(DatasetBuilder): """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet).""" @abc.abstractmethod def _generate_tables(self, **kwargs): """Default function generating examples for each `SplitGenerator`. This function preprocess the examples from the raw data to the preprocessed dataset files. This function is called once for each `SplitGenerator` defined in `_split_generators`. The examples yielded here will be written on disk. Args: **kwargs (additional keyword arguments): Arguments forwarded from the SplitGenerator.gen_kwargs Yields: key: `str` or `int`, a unique deterministic example identification key. * Unique: An error will be raised if two examples are yield with the same key. * Deterministic: When generating the dataset twice, the same example should have the same key. 
Good keys can be the image id, or line number if examples are extracted from a text file. The key will be hashed and sorted to shuffle examples deterministically, such as generating the dataset multiple times keep examples in the same order. example: `pyarrow.Table`, a feature table ready to be encoded and written to disk. """ raise NotImplementedError() def _prepare_split( self, split_generator: SplitGenerator, file_format: str = "arrow", num_proc: Optional[int] = None, max_shard_size: Optional[Union[str, int]] = None, ): max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) try: split_info = self.info.splits[split_generator.name] except Exception: split_info = split_generator.split_info SUFFIX = "-JJJJJ-SSSSS-of-NNNNN" fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = posixpath.join(self._output_dir, fname) if num_proc and num_proc > 1: num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs) if num_input_shards <= 1: logger.warning( f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard." ) num_proc = 1 elif num_input_shards < num_proc: logger.warning( f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards." ) num_proc = num_input_shards pbar = hf_tqdm( unit=" examples", total=split_info.num_examples, desc=f"Generating {split_info.name} split", ) _prepare_split_args = { "fpath": fpath, "file_format": file_format, "max_shard_size": max_shard_size, } if num_proc is None or num_proc == 1: result = None gen_kwargs = split_generator.gen_kwargs job_id = 0 with pbar: for job_id, done, content in self._prepare_split_single( gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args ): if done: result = content else: pbar.update(content) # wrapping everything into lists for consistency with the multiprocessed code path assert result is not None, "Failed to retrieve results from prepare_split" examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = ( [item] for item in result ) else: kwargs_per_job = [ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args} for job_id, gen_kwargs in enumerate( _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc) ) ] num_jobs = len(kwargs_per_job) examples_per_job = [None] * num_jobs bytes_per_job = [None] * num_jobs features_per_job = [None] * num_jobs shards_per_job = [None] * num_jobs shard_lengths_per_job = [None] * num_jobs with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job ): if done: # the content is the result of the job ( examples_per_job[job_id], bytes_per_job[job_id], features_per_job[job_id], shards_per_job[job_id], shard_lengths_per_job[job_id], ) = content else: # the content is the number of examples progress update pbar.update(content) assert None not in examples_per_job, ( f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results" ) total_shards = sum(shards_per_job) total_num_examples = sum(examples_per_job) total_num_bytes = sum(bytes_per_job) features = features_per_job[0] split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming 
{total_shards} shards.") if total_shards > 1: # use the -SSSSS-of-NNNNN pattern def _rename_shard(shard_id_and_job: tuple[int]): shard_id, job_id = shard_id_and_job global_shard_id = sum(shards_per_job[:job_id]) + shard_id self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) shard_ids_and_jobs = [ (shard_id, job_id) for job_id, num_shards in enumerate(shards_per_job) for shard_id in range(num_shards) ] thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64) split_generator.split_info.shard_lengths = [ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths ] else: # don't use any pattern shard_id, job_id = 0, 0 self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), fpath.replace(SUFFIX, ""), ) if self.info.features is None: self.info.features = features def _prepare_split_single( self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int ) -> Iterable[tuple[int, bool, Union[int, tuple]]]: gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()} generator = self._generate_tables(**gen_kwargs) writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter embed_local_files = file_format == "parquet" shard_lengths = [] total_num_examples, total_num_bytes = 0, 0 shard_id = 0 num_examples_progress_update = 0 try: writer = writer_class( features=self.info.features, path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), writer_batch_size=self._writer_batch_size, storage_options=self._fs.storage_options, embed_local_files=embed_local_files, ) try: _time = time.time() for _, table in generator: if max_shard_size is not None and writer._num_bytes > max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() shard_lengths.append(num_examples) total_num_examples += num_examples total_num_bytes += num_bytes shard_id += 1 writer = writer_class( features=writer._features, path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), writer_batch_size=self._writer_batch_size, storage_options=self._fs.storage_options, embed_local_files=embed_local_files, ) try: writer.write_table(table) except CastError as cast_error: raise DatasetGenerationCastError.from_cast_error( cast_error=cast_error, builder_name=self.info.builder_name, gen_kwargs=gen_kwargs, token=self.token, ) num_examples_progress_update += len(table) if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_shards = shard_id + 1 num_examples, num_bytes = writer.finalize() writer.close() shard_lengths.append(num_examples) total_num_examples += num_examples total_num_bytes += num_bytes except Exception as e: # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded if isinstance(e, SchemaInferenceError) and e.__context__ is not None: e = e.__context__ if isinstance(e, DatasetGenerationError): raise raise DatasetGenerationError("An error occurred while generating the dataset") from e yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths) def 
_get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
datasets/src/datasets/builder.py/0
{ "file_path": "datasets/src/datasets/builder.py", "repo_id": "datasets", "token_count": 40108 }
103
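The `_prepare_split` methods above rename the per-job shard files (written with `JJJJJ`/`SSSSS` placeholders) into a single global `-SSSSS-of-NNNNN` numbering once all jobs are done. Below is a minimal, self-contained sketch of that renaming arithmetic; the helper name `global_shard_names`, the template string, and the shard counts are made up for illustration, but the index computation mirrors `_rename_shard` in the builder code.

```py
def global_shard_names(fpath_template: str, shards_per_job: list[int]) -> dict[str, str]:
    """Map per-job shard files (JJJJJ/SSSSS placeholders) to their final
    -SSSSS-of-NNNNN names, the way `_rename_shard` computes them."""
    total_shards = sum(shards_per_job)
    renames = {}
    for job_id, num_shards in enumerate(shards_per_job):
        for shard_id in range(num_shards):
            # global id = shards produced by all earlier jobs + local shard id
            global_shard_id = sum(shards_per_job[:job_id]) + shard_id
            src = fpath_template.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}")
            dst = fpath_template.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace(
                "NNNNN", f"{total_shards:05d}"
            )
            renames[src] = dst
    return renames


if __name__ == "__main__":
    # Two workers produced 2 and 3 shards respectively -> 5 global shards.
    template = "my_dataset-train-JJJJJ-SSSSS-of-NNNNN.arrow"
    for src, dst in global_shard_names(template, [2, 3]).items():
        print(src, "->", dst)
```

Note that renaming is skipped entirely when only one shard was produced: in that case the suffix pattern is stripped and the file keeps a plain `dataset_name-split.arrow` style name, as in the `else` branch of the code above.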
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "LargeList", "List", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", "Video", "Pdf", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, List, Sequence, Value from .image import Image from .pdf import Pdf from .translation import Translation, TranslationVariableLanguages from .video import Video
datasets/src/datasets/features/__init__.py/0
{ "file_path": "datasets/src/datasets/features/__init__.py", "repo_id": "datasets", "token_count": 205 }
104
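The module above only re-exports the public feature types. As a small usage sketch (not part of the module itself): the re-exported classes can be combined into a `Features` schema and passed to `load_dataset`. The `Value` and `ClassLabel` constructors appear in the `load_dataset_builder` docstring later in this dump; the CSV file path below is made up for illustration.

```py
from datasets import ClassLabel, Features, Value, load_dataset

features = Features(
    {
        "text": Value("string"),
        "label": ClassLabel(names=["neg", "pos"]),
    }
)

# Cast the columns of a local CSV file to this schema while loading.
dataset = load_dataset("csv", data_files={"train": "reviews.csv"}, features=features)
print(dataset["train"].features)
```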
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import tensorflow as tf class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]): def __init__(self, features=None, token_per_repo_id=None, **tf_tensor_kwargs): super().__init__(features=features, token_per_repo_id=token_per_repo_id) self.tf_tensor_kwargs = tf_tensor_kwargs import tensorflow as tf # noqa: F401 - import tf at initialization def _consolidate(self, column): import tensorflow as tf if isinstance(column, list) and column: if all( isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return tf.stack(column) elif all( isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype for x in column ): # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated return tf.ragged.stack(column) return column def _tensorize(self, value): import tensorflow as tf if value is None: return value default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": tf.int64} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": tf.float32} if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) if config.TORCHVISION_AVAILABLE and "torchvision" in sys.modules: from torchvision.io import VideoReader if isinstance(value, VideoReader): return value # TODO(QL): set output to tf tensors ? if config.TORCHCODEC_AVAILABLE and "torchcodec" in sys.modules: from torchcodec.decoders import AudioDecoder, VideoDecoder if isinstance(value, (VideoDecoder, AudioDecoder)): return value # TODO(QL): set output to jax arrays ? return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) def _recursive_tensorize(self, data_struct): import tensorflow as tf # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(data_struct, torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor): data_struct = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(data_struct, np.ndarray): if data_struct.dtype == object: # tf tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) elif isinstance(data_struct, (list, tuple)): return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) return self._tensorize(data_struct) def recursive_tensorize(self, data_struct: dict): return map_nested(self._recursive_tensorize, data_struct, map_list=False) def format_row(self, pa_table: pa.Table) -> Mapping: row = self.numpy_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return self.recursive_tensorize(row) def format_column(self, pa_table: pa.Table) -> "tf.Tensor": column = self.numpy_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) column = self.recursive_tensorize(column) column = self._consolidate(column) return column def format_batch(self, pa_table: pa.Table) -> Mapping: batch = self.numpy_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) batch = self.recursive_tensorize(batch) for column_name in batch: batch[column_name] = self._consolidate(batch[column_name]) return batch
datasets/src/datasets/formatting/tf_formatter.py/0
{ "file_path": "datasets/src/datasets/formatting/tf_formatter.py", "repo_id": "datasets", "token_count": 2138 }
105
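A minimal sketch of the consolidation rule in `TFFormatter._consolidate` above: tensors with identical shape and dtype are stacked into a dense tensor, while 1-D tensors of matching dtype but varying length are stacked into a `RaggedTensor`. This requires TensorFlow; the sample values are made up for illustration.

```py
import tensorflow as tf

same_shape = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
varying_length = [tf.constant([1, 2, 3]), tf.constant([4, 5])]

dense = tf.stack(same_shape)              # dense tensor of shape (2, 3)
ragged = tf.ragged.stack(varying_length)  # ragged tensor of shape (2, None)

print(dense.shape, ragged.shape)
```

In practice this is the behavior you observe when calling `dataset.with_format("tf")` and reading a column whose rows have different lengths: equal-length rows come back as a single stacked tensor, variable-length rows as a ragged one.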
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Access datasets.""" import glob import importlib import inspect import json import os import posixpath from collections import Counter from collections.abc import Mapping, Sequence from dataclasses import dataclass, field from pathlib import Path from typing import Any, Optional, Union import fsspec import requests import yaml from fsspec.core import url_to_fs from huggingface_hub import DatasetCard, DatasetCardData, HfApi from huggingface_hub.utils import ( EntryNotFoundError, GatedRepoError, LocalEntryNotFoundError, OfflineModeIsEnabled, RepositoryNotFoundError, RevisionNotFoundError, get_session, ) from . import __version__, config from .arrow_dataset import Dataset from .builder import BuilderConfig, DatasetBuilder from .data_files import ( DataFilesDict, DataFilesList, DataFilesPatternsDict, EmptyDatasetError, get_data_patterns, sanitize_patterns, ) from .dataset_dict import DatasetDict, IterableDatasetDict from .download.download_config import DownloadConfig from .download.download_manager import DownloadMode from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin from .exceptions import DataFilesNotFoundError, DatasetNotFoundError from .features import Features from .features.features import _fix_for_backward_compatible_features from .fingerprint import Hasher from .info import DatasetInfo, DatasetInfosDict from .iterable_dataset import IterableDataset from .naming import camelcase_to_snakecase, snakecase_to_camelcase from .packaged_modules import ( _EXTENSION_TO_MODULE, _MODULE_TO_EXTENSIONS, _MODULE_TO_METADATA_FILE_NAMES, _PACKAGED_DATASETS_MODULES, ) from .packaged_modules.folder_based_builder.folder_based_builder import FolderBasedBuilder from .splits import Split from .utils import _dataset_viewer from .utils.file_utils import ( _raise_if_offline_mode_is_enabled, cached_path, get_datasets_user_agent, is_relative_path, relative_to_absolute_path, ) from .utils.hub import hf_dataset_url from .utils.info_utils import VerificationMode, is_small_dataset from .utils.logging import get_logger from .utils.metadata import MetadataConfigs from .utils.typing import PathLike from .utils.version import Version logger = get_logger(__name__) ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"] class _InitializeConfiguredDatasetBuilder: """ From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class See also ConfiguredDatasetBuilder.__reduce__ When called with the param value as the only argument, returns an un-initialized instance of the parameterized class. Subsequent __setstate__ will be called by pickle. 
""" def __call__(self, builder_cls, metadata_configs, default_config_name, name): # make a simple object which has no complex __init__ (this one will do) obj = _InitializeConfiguredDatasetBuilder() obj.__class__ = configure_builder_class( builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name ) return obj def configure_builder_class( builder_cls: type[DatasetBuilder], builder_configs: list[BuilderConfig], default_config_name: Optional[str], dataset_name: str, ) -> type[DatasetBuilder]: """ Dynamically create a builder class with custom builder configs parsed from README.md file, i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list. """ class ConfiguredDatasetBuilder(builder_cls): BUILDER_CONFIGS = builder_configs DEFAULT_CONFIG_NAME = default_config_name __module__ = builder_cls.__module__ # so that the actual packaged builder can be imported def __reduce__(self): # to make dynamically created class pickable, see _InitializeParameterizedDatasetBuilder parent_builder_cls = self.__class__.__mro__[1] return ( _InitializeConfiguredDatasetBuilder(), ( parent_builder_cls, self.BUILDER_CONFIGS, self.DEFAULT_CONFIG_NAME, self.dataset_name, ), self.__dict__.copy(), ) ConfiguredDatasetBuilder.__name__ = ( f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}" ) ConfiguredDatasetBuilder.__qualname__ = ( f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}" ) return ConfiguredDatasetBuilder def import_main_class(module_path) -> Optional[type[DatasetBuilder]]: """Import a module at module_path and return its main class: a DatasetBuilder""" module = importlib.import_module(module_path) # Find the main class in our imported module module_main_cls = None for name, obj in module.__dict__.items(): if inspect.isclass(obj) and issubclass(obj, DatasetBuilder): if inspect.isabstract(obj): continue module_main_cls = obj obj_module = inspect.getmodule(obj) if obj_module is not None and module == obj_module: break return module_main_cls def get_dataset_builder_class( dataset_module: "DatasetModule", dataset_name: Optional[str] = None ) -> type[DatasetBuilder]: builder_cls = import_main_class(dataset_module.module_path) if dataset_module.builder_configs_parameters.builder_configs: dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name") if dataset_name is None: raise ValueError("dataset_name should be specified but got None") builder_cls = configure_builder_class( builder_cls, builder_configs=dataset_module.builder_configs_parameters.builder_configs, default_config_name=dataset_module.builder_configs_parameters.default_config_name, dataset_name=dataset_name, ) return builder_cls def increase_load_count(name: str): """Update the download count of a dataset.""" if not config.HF_HUB_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS: try: get_session().head( "/".join((config.S3_DATASETS_BUCKET_PREFIX, name, name + ".py")), headers={"User-Agent": get_datasets_user_agent()}, timeout=3, ) except Exception: pass def infer_module_for_data_files_list( data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None ) -> tuple[Optional[str], dict]: """Infer module (and builder kwargs) from list of data files. It picks the module based on the most common file extension. In case of a draw ".parquet" is the favorite, and then alphabetical order. Args: data_files_list (DataFilesList): List of data files. 
download_config (bool or str, optional): Mainly use `token` or `storage_options` to support different platforms and auth types. Returns: tuple[str, dict[str, Any]]: Tuple with - inferred module name - dict of builder kwargs """ extensions_counter = Counter( ("." + suffix.lower(), xbasename(filepath) in FolderBasedBuilder.METADATA_FILENAMES) for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE] for suffix in xbasename(filepath).split(".")[1:] ) if extensions_counter: def sort_key(ext_count: tuple[tuple[str, bool], int]) -> tuple[int, bool]: """Sort by count and set ".parquet" as the favorite in case of a draw, and ignore metadata files""" (ext, is_metadata), count = ext_count return (not is_metadata, count, ext == ".parquet", ext == ".jsonl", ext == ".json", ext == ".csv", ext) for (ext, _), _ in sorted(extensions_counter.items(), key=sort_key, reverse=True): if ext in _EXTENSION_TO_MODULE: return _EXTENSION_TO_MODULE[ext] elif ext == ".zip": return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config) return None, {} def infer_module_for_data_files_list_in_archives( data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None ) -> tuple[Optional[str], dict]: """Infer module (and builder kwargs) from list of archive data files. Args: data_files_list (DataFilesList): List of data files. download_config (bool or str, optional): Mainly use `token` or `storage_options` to support different platforms and auth types. Returns: tuple[str, dict[str, Any]]: Tuple with - inferred module name - dict of builder kwargs """ archived_files = [] archive_files_counter = 0 for filepath in data_files_list: if str(filepath).endswith(".zip"): archive_files_counter += 1 if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE: break extracted = xjoin(StreamingDownloadManager().extract(filepath), "**") archived_files += [ f.split("::")[0] for f in xglob(extracted, recursive=True, download_config=download_config)[ : config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE ] ] extensions_counter = Counter( "." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:] ) if extensions_counter: most_common = extensions_counter.most_common(1)[0][0] if most_common in _EXTENSION_TO_MODULE: return _EXTENSION_TO_MODULE[most_common] return None, {} def infer_module_for_data_files( data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None ) -> tuple[Optional[str], dict[str, Any]]: """Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match. Args: data_files ([`DataFilesDict`]): Dict of list of data files. path (str, *optional*): Dataset name or path. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files. Returns: tuple[str, dict[str, Any]]: Tuple with - inferred module name - builder kwargs """ split_modules = { split: infer_module_for_data_files_list(data_files_list, download_config=download_config) for split, data_files_list in data_files.items() } module_name, default_builder_kwargs = next(iter(split_modules.values())) if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()): raise ValueError(f"Couldn't infer the same data file format for all splits. 
Got {split_modules}") if not module_name: raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else "")) return module_name, default_builder_kwargs def create_builder_configs_from_metadata_configs( module_path: str, metadata_configs: MetadataConfigs, base_path: Optional[str] = None, default_builder_kwargs: dict[str, Any] = None, download_config: Optional[DownloadConfig] = None, ) -> tuple[list[BuilderConfig], str]: builder_cls = import_main_class(module_path) builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS default_config_name = metadata_configs.get_default_config_name() builder_configs = [] default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs base_path = base_path if base_path is not None else "" for config_name, config_params in metadata_configs.items(): config_data_files = config_params.get("data_files") config_data_dir = config_params.get("data_dir") config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path try: config_patterns = ( sanitize_patterns(config_data_files) if config_data_files is not None else get_data_patterns(config_base_path, download_config=download_config) ) config_data_files_dict = DataFilesPatternsDict.from_patterns( config_patterns, allowed_extensions=ALL_ALLOWED_EXTENSIONS, ) except EmptyDatasetError as e: raise EmptyDatasetError( f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}'," f" check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. " ) from e ignored_params = [ param for param in config_params if not hasattr(builder_config_cls, param) and param != "default" ] if ignored_params: logger.warning( f"Some datasets params were ignored: {ignored_params}. " "Make sure to use only valid params for the dataset builder and to have " "a up-to-date version of the `datasets` library." ) builder_configs.append( builder_config_cls( name=config_name, data_files=config_data_files_dict, data_dir=config_data_dir, **{ param: value for param, value in {**default_builder_kwargs, **config_params}.items() if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir") }, ) ) return builder_configs, default_config_name @dataclass class BuilderConfigsParameters: """Dataclass containing objects related to creation of builder configurations from yaml's metadata content. Attributes: metadata_configs (`MetadataConfigs`, *optional*): Configs parsed from yaml's metadata. builder_configs (`list[BuilderConfig]`, *optional*): List of BuilderConfig objects created from metadata_configs above. default_config_name (`str`): Name of default config taken from yaml's metadata. """ metadata_configs: Optional[MetadataConfigs] = None builder_configs: Optional[list[BuilderConfig]] = None default_config_name: Optional[str] = None @dataclass class DatasetModule: module_path: str hash: str builder_kwargs: dict builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters) dataset_infos: Optional[DatasetInfosDict] = None class _DatasetModuleFactory: def get_module(self) -> DatasetModule: raise NotImplementedError class LocalDatasetModuleFactory(_DatasetModuleFactory): """Get the module of a dataset loaded from the user's data files. 
The dataset builder module to use is inferred from the data files extensions.""" def __init__( self, path: str, data_dir: Optional[str] = None, data_files: Optional[Union[str, list, dict]] = None, download_mode: Optional[Union[DownloadMode, str]] = None, ): if data_dir and os.path.isabs(data_dir): raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}") self.path = Path(path).as_posix() self.name = Path(path).stem self.data_files = data_files self.data_dir = data_dir self.download_mode = download_mode def get_module(self) -> DatasetModule: readme_path = os.path.join(self.path, config.REPOCARD_FILENAME) standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME) dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData() if os.path.exists(standalone_yaml_path): with open(standalone_yaml_path, encoding="utf-8") as f: standalone_yaml_data = yaml.safe_load(f.read()) if standalone_yaml_data: _dataset_card_data_dict = dataset_card_data.to_dict() _dataset_card_data_dict.update(standalone_yaml_data) dataset_card_data = DatasetCardData(**_dataset_card_data_dict) metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) # we need a set of data files to find which dataset builder to use # because we need to infer module name by files extensions base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix() if self.data_files is not None: patterns = sanitize_patterns(self.data_files) elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())): patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"]) else: patterns = get_data_patterns(base_path) data_files = DataFilesDict.from_patterns( patterns, base_path=base_path, allowed_extensions=ALL_ALLOWED_EXTENSIONS, ) module_name, default_builder_kwargs = infer_module_for_data_files( data_files=data_files, path=self.path, ) data_files = data_files.filter( extensions=_MODULE_TO_EXTENSIONS[module_name], file_names=_MODULE_TO_METADATA_FILE_NAMES[module_name] ) module_path, _ = _PACKAGED_DATASETS_MODULES[module_name] if metadata_configs: builder_configs, default_config_name = create_builder_configs_from_metadata_configs( module_path, metadata_configs, base_path=base_path, default_builder_kwargs=default_builder_kwargs, ) else: builder_configs: list[BuilderConfig] = [ import_main_class(module_path).BUILDER_CONFIG_CLASS( data_files=data_files, **default_builder_kwargs, ) ] default_config_name = None builder_kwargs = { "base_path": self.path, "dataset_name": camelcase_to_snakecase(Path(self.path).name), } if self.data_dir: builder_kwargs["data_files"] = data_files # this file is deprecated and was created automatically in old versions of push_to_hub if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)): with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f: legacy_dataset_infos = DatasetInfosDict( { config_name: DatasetInfo.from_dict(dataset_info_dict) for config_name, dataset_info_dict in json.load(f).items() } ) if len(legacy_dataset_infos) == 1: # old config e.g. 
named "username--dataset_name" legacy_config_name = next(iter(legacy_dataset_infos)) legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name) legacy_dataset_infos.update(dataset_infos) dataset_infos = legacy_dataset_infos if default_config_name is None and len(dataset_infos) == 1: default_config_name = next(iter(dataset_infos)) hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs}) return DatasetModule( module_path, hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters( metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name, ), ) class PackagedDatasetModuleFactory(_DatasetModuleFactory): """Get the dataset builder module from the ones that are packaged with the library: csv, json, etc.""" def __init__( self, name: str, data_dir: Optional[str] = None, data_files: Optional[Union[str, list, dict]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, ): self.name = name self.data_files = data_files self.data_dir = data_dir self.download_config = download_config self.download_mode = download_mode increase_load_count(name) def get_module(self) -> DatasetModule: base_path = Path(self.data_dir or "").expanduser().resolve().as_posix() patterns = ( sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns(base_path, download_config=self.download_config) ) data_files = DataFilesDict.from_patterns( patterns, download_config=self.download_config, base_path=base_path, ) module_path, hash = _PACKAGED_DATASETS_MODULES[self.name] builder_kwargs = { "data_files": data_files, "dataset_name": self.name, } return DatasetModule(module_path, hash, builder_kwargs) class HubDatasetModuleFactory(_DatasetModuleFactory): """ Get the module of a dataset loaded from data files of a dataset repository. The dataset builder module to use is inferred from the data files extensions. 
""" def __init__( self, name: str, commit_hash: str, data_dir: Optional[str] = None, data_files: Optional[Union[str, list, dict]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, use_exported_dataset_infos: bool = False, ): self.name = name self.commit_hash = commit_hash self.data_files = data_files self.data_dir = data_dir self.download_config = download_config or DownloadConfig() self.download_mode = download_mode self.use_exported_dataset_infos = use_exported_dataset_infos increase_load_count(name) def get_module(self) -> DatasetModule: # Get the Dataset Card and fix the revision in case there are new commits in the meantime api = HfApi( endpoint=config.HF_ENDPOINT, token=self.download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(self.download_config.user_agent), ) try: dataset_readme_path = api.hf_hub_download( repo_id=self.name, filename=config.REPOCARD_FILENAME, repo_type="dataset", revision=self.commit_hash, proxies=self.download_config.proxies, ) dataset_card_data = DatasetCard.load(dataset_readme_path).data except EntryNotFoundError: dataset_card_data = DatasetCardData() download_config = self.download_config.copy() if download_config.download_desc is None: download_config.download_desc = "Downloading standalone yaml" try: standalone_yaml_path = cached_path( hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=self.commit_hash), download_config=download_config, ) with open(standalone_yaml_path, encoding="utf-8") as f: standalone_yaml_data = yaml.safe_load(f.read()) if standalone_yaml_data: _dataset_card_data_dict = dataset_card_data.to_dict() _dataset_card_data_dict.update(standalone_yaml_data) dataset_card_data = DatasetCardData(**_dataset_card_data_dict) except FileNotFoundError: pass base_path = f"hf://datasets/{self.name}@{self.commit_hash}/{self.data_dir or ''}".rstrip("/") metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) if config.USE_PARQUET_EXPORT and self.use_exported_dataset_infos: try: exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos( dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token ) exported_dataset_infos = DatasetInfosDict( { config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name]) for config_name in exported_dataset_infos } ) except _dataset_viewer.DatasetViewerError: exported_dataset_infos = None else: exported_dataset_infos = None if exported_dataset_infos: exported_dataset_infos.update(dataset_infos) dataset_infos = exported_dataset_infos # we need a set of data files to find which dataset builder to use # because we need to infer module name by files extensions if self.data_files is not None: patterns = sanitize_patterns(self.data_files) elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())): patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"]) else: patterns = get_data_patterns(base_path, download_config=self.download_config) data_files = DataFilesDict.from_patterns( patterns, base_path=base_path, allowed_extensions=ALL_ALLOWED_EXTENSIONS, download_config=self.download_config, ) module_name, default_builder_kwargs = infer_module_for_data_files( data_files=data_files, path=self.name, download_config=self.download_config, ) data_files = data_files.filter( 
extensions=_MODULE_TO_EXTENSIONS[module_name], file_names=_MODULE_TO_METADATA_FILE_NAMES[module_name] ) module_path, _ = _PACKAGED_DATASETS_MODULES[module_name] if metadata_configs: builder_configs, default_config_name = create_builder_configs_from_metadata_configs( module_path, metadata_configs, base_path=base_path, default_builder_kwargs=default_builder_kwargs, download_config=self.download_config, ) else: builder_configs: list[BuilderConfig] = [ import_main_class(module_path).BUILDER_CONFIG_CLASS( data_files=data_files, **default_builder_kwargs, ) ] default_config_name = None builder_kwargs = { "base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"), "repo_id": self.name, "dataset_name": camelcase_to_snakecase(Path(self.name).name), } if self.data_dir: builder_kwargs["data_files"] = data_files download_config = self.download_config.copy() if download_config.download_desc is None: download_config.download_desc = "Downloading metadata" try: # this file is deprecated and was created automatically in old versions of push_to_hub dataset_infos_path = cached_path( hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash), download_config=download_config, ) with open(dataset_infos_path, encoding="utf-8") as f: legacy_dataset_infos = DatasetInfosDict( { config_name: DatasetInfo.from_dict(dataset_info_dict) for config_name, dataset_info_dict in json.load(f).items() } ) if len(legacy_dataset_infos) == 1: # old config e.g. named "username--dataset_name" legacy_config_name = next(iter(legacy_dataset_infos)) legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name) legacy_dataset_infos.update(dataset_infos) dataset_infos = legacy_dataset_infos except FileNotFoundError: pass if default_config_name is None and len(dataset_infos) == 1: default_config_name = next(iter(dataset_infos)) return DatasetModule( module_path, self.commit_hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters( metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name, ), ) class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory): """ Get the module of a dataset loaded from parquet files of a dataset repository parquet export. 
""" def __init__( self, name: str, commit_hash: str, download_config: Optional[DownloadConfig] = None, ): self.name = name self.commit_hash = commit_hash self.download_config = download_config or DownloadConfig() increase_load_count(name) def get_module(self) -> DatasetModule: exported_parquet_files = _dataset_viewer.get_exported_parquet_files( dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token ) exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos( dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token ) dataset_infos = DatasetInfosDict( { config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name]) for config_name in exported_dataset_infos } ) parquet_commit_hash = ( HfApi( endpoint=config.HF_ENDPOINT, token=self.download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(self.download_config.user_agent), ) .dataset_info( self.name, revision="refs/convert/parquet", token=self.download_config.token, timeout=100.0, ) .sha ) # fix the revision in case there are new commits in the meantime metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos( parquet_commit_hash=parquet_commit_hash, exported_parquet_files=exported_parquet_files, dataset_infos=dataset_infos, ) module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"] builder_configs, default_config_name = create_builder_configs_from_metadata_configs( module_path, metadata_configs, download_config=self.download_config, ) builder_kwargs = { "repo_id": self.name, "dataset_name": camelcase_to_snakecase(Path(self.name).name), } return DatasetModule( module_path, self.commit_hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters( metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name, ), ) class CachedDatasetModuleFactory(_DatasetModuleFactory): """ Get the module of a dataset that has been loaded once already and cached. """ def __init__( self, name: str, cache_dir: Optional[str] = None, ): self.name = name self.cache_dir = cache_dir assert self.name.count("/") <= 1 def get_module(self) -> DatasetModule: cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE)) namespace_and_dataset_name = self.name.split("/") namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1]) cached_relative_path = "___".join(namespace_and_dataset_name) cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path) cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*")) if os.path.isdir(cached_directory_path) ] if cached_directory_paths: builder_kwargs = { "repo_id": self.name, "dataset_name": self.name.split("/")[-1], } warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub" if config.HF_HUB_OFFLINE: warning_msg += " (offline mode is enabled)." 
logger.warning(warning_msg) return DatasetModule( "datasets.packaged_modules.cache.cache", "auto", {**builder_kwargs, "version": "auto"}, ) raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}") def dataset_module_factory( path: str, revision: Optional[Union[str, Version]] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, data_dir: Optional[str] = None, data_files: Optional[Union[dict, list, str, DataFilesDict]] = None, cache_dir: Optional[str] = None, **download_kwargs, ) -> DatasetModule: """ Download/extract/cache a dataset module. Dataset codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks). Args: path (str): Path or name of the dataset. Depending on ``path``, the dataset builder that is used comes from one of the generic dataset builders (JSON, CSV, Parquet, text etc.). For local datasets: - if ``path`` is a local directory (containing data files only) -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory e.g. ``'./path/to/directory/with/my/csv/data'``. For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``) - if ``path`` is a dataset repository on the HF hub (containing data files only) -> load a generic dataset builder (csv, text etc.) based on the content of the repository e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files. revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified, in which case it's equal to pass `os.path.join(data_dir, "**")` as `data_files`. data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration. cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. <Added version="2.16.0"/> **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied. Returns: DatasetModule """ if download_config is None: download_config = DownloadConfig(**download_kwargs) download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) download_config.extract_compressed_file = True download_config.force_extract = True download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1] if not filename.endswith(".py"): filename = filename + ".py" combined_path = os.path.join(path, filename) # We have several ways to get a dataset builder: # # - if path is the name of a packaged dataset module # -> use the packaged module (json, csv, etc.) 
# # - if os.path.join(path, name) is a local python file # -> use the module from the python file # - if path is a local directory (but no python file) # -> use a packaged module (csv, text etc.) based on content of the directory # # - if path has one "/" and is dataset repository on the HF hub with a python file # -> the module from the python file in the dataset repository # - if path has one "/" and is dataset repository on the HF hub without a python file # -> use a packaged module (csv, text etc.) based on content of the repository # Try packaged if path in _PACKAGED_DATASETS_MODULES: return PackagedDatasetModuleFactory( path, data_dir=data_dir, data_files=data_files, download_config=download_config, download_mode=download_mode, ).get_module() # Try locally elif path.endswith(filename): raise RuntimeError(f"Dataset scripts are no longer supported, but found {filename}") elif os.path.isfile(combined_path): raise RuntimeError(f"Dataset scripts are no longer supported, but found {filename}") elif os.path.isdir(path): return LocalDatasetModuleFactory( path, data_dir=data_dir, data_files=data_files, download_mode=download_mode ).get_module() # Try remotely elif is_relative_path(path) and path.count("/") <= 1: try: # Get the Dataset Card + get the revision + check authentication all at in one call # We fix the commit_hash in case there are new commits in the meantime api = HfApi( endpoint=config.HF_ENDPOINT, token=download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(download_config.user_agent), ) try: _raise_if_offline_mode_is_enabled() dataset_readme_path = api.hf_hub_download( repo_id=path, filename=config.REPOCARD_FILENAME, repo_type="dataset", revision=revision, proxies=download_config.proxies, ) commit_hash = os.path.basename(os.path.dirname(dataset_readme_path)) except LocalEntryNotFoundError as e: if isinstance( e.__cause__, ( OfflineModeIsEnabled, requests.exceptions.Timeout, requests.exceptions.ConnectionError, ), ): raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({e.__class__.__name__})") from e else: raise except EntryNotFoundError: commit_hash = api.dataset_info( path, revision=revision, timeout=100.0, ).sha except ( OfflineModeIsEnabled, requests.exceptions.Timeout, requests.exceptions.ConnectionError, ) as e: raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({e.__class__.__name__})") from e except GatedRepoError as e: message = f"Dataset '{path}' is a gated dataset on the Hub." if e.response.status_code == 401: message += " You must be authenticated to access it." elif e.response.status_code == 403: message += f" Visit the dataset page at https://huggingface.co/datasets/{path} to ask for access." raise DatasetNotFoundError(message) from e except RevisionNotFoundError as e: raise DatasetNotFoundError( f"Revision '{revision}' doesn't exist for dataset '{path}' on the Hub." 
) from e except RepositoryNotFoundError as e: raise DatasetNotFoundError(f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed.") from e try: api.hf_hub_download( repo_id=path, filename=filename, repo_type="dataset", revision=commit_hash, proxies=download_config.proxies, ) raise RuntimeError(f"Dataset scripts are no longer supported, but found {filename}") except EntryNotFoundError: # Use the infos from the parquet export except in some cases: if data_dir or data_files or (revision and revision != "main"): use_exported_dataset_infos = False else: use_exported_dataset_infos = True return HubDatasetModuleFactory( path, commit_hash=commit_hash, data_dir=data_dir, data_files=data_files, download_config=download_config, download_mode=download_mode, use_exported_dataset_infos=use_exported_dataset_infos, ).get_module() except GatedRepoError as e: message = f"Dataset '{path}' is a gated dataset on the Hub." if e.response.status_code == 401: message += " You must be authenticated to access it." elif e.response.status_code == 403: message += f" Visit the dataset page at https://huggingface.co/datasets/{path} to ask for access." raise DatasetNotFoundError(message) from e except RevisionNotFoundError as e: raise DatasetNotFoundError( f"Revision '{revision}' doesn't exist for dataset '{path}' on the Hub." ) from e except Exception as e1: # All the attempts failed, before raising the error we should check if the module is already cached try: return CachedDatasetModuleFactory(path, cache_dir=cache_dir).get_module() except Exception: # If it's not in the cache, then it doesn't exist. if isinstance(e1, OfflineModeIsEnabled): raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)): raise e1 from None if isinstance(e1, FileNotFoundError): raise FileNotFoundError( f"Couldn't find any data file at {relative_to_absolute_path(path)}. " f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}" ) from None raise e1 from None else: raise FileNotFoundError(f"Couldn't find any data file at {relative_to_absolute_path(path)}.") def load_dataset_builder( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, storage_options: Optional[dict] = None, **config_kwargs, ) -> DatasetBuilder: """Load a dataset builder which can be used to: - Inspect general information that is required to build a dataset (cache directory, config, dataset info, features, data files, etc.) - Download and prepare the dataset as Arrow files in the cache - Get a streaming dataset without downloading or caching anything You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. A dataset is a directory that contains some data files in generic formats (JSON, CSV, Parquet, etc.) and possibly in a generic structure (Webdataset, ImageFolder, AudioFolder, VideoFolder, etc.) Args: path (`str`): Path or name of the dataset. 
- if `path` is a dataset repository on the HF hub (list all available datasets with [`huggingface_hub.list_datasets`]) -> load the dataset builder from supported files in the repository (csv, json, parquet, etc.) e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing the data files. - if `path` is a local directory -> load the dataset builder from supported files in the directory (csv, json, parquet, etc.) e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is the name of a dataset builder and `data_files` or `data_dir` is specified (available builders are "json", "csv", "parquet", "arrow", "text", "xml", "webdataset", "imagefolder", "audiofolder", "videofolder") -> load the dataset builder from the files in `data_files` or `data_dir` e.g. `'parquet'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features ([`Features`], *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. revision ([`Version`] or `str`, *optional*): Version of the dataset to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. 
Returns: [`DatasetBuilder`] Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('cornell-movie-review-data/rotten_tomatoes') >>> ds_builder.info.features {'label': ClassLabel(names=['neg', 'pos']), 'text': Value('string')} ``` """ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) if token is not None: download_config = download_config.copy() if download_config else DownloadConfig() download_config.token = token if storage_options is not None: download_config = download_config.copy() if download_config else DownloadConfig() download_config.storage_options.update(storage_options) if features is not None: features = _fix_for_backward_compatible_features(features) dataset_module = dataset_module_factory( path, revision=revision, download_config=download_config, download_mode=download_mode, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, ) # Get dataset builder class builder_kwargs = dataset_module.builder_kwargs data_dir = builder_kwargs.pop("data_dir", data_dir) data_files = builder_kwargs.pop("data_files", data_files) config_name = builder_kwargs.pop( "config_name", name or dataset_module.builder_configs_parameters.default_config_name ) dataset_name = builder_kwargs.pop("dataset_name", None) info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None if ( path in _PACKAGED_DATASETS_MODULES and data_files is None and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None ): error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder." example_extensions = [ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path ] if example_extensions: error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`' raise ValueError(error_msg) builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name) # Instantiate the dataset builder builder_instance: DatasetBuilder = builder_cls( cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, data_dir=data_dir, data_files=data_files, hash=dataset_module.hash, info=info, features=features, token=token, storage_options=storage_options, **builder_kwargs, **config_kwargs, ) builder_instance._use_legacy_cache_dir_if_possible(dataset_module) return builder_instance def load_dataset( path: str, name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, split: Optional[Union[str, Split, list[str], list[Split]]] = None, cache_dir: Optional[str] = None, features: Optional[Features] = None, download_config: Optional[DownloadConfig] = None, download_mode: Optional[Union[DownloadMode, str]] = None, verification_mode: Optional[Union[VerificationMode, str]] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, revision: Optional[Union[str, Version]] = None, token: Optional[Union[bool, str]] = None, streaming: bool = False, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **config_kwargs, ) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]: """Load a dataset from the Hugging Face Hub, or a local dataset. You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`]. 
A dataset is a directory that contains some data files in generic formats (JSON, CSV, Parquet, etc.) and possibly in a generic structure (Webdataset, ImageFolder, AudioFolder, VideoFolder, etc.) This function does the following under the hood: 1. Load a dataset builder: * Find the most common data format in the dataset and pick its associated builder (JSON, CSV, Parquet, Webdataset, ImageFolder, AudioFolder, etc.) * Find which file goes into which split (e.g. train/test) based on file and directory names or on the YAML configuration * It is also possible to specify `data_files` manually, and which dataset builder to use (e.g. "parquet"). 2. Run the dataset builder: In the general case: * Download the data files from the dataset if they are not already available locally or cached. * Process and cache the dataset in typed Arrow tables. Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types. They can be directly accessed from disk, loaded in RAM or even streamed over the web. In the streaming case: * Don't download or cache anything. Instead, the dataset is lazily loaded and will be streamed on-the-fly when iterating on it. 3. Return a dataset built from the requested splits in `split` (default: all). Args: path (`str`): Path or name of the dataset. - if `path` is a dataset repository on the HF hub (list all available datasets with [`huggingface_hub.list_datasets`]) -> load the dataset from supported files in the repository (csv, json, parquet, etc.) e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing the data files. - if `path` is a local directory -> load the dataset from supported files in the directory (csv, json, parquet, etc.) e.g. `'./path/to/directory/with/my/csv/data'`. - if `path` is the name of a dataset builder and `data_files` or `data_dir` is specified (available builders are "json", "csv", "parquet", "arrow", "text", "xml", "webdataset", "imagefolder", "audiofolder", "videofolder") -> load the dataset from the files in `data_files` or `data_dir` e.g. `'parquet'`. name (`str`, *optional*): Defining the name of the dataset configuration. data_dir (`str`, *optional*): Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory. data_files (`str` or `Sequence` or `Mapping`, *optional*): Path(s) to source data file(s). split (`Split` or `str`): Which split of the data to load. If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`). If given, will return a single Dataset. Splits can be combined and specified like in tensorflow-datasets. cache_dir (`str`, *optional*): Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`. features (`Features`, *optional*): Set the features type to use for this dataset. download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): Download/generate mode. verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`): Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/> keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. revision ([`Version`] or `str`, *optional*): Version of the dataset to load. As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. token (`str` or `bool`, *optional*): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. If `True`, or not specified, will get token from `"~/.huggingface"`. streaming (`bool`, defaults to `False`): If set to `True`, don't download the data files. Instead, it streams the data progressively while iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case. Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example. Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats like rar and xz are not yet supported. The tgz format doesn't allow streaming. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.7.0"/> storage_options (`dict`, *optional*, defaults to `None`): **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. <Added version="2.11.0"/> **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the `BuilderConfig` and used in the [`DatasetBuilder`]. Returns: [`Dataset`] or [`DatasetDict`]: - if `split` is not `None`: the dataset requested, - if `split` is `None`, a [`~datasets.DatasetDict`] with each split. or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True` - if `split` is not `None`, the dataset is requested - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split. 
Example: Load a dataset from the Hugging Face Hub: ```py >>> from datasets import load_dataset >>> ds = load_dataset('cornell-movie-review-data/rotten_tomatoes', split='train') # Load a subset or dataset configuration (here 'sst2') >>> from datasets import load_dataset >>> ds = load_dataset('nyu-mll/glue', 'sst2', split='train') # Manual mapping of data files to splits >>> data_files = {'train': 'train.csv', 'test': 'test.csv'} >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files) # Manual selection of a directory to load >>> ds = load_dataset('namespace/your_dataset_name', data_dir='folder_name') ``` Load a local dataset: ```py # Load a CSV file >>> from datasets import load_dataset >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv') # Load a JSON file >>> from datasets import load_dataset >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json') ``` Load an [`~datasets.IterableDataset`]: ```py >>> from datasets import load_dataset >>> ds = load_dataset('cornell-movie-review-data/rotten_tomatoes', split='train', streaming=True) ``` Load an image dataset with the `ImageFolder` dataset builder: ```py >>> from datasets import load_dataset >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train') ``` """ if "trust_remote_code" in config_kwargs: if config_kwargs.pop("trust_remote_code"): logger.error( "`trust_remote_code` is not supported anymore.\n" f"Please check that the Hugging Face dataset '{path}' isn't based on a loading script and remove `trust_remote_code`.\n" "If the dataset is based on a loading script, please ask the dataset author to remove it and convert it to a standard format like Parquet." ) if data_files is not None and not data_files: raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).") if Path(path, config.DATASET_STATE_JSON_FILENAME).exists(): raise ValueError( "You are trying to load a dataset that was saved using `save_to_disk`. " "Please use `load_from_disk` instead." ) if streaming and num_proc is not None: raise NotImplementedError( "Loading a streaming dataset in parallel with `num_proc` is not implemented. " "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead." 
) download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) verification_mode = VerificationMode( (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS ) # Create a dataset builder builder_instance = load_dataset_builder( path=path, name=name, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, features=features, download_config=download_config, download_mode=download_mode, revision=revision, token=token, storage_options=storage_options, **config_kwargs, ) # Return iterable dataset in case of streaming if streaming: return builder_instance.as_streaming_dataset(split=split) # Download and prepare data builder_instance.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, num_proc=num_proc, storage_options=storage_options, ) # Build dataset for splits keep_in_memory = ( keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) ) ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory) return ds def load_from_disk( dataset_path: PathLike, keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None ) -> Union[Dataset, DatasetDict]: """ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. Args: dataset_path (`path-like`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset/dataset-dict will be loaded from. keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory: the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split. Example: ```py >>> from datasets import load_from_disk >>> ds = load_from_disk('path/to/dataset/directory') ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_path, **(storage_options or {})) if not fs.exists(dataset_path): raise FileNotFoundError(f"Directory {dataset_path} not found") if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME) ): return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)): return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) else: raise FileNotFoundError( f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory." )
datasets/src/datasets/load.py/0
{ "file_path": "datasets/src/datasets/load.py", "repo_id": "datasets", "token_count": 28352 }
106
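The `load_dataset` function in the record above is essentially an orchestration of `load_dataset_builder`, `DatasetBuilder.download_and_prepare`, and `DatasetBuilder.as_dataset` (or `as_streaming_dataset` when `streaming=True`). A minimal sketch of that same flow driven by hand; the repository id is taken from the docstring example above, and every call mirrors one that appears in the source.

```py
# Minimal sketch of the load_dataset flow, using the builder API called in the
# source above (load_dataset_builder -> download_and_prepare -> as_dataset).
from datasets import load_dataset_builder

builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes")
print(builder.info.features)  # inspect features before downloading anything

# Non-streaming path: materialize typed Arrow tables in the cache, then build the split.
builder.download_and_prepare()
ds = builder.as_dataset(split="train")
print(ds[0])

# Streaming path: nothing is downloaded up front; records are produced lazily.
ds_stream = builder.as_streaming_dataset(split="train")
print(next(iter(ds_stream)))
```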
import itertools from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, Optional from typing import List as ListT import numpy as np import pyarrow as pa import datasets from datasets.features.features import ( Array2D, Array3D, Array4D, Array5D, Features, LargeList, List, Value, _ArrayXD, _arrow_to_datasets_dtype, ) from datasets.table import table_cast if TYPE_CHECKING: import h5py logger = datasets.utils.logging.get_logger(__name__) EXTENSIONS = [".h5", ".hdf5"] @dataclass class HDF5Config(datasets.BuilderConfig): """BuilderConfig for HDF5.""" batch_size: Optional[int] = None columns: Optional[ListT[str]] = None features: Optional[datasets.Features] = None def __post_init__(self): super().__post_init__() class HDF5(datasets.ArrowBasedBuilder): """ArrowBasedBuilder that converts HDF5 files to Arrow tables using the HF extension types.""" BUILDER_CONFIG_CLASS = HDF5Config def _info(self): if ( self.config.columns is not None and self.config.features is not None and set(self.config.columns) != set(self.config.features) ): raise ValueError( "The columns and features argument must contain the same columns, but got ", f"{self.config.columns} and {self.config.features}", ) return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): import h5py if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] # Infer features from first file if self.info.features is None: for first_file in itertools.chain.from_iterable(files): with h5py.File(first_file, "r") as h5: dataset_map = _traverse_datasets(h5) features_dict = {} for path, dset in dataset_map.items(): if _is_complex_dtype(dset.dtype): complex_features = _create_complex_features(path, dset) features_dict.update(complex_features) elif _is_compound_dtype(dset.dtype): compound_features = _create_compound_features(path, dset) features_dict.update(compound_features) elif _is_vlen_string_dtype(dset.dtype): features_dict[path] = Value("string") else: feat = _infer_feature_from_dataset(dset) features_dict[path] = feat self.info.features = datasets.Features(features_dict) break splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) if self.config.columns is not None and set(self.config.columns) != set(self.info.features): self.info.features = datasets.Features( {col: feat for col, feat in self.info.features.items() if col in self.config.columns} ) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.info.features is not None: pa_table = table_cast(pa_table, self.info.features.arrow_schema) return pa_table def _generate_tables(self, files): import h5py batch_size_cfg = self.config.batch_size for file_idx, file in enumerate(itertools.chain.from_iterable(files)): try: with h5py.File(file, "r") as h5: dataset_map = _traverse_datasets(h5) if not dataset_map: logger.warning(f"File '{file}' contains no data, skipping...") continue if self.config.columns is not None: filtered_dataset_map = { path: dset for path, dset in dataset_map.items() if path in self.config.columns } if not filtered_dataset_map: logger.warning( f"No datasets match the specified columns {self.config.columns}, skipping..." 
) continue dataset_map = filtered_dataset_map # Sanity-check lengths for selected datasets first_dset = next(iter(dataset_map.values())) num_rows = first_dset.shape[0] for path, dset in dataset_map.items(): if dset.shape[0] != num_rows: raise ValueError( f"Dataset '{path}' length {dset.shape[0]} differs from {num_rows} in file '{file}'" ) effective_batch = batch_size_cfg or self._writer_batch_size or num_rows for start in range(0, num_rows, effective_batch): end = min(start + effective_batch, num_rows) batch_dict = {} for path, dset in dataset_map.items(): arr = dset[start:end] # Handle variable-length arrays if _is_vlen_string_dtype(dset.dtype): logger.debug( f"Converting variable-length string data for '{path}' (shape: {arr.shape})" ) batch_dict[path] = _convert_vlen_string_to_array(arr) elif ( hasattr(dset.dtype, "metadata") and dset.dtype.metadata and "vlen" in dset.dtype.metadata ): # Handle other variable-length types (non-strings) pa_arr = datasets.features.features.numpy_to_pyarrow_listarray(arr) batch_dict[path] = pa_arr elif _is_complex_dtype(dset.dtype): batch_dict.update(_convert_complex_to_nested(path, arr, dset)) elif _is_compound_dtype(dset.dtype): batch_dict.update(_convert_compound_to_nested(path, arr, dset)) elif dset.dtype.kind == "O": raise ValueError( f"Object dtype dataset '{path}' is not supported. " f"For variable-length data, please use h5py.vlen_dtype() " f"when creating the HDF5 file. " f"See: https://docs.h5py.org/en/stable/special.html#variable-length-strings" ) else: # If any non-batch dimension is zero, emit an unsized pa.list_ # to avoid creating FixedSizeListArray with list_size=0. if any(dim == 0 for dim in dset.shape[1:]): inner_type = pa.from_numpy_dtype(dset.dtype) pa_arr = pa.array([[] for _ in arr], type=pa.list_(inner_type)) else: pa_arr = datasets.features.features.numpy_to_pyarrow_listarray(arr) batch_dict[path] = pa_arr pa_table = pa.Table.from_pydict(batch_dict) yield f"{file_idx}_{start}", self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise def _traverse_datasets(h5_obj, prefix: str = "") -> Dict[str, "h5py.Dataset"]: import h5py mapping: Dict[str, h5py.Dataset] = {} def collect_datasets(name, obj): if isinstance(obj, h5py.Dataset): full_path = f"{prefix}{name}" if prefix else name mapping[full_path] = obj h5_obj.visititems(collect_datasets) return mapping # ┌───────────┐ # │ Complex │ # └───────────┘ def _is_complex_dtype(dtype: np.dtype) -> bool: """Check if dtype is a complex number type.""" return dtype.kind == "c" def _create_complex_features(base_path: str, dset: "h5py.Dataset") -> Dict[str, Features]: """Create Features for complex data with real and imaginary parts `real` and `imag`. NOTE: Always uses float64 for the real and imaginary parts. 
""" logger.info( f"Complex dataset '{base_path}' (dtype: {dset.dtype}) represented as nested structure with 'real' and 'imag' fields" ) nested_features = Features( { "real": Value("float64"), "imag": Value("float64"), } ) return {base_path: nested_features} def _convert_complex_to_nested(base_path: str, arr: np.ndarray, dset: "h5py.Dataset") -> Dict[str, pa.Array]: """Convert complex to Features with real and imaginary parts `real` and `imag`.""" result = {} def _convert_complex_scalar(complex_val): """Convert a complex scalar to a dictionary.""" if complex_val.size == 1: return {"real": float(complex_val.item().real), "imag": float(complex_val.item().imag)} else: # For multi-dimensional arrays, convert to list return {"real": complex_val.real.tolist(), "imag": complex_val.imag.tolist()} result[base_path] = pa.array([_convert_complex_scalar(complex_val) for complex_val in arr]) return result # ┌────────────┐ # │ Compound │ # └────────────┘ def _is_compound_dtype(dtype: np.dtype) -> bool: """Check if dtype is a compound/structured type.""" return dtype.names is not None class _MockDataset: def __init__(self, dtype): self.dtype = dtype self.names = dtype.names def _create_compound_features(base_path: str, dset: "h5py.Dataset") -> Dict[str, Features]: """Create nested features for compound data with field names as keys.""" field_names = list(dset.dtype.names) logger.info( f"Compound dataset '{base_path}' (dtype: {dset.dtype}) represented as nested Features with fields: {field_names}" ) nested_features_dict = {} for field_name in field_names: field_dtype = dset.dtype[field_name] if _is_complex_dtype(field_dtype): nested_features_dict[field_name] = Features( { "real": Value("float64"), "imag": Value("float64"), } ) elif _is_compound_dtype(field_dtype): mock_dset = _MockDataset(field_dtype) nested_features_dict[field_name] = _create_compound_features(field_name, mock_dset)[field_name] else: nested_features_dict[field_name] = _np_to_pa_to_hf_value(field_dtype) nested_features = Features(nested_features_dict) return {base_path: nested_features} def _convert_compound_to_nested(base_path: str, arr: np.ndarray, dset: "h5py.Dataset") -> Dict[str, pa.Array]: """Convert compound array to nested structure with field names as keys.""" result = {} def _convert_compound_recursive(compound_arr, compound_dtype): """Recursively convert compound array to nested structure.""" nested_data = [] for row in compound_arr: row_dict = {} for field_name in compound_dtype.names: field_dtype = compound_dtype[field_name] field_data = row[field_name] if _is_complex_dtype(field_dtype): row_dict[field_name] = {"real": float(field_data.real), "imag": float(field_data.imag)} elif _is_compound_dtype(field_dtype): row_dict[field_name] = _convert_compound_recursive([field_data], field_dtype)[0] else: row_dict[field_name] = field_data.item() if field_data.size == 1 else field_data.tolist() nested_data.append(row_dict) return nested_data result[base_path] = pa.array(_convert_compound_recursive(arr, dset.dtype)) return result # ┌───────────────────────────┐ # │ Variable-Length Strings │ # └───────────────────────────┘ def _is_vlen_string_dtype(dtype: np.dtype) -> bool: """Check if dtype is a variable-length string type.""" if hasattr(dtype, "metadata") and dtype.metadata and "vlen" in dtype.metadata: vlen_dtype = dtype.metadata["vlen"] return vlen_dtype in (str, bytes) return False def _convert_vlen_string_to_array(arr: np.ndarray) -> pa.Array: list_of_items = [] for item in arr: if isinstance(item, bytes): logger.info("Assuming 
variable-length bytes are utf-8 encoded strings") list_of_items.append(item.decode("utf-8")) elif isinstance(item, str): list_of_items.append(item) else: raise ValueError(f"Unsupported variable-length string type: {type(item)}") return pa.array(list_of_items) # ┌───────────┐ # │ Generic │ # └───────────┘ def _infer_feature_from_dataset(dset: "h5py.Dataset"): # non-string varlen if hasattr(dset.dtype, "metadata") and dset.dtype.metadata and "vlen" in dset.dtype.metadata: vlen_dtype = dset.dtype.metadata["vlen"] inner_feature = _np_to_pa_to_hf_value(vlen_dtype) return List(inner_feature) value_feature = _np_to_pa_to_hf_value(dset.dtype) dtype_str = value_feature.dtype dset_shape = dset.shape[1:] if any(dim == 0 for dim in dset_shape): logger.warning( f"HDF5 to Arrow: Found a dataset named '{dset.name}' with shape {dset_shape} and dtype {dtype_str} that has a dimension with size 0. Shape information will be lost in the conversion to List({value_feature})." ) return List(value_feature) rank = len(dset_shape) if rank == 0: return value_feature elif rank == 1: return List(value_feature, length=dset_shape[0]) elif rank <= 5: return _sized_arrayxd(rank)(shape=dset_shape, dtype=dtype_str) else: raise TypeError(f"Array{rank}D not supported. Maximum 5 dimensions allowed.") def _has_zero_dimensions(feature): if isinstance(feature, _ArrayXD): return any(dim == 0 for dim in feature.shape) elif isinstance(feature, List): # also gets regular List return feature.length == 0 or _has_zero_dimensions(feature.feature) elif isinstance(feature, LargeList): return _has_zero_dimensions(feature.feature) else: return False def _sized_arrayxd(rank: int): return {2: Array2D, 3: Array3D, 4: Array4D, 5: Array5D}[rank] def _np_to_pa_to_hf_value(numpy_dtype: np.dtype) -> Value: return Value(dtype=_arrow_to_datasets_dtype(pa.from_numpy_dtype(numpy_dtype)))
datasets/src/datasets/packaged_modules/hdf5/hdf5.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/hdf5/hdf5.py", "repo_id": "datasets", "token_count": 7787 }
107
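The HDF5 builder above maps each HDF5 dataset path to a column: N-dimensional numeric datasets become `List`/`ArrayXD` features, complex dtypes become nested `{'real', 'imag'}` structs, compound dtypes become nested structs keyed by field name, and variable-length string dtypes become `string`. Below is a small sketch of an input file it is written to handle, created with h5py. The file name is a placeholder, and loading it via a packaged builder named `"hdf5"` is an assumption: that name does not appear in the packaged-builder list quoted in the `load_dataset` docstring earlier, so the exact entry point may differ.

```py
# Sketch: create a small HDF5 file with the kinds of datasets the builder above
# knows how to convert, then (assumption) load it through a packaged "hdf5" builder.
import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:  # hypothetical file name
    f.create_dataset("measurements", data=np.random.rand(8, 3))  # -> List(float64, length=3)
    f.create_dataset("signal", data=np.ones(8, dtype=np.complex64))  # -> {'real', 'imag'} struct
    f.create_dataset(
        "labels",
        data=np.array(["cat", "dog"] * 4, dtype=h5py.string_dtype()),  # vlen str -> Value("string")
    )

# Assumption: the builder is exposed under the packaged-module name "hdf5".
from datasets import load_dataset

ds = load_dataset("hdf5", data_files="example.h5", split="train")
print(ds.features)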
import itertools from dataclasses import dataclass from io import StringIO from typing import Optional import pyarrow as pa import datasets from datasets.features.features import require_storage_cast from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class TextConfig(datasets.BuilderConfig): """BuilderConfig for text files.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" encoding_errors: Optional[str] = None chunksize: int = 10 << 20 # 10MB keep_linebreaks: bool = False sample_by: str = "line" class Text(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = TextConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. If str or List[str], then the dataset returns only the 'train' split. If dict, then keys should be from the `datasets.Split` enum. """ if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa_table.cast(schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table else: return pa_table.cast(pa.schema({"text": pa.string()})) def _generate_tables(self, files): pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: if self.config.sample_by == "line": batch_idx = 0 while True: batch = f.read(self.config.chunksize) if not batch: break batch += f.readline() # finish current line # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) batch = StringIO(batch).readlines() if not self.config.keep_linebreaks: batch = [line.rstrip("\n") for line in batch] pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 elif self.config.sample_by == "paragraph": batch_idx = 0 batch = "" while True: new_batch = f.read(self.config.chunksize) if not new_batch: break batch += new_batch batch += f.readline() # finish current line batch = batch.split("\n\n") pa_table = pa.Table.from_arrays( [pa.array([example for example in batch[:-1] if example])], names=pa_table_names ) # Uncomment for debugging (will print the Arrow table size and elements) # 
logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 batch = batch[-1] if batch: pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) yield (file_idx, batch_idx), self._cast_table(pa_table) elif self.config.sample_by == "document": text = f.read() pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) yield file_idx, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/text/text.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/text/text.py", "repo_id": "datasets", "token_count": 2703 }
108
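The `Text` builder above exposes its behaviour through `TextConfig`: `sample_by` decides whether each row is a line, a blank-line-separated paragraph, or a whole file, and `keep_linebreaks` controls whether the trailing `"\n"` is kept in line mode. A minimal usage sketch follows; the file path is a placeholder, and the keyword arguments are forwarded to the builder config via `**config_kwargs` as in `load_dataset` above.

```py
# Sketch: the "text" packaged builder with the TextConfig options defined above.
from datasets import load_dataset

# One example per line (default); the trailing "\n" is stripped unless keep_linebreaks=True.
lines = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")

# One example per paragraph (blocks separated by blank lines).
paragraphs = load_dataset(
    "text", data_files={"train": "corpus.txt"}, split="train", sample_by="paragraph"
)

# One example per file.
documents = load_dataset(
    "text", data_files={"train": "corpus.txt"}, split="train", sample_by="document"
)

print(lines[0], paragraphs[0], documents[0], sep="\n")
```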
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extends `dill` to support pickling more types and produce more consistent dumps.""" import os import sys from io import BytesIO from types import CodeType, FunctionType import dill from packaging import version from .. import config class Pickler(dill.Pickler): dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy()) _legacy_no_dict_keys_sorting = False def save(self, obj, save_persistent_id=True): obj_type = type(obj) if obj_type not in self.dispatch: if "regex" in sys.modules: import regex # type: ignore if obj_type is regex.Pattern: pklregister(obj_type)(_save_regexPattern) if "spacy" in sys.modules: import spacy # type: ignore if issubclass(obj_type, spacy.Language): pklregister(obj_type)(_save_spacyLanguage) if "tiktoken" in sys.modules: import tiktoken # type: ignore if obj_type is tiktoken.Encoding: pklregister(obj_type)(_save_tiktokenEncoding) if "torch" in sys.modules: import torch # type: ignore if issubclass(obj_type, torch.Tensor): pklregister(obj_type)(_save_torchTensor) if obj_type is torch.Generator: pklregister(obj_type)(_save_torchGenerator) # Unwrap `torch.compile`-ed modules if issubclass(obj_type, torch.nn.Module): obj = getattr(obj, "_orig_mod", obj) if "transformers" in sys.modules: import transformers # type: ignore if issubclass(obj_type, transformers.PreTrainedTokenizerBase): pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase) # Unwrap `torch.compile`-ed functions if obj_type is FunctionType: obj = getattr(obj, "_torchdynamo_orig_callable", obj) dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id) def _batch_setitems(self, items): if self._legacy_no_dict_keys_sorting: return super()._batch_setitems(items) # Ignore the order of keys in a dict try: # Faster, but fails for unorderable elements items = sorted(items) except Exception: # TypeError, decimal.InvalidOperation, etc. 
from datasets.fingerprint import Hasher items = sorted(items, key=lambda x: Hasher.hash(x[0])) dill.Pickler._batch_setitems(self, items) def memoize(self, obj): # Don't memoize strings since two identical strings can have different Python ids if type(obj) is not str: # noqa: E721 dill.Pickler.memoize(self, obj) def pklregister(t): """Register a custom reducer for the type.""" def proxy(func): Pickler.dispatch[t] = func return func return proxy def dump(obj, file): """Pickle an object to a file.""" Pickler(file, recurse=True).dump(obj) def dumps(obj): """Pickle an object to a string.""" file = BytesIO() dump(obj, file) return file.getvalue() if config.DILL_VERSION < version.parse("0.3.6"): def log(pickler, msg): dill._dill.log.info(msg) elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: def log(pickler, msg): dill._dill.logger.trace(pickler, msg) @pklregister(set) def _save_set(pickler, obj): log(pickler, f"Se: {obj}") try: # Faster, but fails for unorderable elements args = (sorted(obj),) except Exception: # TypeError, decimal.InvalidOperation, etc. from datasets.fingerprint import Hasher args = (sorted(obj, key=Hasher.hash),) pickler.save_reduce(set, args, obj=obj) log(pickler, "# Se") def _save_regexPattern(pickler, obj): import regex # type: ignore log(pickler, f"Re: {obj}") args = (obj.pattern, obj.flags) pickler.save_reduce(regex.compile, args, obj=obj) log(pickler, "# Re") def _save_tiktokenEncoding(pickler, obj): import tiktoken # type: ignore log(pickler, f"Enc: {obj}") args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens) pickler.save_reduce(tiktoken.Encoding, args, obj=obj) log(pickler, "# Enc") def _save_torchTensor(pickler, obj): import torch # type: ignore # `torch.from_numpy` is not picklable in `torch>=1.11.0` def create_torchTensor(np_array, dtype=None): tensor = torch.from_numpy(np_array) if dtype: tensor = tensor.type(dtype) return tensor log(pickler, f"To: {obj}") if obj.dtype == torch.bfloat16: args = (obj.detach().to(torch.float).cpu().numpy(), torch.bfloat16) else: args = (obj.detach().cpu().numpy(),) pickler.save_reduce(create_torchTensor, args, obj=obj) log(pickler, "# To") def _save_torchGenerator(pickler, obj): import torch # type: ignore def create_torchGenerator(state): generator = torch.Generator() generator.set_state(state) return generator log(pickler, f"Ge: {obj}") args = (obj.get_state(),) pickler.save_reduce(create_torchGenerator, args, obj=obj) log(pickler, "# Ge") def _save_spacyLanguage(pickler, obj): import spacy # type: ignore def create_spacyLanguage(config, bytes): lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"]) lang_inst = lang_cls.from_config(config) return lang_inst.from_bytes(bytes) log(pickler, f"Sp: {obj}") args = (obj.config, obj.to_bytes()) pickler.save_reduce(create_spacyLanguage, args, obj=obj) log(pickler, "# Sp") def _save_transformersPreTrainedTokenizerBase(pickler, obj): log(pickler, f"Tok: {obj}") # Ignore the `cache` attribute state = obj.__dict__ if "cache" in state and isinstance(state["cache"], dict): state["cache"] = {} pickler.save_reduce(type(obj), (), state=state, obj=obj) log(pickler, "# Tok") if config.DILL_VERSION < version.parse("0.3.6"): @pklregister(CodeType) def _save_code(pickler, obj): """ From dill._dill.save_code This is a modified version that removes the origin (filename + line no.) of functions created in notebooks or shells for example. 
""" dill._dill.log.info(f"Co: {obj}") # The filename of a function is the .py file where it is defined. # Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. # # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation (with also a version check for 3.10) if dill._dill.PY3: if hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_linetable if sys.version_info >= (3, 10) else obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: # python 3.7 (15 args) args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(CodeType, args, obj=obj) dill._dill.log.info("# Co") return elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104 @pklregister(CodeType) def save_code(pickler, obj): dill._dill.logger.trace(pickler, "Co: %s", obj) ############################################################################################################ # Modification here for huggingface/datasets # The filename of a function is the .py file where it is defined. # Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. 
# # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation, except for the replacements: # - obj.co_filename => co_filename # - obj.co_firstlineno => co_firstlineno # - obj.co_lnotab => obj.co_linetable for >= 3.10 since co_lnotab was deprecated ############################################################################################################ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args) args = ( obj.co_linetable, # Modification for huggingface/datasets ############################################ obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_endlinetable, obj.co_columntable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args) args = ( obj.co_linetable, # Modification for huggingface/datasets ####################################### obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_linetable"): # python 3.10 (16 args) args = ( obj.co_linetable, # Modification for huggingface/datasets ####################################### obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: # python 3.7 (15 args) args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, 
obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(dill._dill._create_code, args, obj=obj) dill._dill.logger.trace(pickler, "# Co") return
datasets/src/datasets/utils/_dill.py/0
{ "file_path": "datasets/src/datasets/utils/_dill.py", "repo_id": "datasets", "token_count": 8586 }
109
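The `Pickler` above exists to make dumps deterministic for fingerprinting: dict and set items are pickled in sorted order, strings are not memoized, and the custom code-object reducers strip volatile details such as file names and line numbers. A small sketch of the intended effect, using the module-level `dumps` helper defined above; note that `datasets.utils._dill` is an internal module, so the import path is an implementation detail and exact byte-equality can depend on the installed `dill` version.

```py
# Sketch: the custom Pickler should produce identical bytes for containers
# that differ only in insertion order, which is what fingerprinting relies on.
from datasets.utils._dill import dumps  # internal helper defined in the module above

a = dumps({"x": 1, "y": 2})
b = dumps({"y": 2, "x": 1})
print(a == b)  # expected True: _batch_setitems sorts dict items before pickling

s1 = dumps({3, 1, 2})
s2 = dumps({2, 3, 1})
print(s1 == s2)  # expected True: _save_set pickles sorted(obj)
```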
{ "code": "Programming language (C++, Java, Javascript, Python, etc.)", "aa": "Afar", "aaa": "Ghotuo", "aab": "Alumu-Tesu", "aac": "Ari", "aad": "Amal", "aae": "Arbëreshë Albanian", "aaf": "Aranadan", "aag": "Ambrak", "aah": "Abu' Arapesh", "aai": "Arifama-Miniafia", "aak": "Ankave", "aal": "Afade", "aan": "Anambé", "aao": "Algerian Saharan Arabic", "aap": "Pará Arára", "aaq": "Eastern Abnaki", "aas": "Aasáx", "aat": "Arvanitika Albanian", "aau": "Abau", "aav": "Austro-Asiatic languages", "aaw": "Solong", "aax": "Mandobo Atas", "aaz": "Amarasi", "ab": "Abkhazian", "aba": "Abé", "abb": "Bankon", "abc": "Ambala Ayta", "abd": "Manide", "abe": "Western Abnaki", "abf": "Abai Sungai", "abg": "Abaga", "abh": "Tajiki Arabic", "abi": "Abidji", "abj": "Aka-Bea", "abl": "Lampung Nyo", "abm": "Abanyom", "abn": "Abua", "abo": "Abon", "abp": "Abellen Ayta", "abq": "Abaza", "abr": "Abron", "abs": "Ambonese Malay", "abt": "Ambulas", "abu": "Abure", "abv": "Baharna Arabic", "abw": "Pal", "abx": "Inabaknon", "aby": "Aneme Wake", "abz": "Abui", "aca": "Achagua", "acb": "Áncá", "acd": "Gikyode", "ace": "Achinese", "acf": "Saint Lucian Creole French", "ach": "Acoli", "aci": "Aka-Cari", "ack": "Aka-Kora", "acl": "Akar-Bale", "acm": "Mesopotamian Arabic", "acn": "Achang", "acp": "Eastern Acipa", "acq": "Ta'izzi-Adeni Arabic", "acr": "Achi", "acs": "Acroá", "act": "Achterhoeks", "acu": "Achuar-Shiwiar", "acv": "Achumawi", "acw": "Hijazi Arabic", "acx": "Omani Arabic", "acy": "Cypriot Arabic", "acz": "Acheron", "ada": "Adangme", "adb": "Atauran", "add": "Lidzonka; Dzodinka", "ade": "Adele", "adf": "Dhofari Arabic", "adg": "Andegerebinha", "adh": "Adhola", "adi": "Adi", "adj": "Adioukrou", "adl": "Galo", "adn": "Adang", "ado": "Abu", "adq": "Adangbe", "adr": "Adonara", "ads": "Adamorobe Sign Language", "adt": "Adnyamathanha", "adu": "Aduge", "adw": "Amundava", "adx": "Amdo Tibetan", "ady": "Adyghe; Adygei", "adz": "Adzera", "ae": "Avestan", "aea": "Areba", "aeb": "Tunisian Arabic", "aec": "Saidi Arabic", "aed": "Argentine Sign Language", "aee": "Northeast Pashai; Northeast Pashayi", "aek": "Haeke", "ael": "Ambele", "aem": "Arem", "aen": "Armenian Sign Language", "aeq": "Aer", "aer": "Eastern Arrernte", "aes": "Alsea", "aeu": "Akeu", "aew": "Ambakich", "aey": "Amele", "aez": "Aeka", "af": "Afrikaans", "afa": "Afro-Asiatic languages", "afb": "Gulf Arabic", "afd": "Andai", "afe": "Putukwam", "afg": "Afghan Sign Language", "afh": "Afrihili", "afi": "Akrukay; Chini", "afk": "Nanubae", "afn": "Defaka", "afo": "Eloyi", "afp": "Tapei", "afs": "Afro-Seminole Creole", "aft": "Afitti", "afu": "Awutu", "afz": "Obokuitai", "aga": "Aguano", "agb": "Legbo", "agc": "Agatu", "agd": "Agarabi", "age": "Angal", "agf": "Arguni", "agg": "Angor", "agh": "Ngelima", "agi": "Agariya", "agj": "Argobba", "agk": "Isarog Agta", "agl": "Fembe", "agm": "Angaataha", "agn": "Agutaynen", "ago": "Tainae", "agq": "Aghem", "agr": "Aguaruna", "ags": "Esimbi", "agt": "Central Cagayan Agta", "agu": "Aguacateco", "agv": "Remontado Dumagat", "agw": "Kahua", "agx": "Aghul", "agy": "Southern Alta", "agz": "Mt. 
Iriga Agta", "aha": "Ahanta", "ahb": "Axamb", "ahg": "Qimant", "ahh": "Aghu", "ahi": "Tiagbamrin Aizi", "ahk": "Akha", "ahl": "Igo", "ahm": "Mobumrin Aizi", "ahn": "Àhàn", "aho": "Ahom", "ahp": "Aproumu Aizi", "ahr": "Ahirani", "ahs": "Ashe", "aht": "Ahtena", "aia": "Arosi", "aib": "Ainu (China)", "aic": "Ainbai", "aid": "Alngith", "aie": "Amara", "aif": "Agi", "aig": "Antigua and Barbuda Creole English", "aih": "Ai-Cham", "aii": "Assyrian Neo-Aramaic", "aij": "Lishanid Noshan", "aik": "Ake", "ail": "Aimele", "aim": "Aimol", "ain": "Ainu (Japan)", "aio": "Aiton", "aip": "Burumakok", "aiq": "Aimaq", "air": "Airoran", "ait": "Arikem", "aiw": "Aari", "aix": "Aighon", "aiy": "Ali", "aja": "Aja (South Sudan)", "ajg": "Aja (Benin)", "aji": "Ajië", "ajn": "Andajin", "ajp": "South Levantine Arabic", "ajs": "Algerian Jewish Sign Language", "aju": "Judeo-Moroccan Arabic", "ajw": "Ajawa", "ajz": "Amri Karbi", "ak": "Akan", "akb": "Batak Angkola", "akc": "Mpur", "akd": "Ukpet-Ehom", "ake": "Akawaio", "akf": "Akpa", "akg": "Anakalangu", "akh": "Angal Heneng", "aki": "Aiome", "akj": "Aka-Jeru", "akk": "Akkadian", "akl": "Aklanon", "akm": "Aka-Bo", "ako": "Akurio", "akp": "Siwu", "akq": "Ak", "akr": "Araki", "aks": "Akaselem", "akt": "Akolet", "aku": "Akum", "akv": "Akhvakh", "akw": "Akwa", "akx": "Aka-Kede", "aky": "Aka-Kol", "akz": "Alabama", "ala": "Alago", "alc": "Qawasqar", "ald": "Alladian", "ale": "Aleut", "alf": "Alege", "alg": "Algonquian languages", "alh": "Alawa", "ali": "Amaimon", "alj": "Alangan", "alk": "Alak", "all": "Allar", "alm": "Amblong", "aln": "Gheg Albanian", "alo": "Larike-Wakasihu", "alp": "Alune", "alq": "Algonquin", "alr": "Alutor", "als": "Tosk Albanian", "alt": "Southern Altai", "alu": "'Are'are", "alv": "Atlantic-Congo languages", "alw": "Alaba-K’abeena; Wanbasana", "alx": "Amol", "aly": "Alyawarr", "alz": "Alur", "am": "Amharic", "ama": "Amanayé", "amb": "Ambo", "amc": "Amahuaca", "ame": "Yanesha'", "amf": "Hamer-Banna", "amg": "Amurdak", "ami": "Amis", "amj": "Amdang", "amk": "Ambai", "aml": "War-Jaintia", "amm": "Ama (Papua New Guinea)", "amn": "Amanab", "amo": "Amo", "amp": "Alamblak", "amq": "Amahai", "amr": "Amarakaeri", "ams": "Southern Amami-Oshima", "amt": "Amto", "amu": "Guerrero Amuzgo", "amv": "Ambelau", "amw": "Western Neo-Aramaic", "amx": "Anmatyerre", "amy": "Ami", "amz": "Atampaya", "an": "Aragonese", "ana": "Andaqui", "anb": "Andoa", "anc": "Ngas", "and": "Ansus", "ane": "Xârâcùù", "anf": "Animere", "ang": "Old English (ca. 
450-1100)", "anh": "Nend", "ani": "Andi", "anj": "Anor", "ank": "Goemai", "anl": "Anu-Hkongso Chin", "anm": "Anal", "ann": "Obolo", "ano": "Andoque", "anp": "Angika", "anq": "Jarawa (India)", "anr": "Andh", "ans": "Anserma", "ant": "Antakarinya; Antikarinya", "anu": "Anuak", "anv": "Denya", "anw": "Anaang", "anx": "Andra-Hus", "any": "Anyin", "anz": "Anem", "aoa": "Angolar", "aob": "Abom", "aoc": "Pemon", "aod": "Andarum", "aoe": "Angal Enen", "aof": "Bragat", "aog": "Angoram", "aoi": "Anindilyakwa", "aoj": "Mufian", "aok": "Arhö", "aol": "Alor", "aom": "Ömie", "aon": "Bumbita Arapesh", "aor": "Aore", "aos": "Taikat", "aot": "Atong (India); A'tong", "aou": "A'ou", "aox": "Atorada", "aoz": "Uab Meto", "apa": "Apache languages", "apb": "Sa'a", "apc": "North Levantine Arabic", "apd": "Sudanese Arabic", "ape": "Bukiyip", "apf": "Pahanan Agta", "apg": "Ampanang", "aph": "Athpariya", "api": "Apiaká", "apj": "Jicarilla Apache", "apk": "Kiowa Apache", "apl": "Lipan Apache", "apm": "Mescalero-Chiricahua Apache", "apn": "Apinayé", "apo": "Ambul", "app": "Apma", "apq": "A-Pucikwar", "apr": "Arop-Lokep", "aps": "Arop-Sissano", "apt": "Apatani", "apu": "Apurinã", "apv": "Alapmunte", "apw": "Western Apache", "apx": "Aputai", "apy": "Apalaí", "apz": "Safeyoka", "aqa": "Alacalufan languages", "aqc": "Archi", "aqd": "Ampari Dogon", "aqg": "Arigidi", "aqk": "Aninka", "aql": "Algic languages", "aqm": "Atohwaim", "aqn": "Northern Alta", "aqp": "Atakapa", "aqr": "Arhâ", "aqt": "Angaité", "aqz": "Akuntsu", "ar": "Arabic", "arb": "Standard Arabic", "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", "ard": "Arabana", "are": "Western Arrarnta", "arh": "Arhuaco", "ari": "Arikara", "arj": "Arapaso", "ark": "Arikapú", "arl": "Arabela", "arn": "Mapudungun; Mapuche", "aro": "Araona", "arp": "Arapaho", "arq": "Algerian Arabic", "arr": "Karo (Brazil)", "ars": "Najdi Arabic", "art": "Artificial languages", "aru": "Aruá (Amazonas State); Arawá", "arv": "Arbore", "arw": "Arawak", "arx": "Aruá (Rodonia State)", "ary": "Moroccan Arabic", "arz": "Egyptian Arabic", "as": "Assamese", "asa": "Asu (Tanzania)", "asb": "Assiniboine", "asc": "Casuarina Coast Asmat", "ase": "American Sign Language", "asf": "Auslan; Australian Sign Language", "asg": "Cishingini", "ash": "Abishira", "asi": "Buruwai", "asj": "Sari", "ask": "Ashkun", "asl": "Asilulu", "asn": "Xingú Asuriní", "aso": "Dano", "asp": "Algerian Sign Language", "asq": "Austrian Sign Language", "asr": "Asuri", "ass": "Ipulo", "ast": "Asturian; Asturleonese; Bable; Leonese", "asu": "Tocantins Asurini", "asv": "Asoa", "asw": "Australian Aborigines Sign Language", "asx": "Muratayak", "asy": "Yaosakor Asmat", "asz": "As", "ata": "Pele-Ata", "atb": "Zaiwa", "atc": "Atsahuaca", "atd": "Ata Manobo", "ate": "Atemble", "atg": "Ivbie North-Okpela-Arhe", "ath": "Athapascan languages", "ati": "Attié", "atj": "Atikamekw", "atk": "Ati", "atl": "Mt. 
Iraya Agta", "atm": "Ata", "atn": "Ashtiani", "ato": "Atong (Cameroon)", "atp": "Pudtol Atta", "atq": "Aralle-Tabulahan", "atr": "Waimiri-Atroari", "ats": "Gros Ventre", "att": "Pamplona Atta", "atu": "Reel", "atv": "Northern Altai", "atw": "Atsugewi", "atx": "Arutani", "aty": "Aneityum", "atz": "Arta", "aua": "Asumboa", "aub": "Alugu", "auc": "Waorani", "aud": "Anuta", "auf": "Arauan languages", "aug": "Aguna", "auh": "Aushi", "aui": "Anuki", "auj": "Awjilah", "auk": "Heyo", "aul": "Aulua", "aum": "Asu (Nigeria)", "aun": "Molmo One", "auo": "Auyokawa", "aup": "Makayam", "auq": "Anus; Korur", "aur": "Aruek", "aus": "Australian languages", "aut": "Austral", "auu": "Auye", "auw": "Awyi", "aux": "Aurá", "auy": "Awiyaana", "auz": "Uzbeki Arabic", "av": "Avaric", "avb": "Avau", "avd": "Alviri-Vidari", "avi": "Avikam", "avk": "Kotava", "avl": "Eastern Egyptian Bedawi Arabic", "avm": "Angkamuthi", "avn": "Avatime", "avo": "Agavotaguerra", "avs": "Aushiri", "avt": "Au", "avu": "Avokaya", "avv": "Avá-Canoeiro", "awa": "Awadhi", "awb": "Awa (Papua New Guinea)", "awc": "Cicipu", "awd": "Arawakan languages", "awe": "Awetí", "awg": "Anguthimri", "awh": "Awbono", "awi": "Aekyom", "awk": "Awabakal", "awm": "Arawum", "awn": "Awngi", "awo": "Awak", "awr": "Awera", "aws": "South Awyu", "awt": "Araweté", "awu": "Central Awyu", "awv": "Jair Awyu", "aww": "Awun", "awx": "Awara", "awy": "Edera Awyu", "axb": "Abipon", "axe": "Ayerrerenge", "axg": "Mato Grosso Arára", "axk": "Yaka (Central African Republic)", "axl": "Lower Southern Aranda", "axm": "Middle Armenian", "axx": "Xârâgurè", "ay": "Aymara", "aya": "Awar", "ayb": "Ayizo Gbe", "ayc": "Southern Aymara", "ayd": "Ayabadhu", "aye": "Ayere", "ayg": "Ginyanga", "ayh": "Hadrami Arabic", "ayi": "Leyigha", "ayk": "Akuku", "ayl": "Libyan Arabic", "ayn": "Sanaani Arabic", "ayo": "Ayoreo", "ayp": "North Mesopotamian Arabic", "ayq": "Ayi (Papua New Guinea)", "ayr": "Central Aymara", "ays": "Sorsogon Ayta", "ayt": "Magbukun Ayta", "ayu": "Ayu", "ayz": "Mai Brat", "az": "Azerbaijani", "aza": "Azha", "azb": "South Azerbaijani", "azc": "Uto-Aztecan languages", "azd": "Eastern Durango Nahuatl", "azg": "San Pedro Amuzgos Amuzgo", "azj": "North Azerbaijani", "azm": "Ipalapa Amuzgo", "azn": "Western Durango Nahuatl", "azo": "Awing", "azt": "Faire Atta", "azz": "Highland Puebla Nahuatl", "ba": "Bashkir", "baa": "Babatana", "bab": "Bainouk-Gunyuño", "bac": "Badui", "bad": "Banda languages", "bae": "Baré", "baf": "Nubaca", "bag": "Tuki", "bah": "Bahamas Creole English", "bai": "Bamileke languages", "baj": "Barakai", "bal": "Baluchi", "ban": "Balinese", "bao": "Waimaha", "bap": "Bantawa", "bar": "Bavarian", "bas": "Basa (Cameroon)", "bat": "Baltic languages", "bau": "Bada (Nigeria)", "bav": "Vengo", "baw": "Bambili-Bambui", "bax": "Bamun", "bay": "Batuley", "bba": "Baatonum", "bbb": "Barai", "bbc": "Batak Toba", "bbd": "Bau", "bbe": "Bangba", "bbf": "Baibai", "bbg": "Barama", "bbh": "Bugan", "bbi": "Barombi", "bbj": "Ghomálá'", "bbk": "Babanki", "bbl": "Bats", "bbm": "Babango", "bbn": "Uneapa", "bbo": "Northern Bobo Madaré; Konabéré", "bbp": "West Central Banda", "bbq": "Bamali", "bbr": "Girawa", "bbs": "Bakpinka", "bbt": "Mburku", "bbu": "Kulung (Nigeria)", "bbv": "Karnai", "bbw": "Baba", "bbx": "Bubia", "bby": "Befang", "bca": "Central Bai", "bcb": "Bainouk-Samik", "bcc": "Southern Balochi", "bcd": "North Babar", "bce": "Bamenyam", "bcf": "Bamu", "bcg": "Baga Pokur", "bch": "Bariai", "bci": "Baoulé", "bcj": "Bardi", "bck": "Bunuba", "bcl": "Central Bikol", "bcm": "Bannoni", 
"bcn": "Bali (Nigeria)", "bco": "Kaluli", "bcp": "Bali (Democratic Republic of Congo)", "bcq": "Bench", "bcr": "Babine", "bcs": "Kohumono", "bct": "Bendi", "bcu": "Awad Bing", "bcv": "Shoo-Minda-Nye", "bcw": "Bana", "bcy": "Bacama", "bcz": "Bainouk-Gunyaamolo", "bda": "Bayot", "bdb": "Basap", "bdc": "Emberá-Baudó", "bdd": "Bunama", "bde": "Bade", "bdf": "Biage", "bdg": "Bonggi", "bdh": "Baka (South Sudan)", "bdi": "Burun", "bdj": "Bai (South Sudan); Bai", "bdk": "Budukh", "bdl": "Indonesian Bajau", "bdm": "Buduma", "bdn": "Baldemu", "bdo": "Morom", "bdp": "Bende", "bdq": "Bahnar", "bdr": "West Coast Bajau", "bds": "Burunge", "bdt": "Bokoto", "bdu": "Oroko", "bdv": "Bodo Parja", "bdw": "Baham", "bdx": "Budong-Budong", "bdy": "Bandjalang", "bdz": "Badeshi", "be": "Belarusian", "bea": "Beaver", "beb": "Bebele", "bec": "Iceve-Maci", "bed": "Bedoanas", "bee": "Byangsi", "bef": "Benabena", "beg": "Belait", "beh": "Biali", "bei": "Bekati'", "bej": "Beja; Bedawiyet", "bek": "Bebeli", "bem": "Bemba (Zambia)", "beo": "Beami", "bep": "Besoa", "beq": "Beembe", "ber": "Berber languages", "bes": "Besme", "bet": "Guiberoua Béte", "beu": "Blagar", "bev": "Daloa Bété", "bew": "Betawi", "bex": "Jur Modo", "bey": "Beli (Papua New Guinea)", "bez": "Bena (Tanzania)", "bfa": "Bari", "bfb": "Pauri Bareli", "bfc": "Panyi Bai; Northern Bai", "bfd": "Bafut", "bfe": "Betaf; Tena", "bff": "Bofi", "bfg": "Busang Kayan", "bfh": "Blafe", "bfi": "British Sign Language", "bfj": "Bafanji", "bfk": "Ban Khor Sign Language", "bfl": "Banda-Ndélé", "bfm": "Mmen", "bfn": "Bunak", "bfo": "Malba Birifor", "bfp": "Beba", "bfq": "Badaga", "bfr": "Bazigar", "bfs": "Southern Bai", "bft": "Balti", "bfu": "Gahri", "bfw": "Bondo", "bfx": "Bantayanon", "bfy": "Bagheli", "bfz": "Mahasu Pahari", "bg": "Bulgarian", "bga": "Gwamhi-Wuri", "bgb": "Bobongko", "bgc": "Haryanvi", "bgd": "Rathwi Bareli", "bge": "Bauria", "bgf": "Bangandu", "bgg": "Bugun", "bgi": "Giangan", "bgj": "Bangolan", "bgk": "Bit; Buxinhua", "bgl": "Bo (Laos)", "bgn": "Western Balochi", "bgo": "Baga Koga", "bgp": "Eastern Balochi", "bgq": "Bagri", "bgr": "Bawm Chin", "bgs": "Tagabawa", "bgt": "Bughotu", "bgu": "Mbongno", "bgv": "Warkay-Bipim", "bgw": "Bhatri", "bgx": "Balkan Gagauz Turkish", "bgy": "Benggoi", "bgz": "Banggai", "bh": "Bihari languages", "bha": "Bharia", "bhb": "Bhili", "bhc": "Biga", "bhd": "Bhadrawahi", "bhe": "Bhaya", "bhf": "Odiai", "bhg": "Binandere", "bhh": "Bukharic", "bhi": "Bhilali", "bhj": "Bahing", "bhl": "Bimin", "bhm": "Bathari", "bhn": "Bohtan Neo-Aramaic", "bho": "Bhojpuri", "bhp": "Bima", "bhq": "Tukang Besi South", "bhr": "Bara Malagasy", "bhs": "Buwal", "bht": "Bhattiyali", "bhu": "Bhunjia", "bhv": "Bahau", "bhw": "Biak", "bhx": "Bhalay", "bhy": "Bhele", "bhz": "Bada (Indonesia)", "bi": "Bislama", "bia": "Badimaya", "bib": "Bissa; Bisa", "bid": "Bidiyo", "bie": "Bepour", "bif": "Biafada", "big": "Biangai", "bik": "Bikol", "bil": "Bile", "bim": "Bimoba", "bin": "Bini; Edo", "bio": "Nai", "bip": "Bila", "biq": "Bipi", "bir": "Bisorio", "bit": "Berinomo", "biu": "Biete", "biv": "Southern Birifor", "biw": "Kol (Cameroon)", "bix": "Bijori", "biy": "Birhor", "biz": "Baloi", "bja": "Budza", "bjb": "Banggarla", "bjc": "Bariji", "bje": "Biao-Jiao Mien", "bjf": "Barzani Jewish Neo-Aramaic", "bjg": "Bidyogo", "bjh": "Bahinemo", "bji": "Burji", "bjj": "Kanauji", "bjk": "Barok", "bjl": "Bulu (Papua New Guinea)", "bjm": "Bajelani", "bjn": "Banjar", "bjo": "Mid-Southern Banda", "bjp": "Fanamaket", "bjr": "Binumarien", "bjs": "Bajan", "bjt": "Balanta-Ganja", 
"bju": "Busuu", "bjv": "Bedjond", "bjw": "Bakwé", "bjx": "Banao Itneg", "bjy": "Bayali", "bjz": "Baruga", "bka": "Kyak", "bkc": "Baka (Cameroon)", "bkd": "Binukid; Talaandig", "bkf": "Beeke", "bkg": "Buraka", "bkh": "Bakoko", "bki": "Baki", "bkj": "Pande", "bkk": "Brokskat", "bkl": "Berik", "bkm": "Kom (Cameroon)", "bkn": "Bukitan", "bko": "Kwa'", "bkp": "Boko (Democratic Republic of Congo)", "bkq": "Bakairí", "bkr": "Bakumpai", "bks": "Northern Sorsoganon", "bkt": "Boloki", "bku": "Buhid", "bkv": "Bekwarra", "bkw": "Bekwel", "bkx": "Baikeno", "bky": "Bokyi", "bkz": "Bungku", "bla": "Siksika", "blb": "Bilua", "blc": "Bella Coola", "bld": "Bolango", "ble": "Balanta-Kentohe", "blf": "Buol", "blh": "Kuwaa", "bli": "Bolia", "blj": "Bolongan", "blk": "Pa'o Karen; Pa'O", "bll": "Biloxi", "blm": "Beli (South Sudan)", "bln": "Southern Catanduanes Bikol", "blo": "Anii", "blp": "Blablanga", "blq": "Baluan-Pam", "blr": "Blang", "bls": "Balaesang", "blt": "Tai Dam", "blv": "Kibala; Bolo", "blw": "Balangao", "blx": "Mag-Indi Ayta", "bly": "Notre", "blz": "Balantak", "bm": "Bambara", "bma": "Lame", "bmb": "Bembe", "bmc": "Biem", "bmd": "Baga Manduri", "bme": "Limassa", "bmf": "Bom-Kim", "bmg": "Bamwe", "bmh": "Kein", "bmi": "Bagirmi", "bmj": "Bote-Majhi", "bmk": "Ghayavi", "bml": "Bomboli", "bmm": "Northern Betsimisaraka Malagasy", "bmn": "Bina (Papua New Guinea)", "bmo": "Bambalang", "bmp": "Bulgebi", "bmq": "Bomu", "bmr": "Muinane", "bms": "Bilma Kanuri", "bmt": "Biao Mon", "bmu": "Somba-Siawari", "bmv": "Bum", "bmw": "Bomwali", "bmx": "Baimak", "bmz": "Baramu", "bn": "Bengali; Bangla", "bna": "Bonerate", "bnb": "Bookan", "bnc": "Bontok", "bnd": "Banda (Indonesia)", "bne": "Bintauna", "bnf": "Masiwang", "bng": "Benga", "bni": "Bangi", "bnj": "Eastern Tawbuid", "bnk": "Bierebo", "bnl": "Boon", "bnm": "Batanga", "bnn": "Bunun", "bno": "Bantoanon", "bnp": "Bola", "bnq": "Bantik", "bnr": "Butmas-Tur", "bns": "Bundeli", "bnt": "Bantu languages", "bnu": "Bentong", "bnv": "Bonerif; Beneraf; Edwas", "bnw": "Bisis", "bnx": "Bangubangu", "bny": "Bintulu", "bnz": "Beezen", "bo": "Tibetan", "boa": "Bora", "bob": "Aweer", "boe": "Mundabli", "bof": "Bolon", "bog": "Bamako Sign Language", "boh": "Boma", "boi": "Barbareño", "boj": "Anjam", "bok": "Bonjo", "bol": "Bole", "bom": "Berom", "bon": "Bine", "boo": "Tiemacèwè Bozo", "bop": "Bonkiman", "boq": "Bogaya", "bor": "Borôro", "bot": "Bongo", "bou": "Bondei", "bov": "Tuwuli", "bow": "Rema", "box": "Buamu", "boy": "Bodo (Central African Republic)", "boz": "Tiéyaxo Bozo", "bpa": "Daakaka", "bpc": "Mbuk", "bpd": "Banda-Banda", "bpe": "Bauni", "bpg": "Bonggo", "bph": "Botlikh", "bpi": "Bagupi", "bpj": "Binji", "bpk": "Orowe; 'Ôrôê", "bpl": "Broome Pearling Lugger Pidgin", "bpm": "Biyom", "bpn": "Dzao Min", "bpo": "Anasi", "bpp": "Kaure", "bpq": "Banda Malay", "bpr": "Koronadal Blaan", "bps": "Sarangani Blaan", "bpt": "Barrow Point", "bpu": "Bongu", "bpv": "Bian Marind", "bpw": "Bo (Papua New Guinea)", "bpx": "Palya Bareli", "bpy": "Bishnupriya", "bpz": "Bilba", "bqa": "Tchumbuli", "bqb": "Bagusa", "bqc": "Boko (Benin); Boo", "bqd": "Bung", "bqf": "Baga Kaloum", "bqg": "Bago-Kusuntu", "bqh": "Baima", "bqi": "Bakhtiari", "bqj": "Bandial", "bqk": "Banda-Mbrès", "bql": "Bilakura", "bqm": "Wumboko", "bqn": "Bulgarian Sign Language", "bqo": "Balo", "bqp": "Busa", "bqq": "Biritai", "bqr": "Burusu", "bqs": "Bosngun", "bqt": "Bamukumbit", "bqu": "Boguru", "bqv": "Koro Wachi; Begbere-Ejar", "bqw": "Buru (Nigeria)", "bqx": "Baangi", "bqy": "Bengkala Sign Language", "bqz": "Bakaka", 
"br": "Breton", "bra": "Braj", "brb": "Brao; Lave", "brc": "Berbice Creole Dutch", "brd": "Baraamu", "brf": "Bira", "brg": "Baure", "brh": "Brahui", "bri": "Mokpwe", "brj": "Bieria", "brk": "Birked", "brl": "Birwa", "brm": "Barambu", "brn": "Boruca", "bro": "Brokkat", "brp": "Barapasi", "brq": "Breri", "brr": "Birao", "brs": "Baras", "brt": "Bitare", "bru": "Eastern Bru", "brv": "Western Bru", "brw": "Bellari", "brx": "Bodo (India)", "bry": "Burui", "brz": "Bilbil", "bs": "Bosnian", "bsa": "Abinomn", "bsb": "Brunei Bisaya", "bsc": "Bassari; Oniyan", "bse": "Wushi", "bsf": "Bauchi", "bsg": "Bashkardi", "bsh": "Kati", "bsi": "Bassossi", "bsj": "Bangwinji", "bsk": "Burushaski", "bsl": "Basa-Gumna", "bsm": "Busami", "bsn": "Barasana-Eduria", "bso": "Buso", "bsp": "Baga Sitemu", "bsq": "Bassa", "bsr": "Bassa-Kontagora", "bss": "Akoose", "bst": "Basketo", "bsu": "Bahonsuai", "bsv": "Baga Sobané", "bsw": "Baiso", "bsx": "Yangkam", "bsy": "Sabah Bisaya", "bta": "Bata", "btc": "Bati (Cameroon)", "btd": "Batak Dairi", "bte": "Gamo-Ningi", "btf": "Birgit", "btg": "Gagnoa Bété", "bth": "Biatah Bidayuh", "bti": "Burate", "btj": "Bacanese Malay", "btk": "Batak languages", "btm": "Batak Mandailing", "btn": "Ratagnon", "bto": "Rinconada Bikol", "btp": "Budibud", "btq": "Batek", "btr": "Baetora", "bts": "Batak Simalungun", "btt": "Bete-Bendi", "btu": "Batu", "btv": "Bateri", "btw": "Butuanon", "btx": "Batak Karo", "bty": "Bobot", "btz": "Batak Alas-Kluet", "bua": "Buriat", "bub": "Bua", "buc": "Bushi", "bud": "Ntcham", "bue": "Beothuk", "buf": "Bushoong", "bug": "Buginese", "buh": "Younuo Bunu", "bui": "Bongili", "buj": "Basa-Gurmana", "buk": "Bugawac", "bum": "Bulu (Cameroon)", "bun": "Sherbro", "buo": "Terei", "bup": "Busoa", "buq": "Brem", "bus": "Bokobaru", "but": "Bungain", "buu": "Budu", "buv": "Bun", "buw": "Bubi", "bux": "Boghom", "buy": "Bullom So", "buz": "Bukwen", "bva": "Barein", "bvb": "Bube", "bvc": "Baelelea", "bvd": "Baeggu", "bve": "Berau Malay", "bvf": "Boor", "bvg": "Bonkeng", "bvh": "Bure", "bvi": "Belanda Viri", "bvj": "Baan", "bvk": "Bukat", "bvl": "Bolivian Sign Language", "bvm": "Bamunka", "bvn": "Buna", "bvo": "Bolgo", "bvp": "Bumang", "bvq": "Birri", "bvr": "Burarra", "bvt": "Bati (Indonesia)", "bvu": "Bukit Malay", "bvv": "Baniva", "bvw": "Boga", "bvx": "Dibole", "bvy": "Baybayanon", "bvz": "Bauzi", "bwa": "Bwatoo", "bwb": "Namosi-Naitasiri-Serua", "bwc": "Bwile", "bwd": "Bwaidoka", "bwe": "Bwe Karen", "bwf": "Boselewa", "bwg": "Barwe", "bwh": "Bishuo", "bwi": "Baniwa", "bwj": "Láá Láá Bwamu", "bwk": "Bauwaki", "bwl": "Bwela", "bwm": "Biwat", "bwn": "Wunai Bunu", "bwo": "Boro (Ethiopia); Borna (Ethiopia)", "bwp": "Mandobo Bawah", "bwq": "Southern Bobo Madaré", "bwr": "Bura-Pabir", "bws": "Bomboma", "bwt": "Bafaw-Balong", "bwu": "Buli (Ghana)", "bww": "Bwa", "bwx": "Bu-Nao Bunu", "bwy": "Cwi Bwamu", "bwz": "Bwisi", "bxa": "Tairaha", "bxb": "Belanda Bor", "bxc": "Molengue", "bxd": "Pela", "bxe": "Birale", "bxf": "Bilur; Minigir", "bxg": "Bangala", "bxh": "Buhutu", "bxi": "Pirlatapa", "bxj": "Bayungu", "bxk": "Bukusu; Lubukusu", "bxl": "Jalkunan", "bxm": "Mongolia Buriat", "bxn": "Burduna", "bxo": "Barikanchi", "bxp": "Bebil", "bxq": "Beele", "bxr": "Russia Buriat", "bxs": "Busam", "bxu": "China Buriat", "bxv": "Berakou", "bxw": "Bankagooma", "bxz": "Binahari", "bya": "Batak", "byb": "Bikya", "byc": "Ubaghara", "byd": "Benyadu'", "bye": "Pouye", "byf": "Bete", "byg": "Baygo", "byh": "Bhujel", "byi": "Buyu", "byj": "Bina (Nigeria)", "byk": "Biao", "byl": "Bayono", "bym": "Bidjara", 
"byn": "Bilin; Blin", "byo": "Biyo", "byp": "Bumaji", "byq": "Basay", "byr": "Baruya; Yipma", "bys": "Burak", "byt": "Berti", "byv": "Medumba", "byw": "Belhariya", "byx": "Qaqet", "byz": "Banaro", "bza": "Bandi", "bzb": "Andio", "bzc": "Southern Betsimisaraka Malagasy", "bzd": "Bribri", "bze": "Jenaama Bozo", "bzf": "Boikin", "bzg": "Babuza", "bzh": "Mapos Buang", "bzi": "Bisu", "bzj": "Belize Kriol English", "bzk": "Nicaragua Creole English", "bzl": "Boano (Sulawesi)", "bzm": "Bolondo", "bzn": "Boano (Maluku)", "bzo": "Bozaba", "bzp": "Kemberano", "bzq": "Buli (Indonesia)", "bzr": "Biri", "bzs": "Brazilian Sign Language", "bzt": "Brithenig", "bzu": "Burmeso", "bzv": "Naami", "bzw": "Basa (Nigeria)", "bzx": "Kɛlɛngaxo Bozo", "bzy": "Obanliku", "bzz": "Evant", "ca": "Catalan; Valencian", "caa": "Chortí", "cab": "Garifuna", "cac": "Chuj", "cad": "Caddo", "cae": "Lehar; Laalaa", "caf": "Southern Carrier", "cag": "Nivaclé", "cah": "Cahuarano", "cai": "Central American Indian languages", "caj": "Chané", "cak": "Kaqchikel; Cakchiquel", "cal": "Carolinian", "cam": "Cemuhî", "can": "Chambri", "cao": "Chácobo", "cap": "Chipaya", "caq": "Car Nicobarese", "car": "Galibi Carib", "cas": "Tsimané", "cau": "Caucasian languages", "cav": "Cavineña", "caw": "Callawalla", "cax": "Chiquitano", "cay": "Cayuga", "caz": "Canichana", "cba": "Chibchan languages", "cbb": "Cabiyarí", "cbc": "Carapana", "cbd": "Carijona", "cbg": "Chimila", "cbi": "Chachi", "cbj": "Ede Cabe", "cbk": "Chavacano", "cbl": "Bualkhaw Chin", "cbn": "Nyahkur", "cbo": "Izora", "cbq": "Tsucuba; Cuba", "cbr": "Cashibo-Cacataibo", "cbs": "Cashinahua", "cbt": "Chayahuita", "cbu": "Candoshi-Shapra", "cbv": "Cacua", "cbw": "Kinabalian", "cby": "Carabayo", "ccc": "Chamicuro", "ccd": "Cafundo Creole", "cce": "Chopi", "ccg": "Samba Daka", "cch": "Atsam", "ccj": "Kasanga", "ccl": "Cutchi-Swahili", "ccm": "Malaccan Creole Malay", "ccn": "North Caucasian languages", "cco": "Comaltepec Chinantec", "ccp": "Chakma", "ccr": "Cacaopera", "ccs": "South Caucasian languages", "cda": "Choni", "cdc": "Chadic languages", "cdd": "Caddoan languages", "cde": "Chenchu", "cdf": "Chiru", "cdh": "Chambeali", "cdi": "Chodri", "cdj": "Churahi", "cdm": "Chepang", "cdn": "Chaudangsi", "cdo": "Min Dong Chinese", "cdr": "Cinda-Regi-Tiyal", "cds": "Chadian Sign Language", "cdy": "Chadong", "cdz": "Koda", "ce": "Chechen", "cea": "Lower Chehalis", "ceb": "Cebuano", "ceg": "Chamacoco", "cek": "Eastern Khumi Chin", "cel": "Celtic languages", "cen": "Cen", "cet": "Centúúm", "cey": "Ekai Chin", "cfa": "Dijim-Bwilim", "cfd": "Cara", "cfg": "Como Karim", "cfm": "Falam Chin", "cga": "Changriwa", "cgc": "Kagayanen", "cgg": "Chiga", "cgk": "Chocangacakha", "ch": "Chamorro", "chb": "Chibcha", "chc": "Catawba", "chd": "Highland Oaxaca Chontal", "chf": "Tabasco Chontal", "chg": "Chagatai", "chh": "Chinook", "chj": "Ojitlán Chinantec", "chk": "Chuukese", "chl": "Cahuilla", "chm": "Mari (Russia)", "chn": "Chinook jargon", "cho": "Choctaw", "chp": "Chipewyan; Dene Suline", "chq": "Quiotepec Chinantec", "chr": "Cherokee", "cht": "Cholón", "chw": "Chuwabu", "chx": "Chantyal", "chy": "Cheyenne", "chz": "Ozumacín Chinantec", "cia": "Cia-Cia", "cib": "Ci Gbe", "cic": "Chickasaw", "cid": "Chimariko", "cie": "Cineni", "cih": "Chinali", "cik": "Chitkuli Kinnauri", "cim": "Cimbrian", "cin": "Cinta Larga", "cip": "Chiapanec", "cir": "Tiri; Haméa; Méa", "ciw": "Chippewa", "ciy": "Chaima", "cja": "Western Cham", "cje": "Chru", "cjh": "Upper Chehalis", "cji": "Chamalal", "cjk": "Chokwe", "cjm": "Eastern 
Cham", "cjn": "Chenapian", "cjo": "Ashéninka Pajonal", "cjp": "Cabécar", "cjs": "Shor", "cjv": "Chuave", "cjy": "Jinyu Chinese", "ckb": "Central Kurdish", "ckh": "Chak", "ckl": "Cibak", "ckm": "Chakavian", "ckn": "Kaang Chin", "cko": "Anufo", "ckq": "Kajakse", "ckr": "Kairak", "cks": "Tayo", "ckt": "Chukot", "cku": "Koasati", "ckv": "Kavalan", "ckx": "Caka", "cky": "Cakfem-Mushere", "ckz": "Cakchiquel-Quiché Mixed Language", "cla": "Ron", "clc": "Chilcotin", "cld": "Chaldean Neo-Aramaic", "cle": "Lealao Chinantec", "clh": "Chilisso", "cli": "Chakali", "clj": "Laitu Chin", "clk": "Idu-Mishmi", "cll": "Chala", "clm": "Clallam", "clo": "Lowland Oaxaca Chontal", "clt": "Lautu Chin", "clu": "Caluyanun", "clw": "Chulym", "cly": "Eastern Highland Chatino", "cma": "Maa", "cmc": "Chamic languages", "cme": "Cerma", "cmg": "Classical Mongolian", "cmi": "Emberá-Chamí", "cml": "Campalagian", "cmm": "Michigamea", "cmn": "Mandarin Chinese", "cmo": "Central Mnong", "cmr": "Mro-Khimi Chin", "cms": "Messapic", "cmt": "Camtho", "cna": "Changthang", "cnb": "Chinbon Chin", "cnc": "Côông", "cng": "Northern Qiang", "cnh": "Hakha Chin; Haka Chin", "cni": "Asháninka", "cnk": "Khumi Chin", "cnl": "Lalana Chinantec", "cno": "Con", "cnp": "Northern Ping Chinese; Northern Pinghua", "cnq": "Chung", "cnr": "Montenegrin", "cns": "Central Asmat", "cnt": "Tepetotutla Chinantec", "cnu": "Chenoua", "cnw": "Ngawn Chin", "cnx": "Middle Cornish", "co": "Corsican", "coa": "Cocos Islands Malay", "cob": "Chicomuceltec", "coc": "Cocopa", "cod": "Cocama-Cocamilla", "coe": "Koreguaje", "cof": "Colorado", "cog": "Chong", "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", "coj": "Cochimi", "cok": "Santa Teresa Cora", "col": "Columbia-Wenatchi", "com": "Comanche", "con": "Cofán", "coo": "Comox", "cop": "Coptic", "coq": "Coquille", "cot": "Caquinte", "cou": "Wamey", "cov": "Cao Miao", "cow": "Cowlitz", "cox": "Nanti", "coz": "Chochotec", "cpa": "Palantla Chinantec", "cpb": "Ucayali-Yurúa Ashéninka", "cpc": "Ajyíninka Apurucayali", "cpe": "English-based creoles and pidgins", "cpf": "French-based creoles and pidgins", "cpg": "Cappadocian Greek", "cpi": "Chinese Pidgin English", "cpn": "Cherepon", "cpo": "Kpeego", "cpp": "Portuguese-based creoles and pidgins", "cps": "Capiznon", "cpu": "Pichis Ashéninka", "cpx": "Pu-Xian Chinese", "cpy": "South Ucayali Ashéninka", "cqd": "Chuanqiandian Cluster Miao", "cr": "Cree", "cra": "Chara", "crb": "Island Carib", "crc": "Lonwolwol", "crd": "Coeur d'Alene", "crf": "Caramanta", "crg": "Michif", "crh": "Crimean Tatar; Crimean Turkish", "cri": "Sãotomense", "crj": "Southern East Cree", "crk": "Plains Cree", "crl": "Northern East Cree", "crm": "Moose Cree", "crn": "El Nayar Cora", "cro": "Crow", "crp": "Creoles and pidgins", "crq": "Iyo'wujwa Chorote", "crr": "Carolina Algonquian", "crs": "Seselwa Creole French", "crt": "Iyojwa'ja Chorote", "crv": "Chaura", "crw": "Chrau", "crx": "Carrier", "cry": "Cori", "crz": "Cruzeño", "cs": "Czech", "csa": "Chiltepec Chinantec", "csb": "Kashubian", "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", "csd": "Chiangmai Sign Language", "cse": "Czech Sign Language", "csf": "Cuba Sign Language", "csg": "Chilean Sign Language", "csh": "Asho Chin", "csi": "Coast Miwok", "csj": "Songlai Chin", "csk": "Jola-Kasa", "csl": "Chinese Sign Language", "csm": "Central Sierra Miwok", "csn": "Colombian Sign Language", "cso": "Sochiapam Chinantec; Sochiapan Chinantec", "csp": "Southern Ping Chinese; Southern Pinghua", "csq": "Croatia Sign 
Language", "csr": "Costa Rican Sign Language", "css": "Southern Ohlone", "cst": "Northern Ohlone", "csu": "Central Sudanic languages", "csv": "Sumtu Chin", "csw": "Swampy Cree", "csx": "Cambodian Sign Language", "csy": "Siyin Chin", "csz": "Coos", "cta": "Tataltepec Chatino", "ctc": "Chetco", "ctd": "Tedim Chin", "cte": "Tepinapa Chinantec", "ctg": "Chittagonian", "cth": "Thaiphum Chin", "ctl": "Tlacoatzintepec Chinantec", "ctm": "Chitimacha", "ctn": "Chhintange", "cto": "Emberá-Catío", "ctp": "Western Highland Chatino", "cts": "Northern Catanduanes Bikol", "ctt": "Wayanad Chetti", "ctu": "Chol", "cty": "Moundadan Chetty", "ctz": "Zacatepec Chatino", "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", "cua": "Cua", "cub": "Cubeo", "cuc": "Usila Chinantec", "cuh": "Chuka; Gichuka", "cui": "Cuiba", "cuj": "Mashco Piro", "cuk": "San Blas Kuna", "cul": "Culina; Kulina", "cuo": "Cumanagoto", "cup": "Cupeño", "cuq": "Cun", "cur": "Chhulung", "cus": "Cushitic languages", "cut": "Teutila Cuicatec", "cuu": "Tai Ya", "cuv": "Cuvok", "cuw": "Chukwa", "cux": "Tepeuxila Cuicatec", "cuy": "Cuitlatec", "cv": "Chuvash", "cvg": "Chug", "cvn": "Valle Nacional Chinantec", "cwa": "Kabwa", "cwb": "Maindo", "cwd": "Woods Cree", "cwe": "Kwere", "cwg": "Chewong; Cheq Wong", "cwt": "Kuwaataay", "cy": "Welsh", "cya": "Nopala Chatino", "cyb": "Cayubaba", "cyo": "Cuyonon", "czh": "Huizhou Chinese", "czk": "Knaanic", "czn": "Zenzontepec Chatino", "czo": "Min Zhong Chinese", "czt": "Zotung Chin", "da": "Danish", "daa": "Dangaléat", "dac": "Dambi", "dad": "Marik", "dae": "Duupa", "dag": "Dagbani", "dah": "Gwahatike", "dai": "Day", "daj": "Dar Fur Daju", "dak": "Dakota", "dal": "Dahalo", "dam": "Damakawa", "dao": "Daai Chin", "daq": "Dandami Maria", "dar": "Dargwa", "das": "Daho-Doo", "dau": "Dar Sila Daju", "dav": "Taita; Dawida", "daw": "Davawenyo", "dax": "Dayi", "day": "Land Dayak languages", "daz": "Dao", "dba": "Bangime", "dbb": "Deno", "dbd": "Dadiya", "dbe": "Dabe", "dbf": "Edopi", "dbg": "Dogul Dom Dogon", "dbi": "Doka", "dbj": "Ida'an", "dbl": "Dyirbal", "dbm": "Duguri", "dbn": "Duriankere", "dbo": "Dulbu", "dbp": "Duwai", "dbq": "Daba", "dbr": "Dabarre", "dbt": "Ben Tey Dogon", "dbu": "Bondum Dom Dogon", "dbv": "Dungu", "dbw": "Bankan Tey Dogon", "dby": "Dibiyaso", "dcc": "Deccan", "dcr": "Negerhollands", "dda": "Dadi Dadi", "ddd": "Dongotono", "dde": "Doondo", "ddg": "Fataluku", "ddi": "West Goodenough", "ddj": "Jaru", "ddn": "Dendi (Benin)", "ddo": "Dido", "ddr": "Dhudhuroa", "dds": "Donno So Dogon", "ddw": "Dawera-Daweloor", "de": "German", "dec": "Dagik", "ded": "Dedua", "dee": "Dewoin", "def": "Dezfuli", "deg": "Degema", "deh": "Dehwari", "dei": "Demisa", "dek": "Dek", "del": "Delaware", "dem": "Dem", "den": "Slave (Athapascan)", "dep": "Pidgin Delaware", "deq": "Dendi (Central African Republic)", "der": "Deori", "des": "Desano", "dev": "Domung", "dez": "Dengese", "dga": "Southern Dagaare", "dgb": "Bunoge Dogon", "dgc": "Casiguran Dumagat Agta", "dgd": "Dagaari Dioula", "dge": "Degenan", "dgg": "Doga", "dgh": "Dghwede", "dgi": "Northern Dagara", "dgk": "Dagba", "dgl": "Andaandi; Dongolawi", "dgn": "Dagoman", "dgo": "Dogri (individual language)", "dgr": "Dogrib; Tłı̨chǫ", "dgs": "Dogoso", "dgt": "Ndra'ngith", "dgw": "Daungwurrung", "dgx": "Doghoro", "dgz": "Daga", "dhd": "Dhundari", "dhg": "Dhangu-Djangu; Dhangu; Djangu", "dhi": "Dhimal", "dhl": "Dhalandji", "dhm": "Zemba", "dhn": "Dhanki", "dho": "Dhodia", "dhr": "Dhargari", "dhs": "Dhaiso", "dhu": "Dhurga", "dhv": 
"Dehu; Drehu", "dhw": "Dhanwar (Nepal)", "dhx": "Dhungaloo", "dia": "Dia", "dib": "South Central Dinka", "dic": "Lakota Dida", "did": "Didinga", "dif": "Dieri; Diyari", "dig": "Digo; Chidigo", "dih": "Kumiai", "dii": "Dimbong", "dij": "Dai", "dik": "Southwestern Dinka", "dil": "Dilling", "dim": "Dime", "din": "Dinka", "dio": "Dibo", "dip": "Northeastern Dinka", "diq": "Dimli (individual language)", "dir": "Dirim", "dis": "Dimasa", "diu": "Diriku", "diw": "Northwestern Dinka", "dix": "Dixon Reef", "diy": "Diuwe", "diz": "Ding", "dja": "Djadjawurrung", "djb": "Djinba", "djc": "Dar Daju Daju", "djd": "Djamindjung; Ngaliwurru", "dje": "Zarma", "djf": "Djangun", "dji": "Djinang", "djj": "Djeebbana", "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", "djm": "Jamsay Dogon", "djn": "Jawoyn; Djauan", "djo": "Jangkang", "djr": "Djambarrpuyngu", "dju": "Kapriman", "djw": "Djawi", "dka": "Dakpakha", "dkg": "Kadung", "dkk": "Dakka", "dkr": "Kuijau", "dks": "Southeastern Dinka", "dkx": "Mazagway", "dlg": "Dolgan", "dlk": "Dahalik", "dlm": "Dalmatian", "dln": "Darlong", "dma": "Duma", "dmb": "Mombo Dogon", "dmc": "Gavak", "dmd": "Madhi Madhi", "dme": "Dugwor", "dmf": "Medefaidrin", "dmg": "Upper Kinabatangan", "dmk": "Domaaki", "dml": "Dameli", "dmm": "Dama", "dmn": "Mande languages", "dmo": "Kemedzung", "dmr": "East Damar", "dms": "Dampelas", "dmu": "Dubu; Tebi", "dmv": "Dumpas", "dmw": "Mudburra", "dmx": "Dema", "dmy": "Demta; Sowari", "dna": "Upper Grand Valley Dani", "dnd": "Daonda", "dne": "Ndendeule", "dng": "Dungan", "dni": "Lower Grand Valley Dani", "dnj": "Dan", "dnk": "Dengka", "dnn": "Dzùùngoo", "dno": "Ndrulo; Northern Lendu", "dnr": "Danaru", "dnt": "Mid Grand Valley Dani", "dnu": "Danau", "dnv": "Danu", "dnw": "Western Dani", "dny": "Dení", "doa": "Dom", "dob": "Dobu", "doc": "Northern Dong", "doe": "Doe", "dof": "Domu", "doh": "Dong", "doi": "Dogri (macrolanguage)", "dok": "Dondo", "dol": "Doso", "don": "Toura (Papua New Guinea)", "doo": "Dongo", "dop": "Lukpa", "doq": "Dominican Sign Language", "dor": "Dori'o", "dos": "Dogosé", "dot": "Dass", "dov": "Dombe", "dow": "Doyayo", "dox": "Bussa", "doy": "Dompo", "doz": "Dorze", "dpp": "Papar", "dra": "Dravidian languages", "drb": "Dair", "drc": "Minderico", "drd": "Darmiya", "dre": "Dolpo", "drg": "Rungus", "dri": "C'Lela", "drl": "Paakantyi", "drn": "West Damar", "dro": "Daro-Matu Melanau", "drq": "Dura", "drs": "Gedeo", "drt": "Drents", "dru": "Rukai", "dry": "Darai", "dsb": "Lower Sorbian", "dse": "Dutch Sign Language", "dsh": "Daasanach", "dsi": "Disa", "dsl": "Danish Sign Language", "dsn": "Dusner", "dso": "Desiya", "dsq": "Tadaksahak", "dsz": "Mardin Sign Language", "dta": "Daur", "dtb": "Labuk-Kinabatangan Kadazan", "dtd": "Ditidaht", "dth": "Adithinngithigh", "dti": "Ana Tinga Dogon", "dtk": "Tene Kan Dogon", "dtm": "Tomo Kan Dogon", "dtn": "Daatsʼíin", "dto": "Tommo So Dogon", "dtp": "Kadazan Dusun; Central Dusun", "dtr": "Lotud", "dts": "Toro So Dogon", "dtt": "Toro Tegu Dogon", "dtu": "Tebul Ure Dogon", "dty": "Dotyali", "dua": "Duala", "dub": "Dubli", "duc": "Duna", "due": "Umiray Dumaget Agta", "duf": "Dumbea; Drubea", "dug": "Duruma; Chiduruma", "duh": "Dungra Bhil", "dui": "Dumun", "duk": "Uyajitaya", "dul": "Alabat Island Agta", "dum": "Middle Dutch (ca. 
1050-1350)", "dun": "Dusun Deyah", "duo": "Dupaninan Agta", "dup": "Duano", "duq": "Dusun Malang", "dur": "Dii", "dus": "Dumi", "duu": "Drung", "duv": "Duvle", "duw": "Dusun Witu", "dux": "Duungooma", "duy": "Dicamay Agta", "duz": "Duli-Gey", "dv": "Dhivehi; Divehi; Maldivian", "dva": "Duau", "dwa": "Diri", "dwk": "Dawik Kui", "dwr": "Dawro", "dws": "Dutton World Speedwords", "dwu": "Dhuwal", "dww": "Dawawa", "dwy": "Dhuwaya", "dwz": "Dewas Rai", "dya": "Dyan", "dyb": "Dyaberdyaber", "dyd": "Dyugun", "dyg": "Villa Viciosa Agta", "dyi": "Djimini Senoufo", "dym": "Yanda Dom Dogon", "dyn": "Dyangadi; Dhanggatti", "dyo": "Jola-Fonyi", "dyu": "Dyula", "dyy": "Djabugay; Dyaabugay", "dz": "Dzongkha", "dza": "Tunzu", "dze": "Djiwarli", "dzg": "Dazaga", "dzl": "Dzalakha", "dzn": "Dzando", "eaa": "Karenggapa", "ebc": "Beginci", "ebg": "Ebughu", "ebk": "Eastern Bontok", "ebo": "Teke-Ebo", "ebr": "Ebrié", "ebu": "Embu; Kiembu", "ecr": "Eteocretan", "ecs": "Ecuadorian Sign Language", "ecy": "Eteocypriot", "ee": "Ewe", "eee": "E", "efa": "Efai", "efe": "Efe", "efi": "Efik", "ega": "Ega", "egl": "Emilian", "egm": "Benamanga", "ego": "Eggon", "egx": "Egyptian languages", "egy": "Egyptian (Ancient)", "ehs": "Miyakubo Sign Language", "ehu": "Ehueun", "eip": "Eipomek", "eit": "Eitiep", "eiv": "Askopan", "eja": "Ejamat", "eka": "Ekajuk", "eke": "Ekit", "ekg": "Ekari", "eki": "Eki", "ekk": "Standard Estonian", "ekl": "Kol (Bangladesh); Kol", "ekm": "Elip", "eko": "Koti", "ekp": "Ekpeye", "ekr": "Yace", "eky": "Eastern Kayah", "el": "Modern Greek (1453-)", "ele": "Elepi", "elh": "El Hugeirat", "eli": "Nding", "elk": "Elkei", "elm": "Eleme", "elo": "El Molo", "elu": "Elu", "elx": "Elamite", "ema": "Emai-Iuleha-Ora", "emb": "Embaloh", "eme": "Emerillon", "emg": "Eastern Meohang", "emi": "Mussau-Emira", "emk": "Eastern Maninkakan", "emm": "Mamulique", "emn": "Eman", "emp": "Northern Emberá", "emq": "Eastern Minyag", "ems": "Pacific Gulf Yupik", "emu": "Eastern Muria", "emw": "Emplawas", "emx": "Erromintxela", "emy": "Epigraphic Mayan", "emz": "Mbessa", "en": "English", "ena": "Apali", "enb": "Markweeta", "enc": "En", "end": "Ende", "enf": "Forest Enets", "enh": "Tundra Enets", "enl": "Enlhet", "enm": "Middle English (1100-1500)", "enn": "Engenni", "eno": "Enggano", "enq": "Enga", "enr": "Emumu; Emem", "enu": "Enu", "env": "Enwan (Edo State)", "enw": "Enwan (Akwa Ibom State)", "enx": "Enxet", "eo": "Esperanto", "eot": "Beti (Côte d'Ivoire)", "epi": "Epie", "era": "Eravallan", "erg": "Sie", "erh": "Eruwa", "eri": "Ogea", "erk": "South Efate", "ero": "Horpa", "err": "Erre", "ers": "Ersu", "ert": "Eritai", "erw": "Erokwanas", "es": "Spanish; Castilian", "ese": "Ese Ejja", "esg": "Aheri Gondi", "esh": "Eshtehardi", "esi": "North Alaskan Inupiatun", "esk": "Northwest Alaska Inupiatun", "esl": "Egypt Sign Language", "esm": "Esuma", "esn": "Salvadoran Sign Language", "eso": "Estonian Sign Language", "esq": "Esselen", "ess": "Central Siberian Yupik", "esu": "Central Yupik", "esx": "Eskimo-Aleut languages", "esy": "Eskayan", "et": "Estonian", "etb": "Etebi", "etc": "Etchemin", "eth": "Ethiopian Sign Language", "etn": "Eton (Vanuatu)", "eto": "Eton (Cameroon)", "etr": "Edolo", "ets": "Yekhee", "ett": "Etruscan", "etu": "Ejagham", "etx": "Eten", "etz": "Semimi", "eu": "Basque", "euq": "Basque (family)", "eve": "Even", "evh": "Uvbie", "evn": "Evenki", "ewo": "Ewondo", "ext": "Extremaduran", "eya": "Eyak", "eyo": "Keiyo", "eza": "Ezaa", "eze": "Uzekwe", "fa": "Persian", "faa": "Fasu", "fab": "Fa d'Ambu", "fad": "Wagi", "faf": 
"Fagani", "fag": "Finongan", "fah": "Baissa Fali", "fai": "Faiwol", "faj": "Faita", "fak": "Fang (Cameroon)", "fal": "South Fali", "fam": "Fam", "fan": "Fang (Equatorial Guinea)", "fap": "Paloor", "far": "Fataleka", "fat": "Fanti", "fau": "Fayu", "fax": "Fala", "fay": "Southwestern Fars", "faz": "Northwestern Fars", "fbl": "West Albay Bikol", "fcs": "Quebec Sign Language", "fer": "Feroge", "ff": "Fulah", "ffi": "Foia Foia", "ffm": "Maasina Fulfulde", "fgr": "Fongoro", "fi": "Finnish", "fia": "Nobiin", "fie": "Fyer", "fif": "Faifi", "fil": "Filipino; Pilipino", "fip": "Fipa", "fir": "Firan", "fit": "Tornedalen Finnish; Meänkieli", "fiu": "Finno-Ugrian languages", "fiw": "Fiwaga", "fj": "Fijian", "fkk": "Kirya-Konzəl", "fkv": "Kven Finnish", "fla": "Kalispel-Pend d'Oreille", "flh": "Foau", "fli": "Fali", "fll": "North Fali", "fln": "Flinders Island", "flr": "Fuliiru", "fly": "Flaaitaal; Tsotsitaal", "fmp": "Fe'fe'", "fmu": "Far Western Muria", "fnb": "Fanbak", "fng": "Fanagalo", "fni": "Fania", "fo": "Faroese", "fod": "Foodo", "foi": "Foi", "fom": "Foma", "fon": "Fon", "for": "Fore", "fos": "Siraya", "fox": "Formosan languages", "fpe": "Fernando Po Creole English", "fqs": "Fas", "fr": "French", "frc": "Cajun French", "frd": "Fordata", "frk": "Frankish", "frm": "Middle French (ca. 1400-1600)", "fro": "Old French (842-ca. 1400)", "frp": "Arpitan; Francoprovençal", "frq": "Forak", "frr": "Northern Frisian", "frs": "Eastern Frisian", "frt": "Fortsenal", "fse": "Finnish Sign Language", "fsl": "French Sign Language", "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", "fub": "Adamawa Fulfulde", "fuc": "Pulaar", "fud": "East Futuna", "fue": "Borgu Fulfulde", "fuf": "Pular", "fuh": "Western Niger Fulfulde", "fui": "Bagirmi Fulfulde", "fuj": "Ko", "fum": "Fum", "fun": "Fulniô", "fuq": "Central-Eastern Niger Fulfulde", "fur": "Friulian", "fut": "Futuna-Aniwa", "fuu": "Furu", "fuv": "Nigerian Fulfulde", "fuy": "Fuyug", "fvr": "Fur", "fwa": "Fwâi", "fwe": "Fwe", "fy": "Western Frisian", "ga": "Irish", "gaa": "Ga", "gab": "Gabri", "gac": "Mixed Great Andamanese", "gad": "Gaddang", "gae": "Guarequena", "gaf": "Gende", "gag": "Gagauz", "gah": "Alekano", "gai": "Borei", "gaj": "Gadsup", "gak": "Gamkonora", "gal": "Galolen", "gam": "Kandawo", "gan": "Gan Chinese", "gao": "Gants", "gap": "Gal", "gaq": "Gata'", "gar": "Galeya", "gas": "Adiwasi Garasia", "gat": "Kenati", "gau": "Mudhili Gadaba", "gaw": "Nobonob", "gax": "Borana-Arsi-Guji Oromo", "gay": "Gayo", "gaz": "West Central Oromo", "gba": "Gbaya (Central African Republic)", "gbb": "Kaytetye", "gbd": "Karajarri", "gbe": "Niksek", "gbf": "Gaikundi", "gbg": "Gbanziri", "gbh": "Defi Gbe", "gbi": "Galela", "gbj": "Bodo Gadaba", "gbk": "Gaddi", "gbl": "Gamit", "gbm": "Garhwali", "gbn": "Mo'da", "gbo": "Northern Grebo", "gbp": "Gbaya-Bossangoa", "gbq": "Gbaya-Bozoum", "gbr": "Gbagyi", "gbs": "Gbesi Gbe", "gbu": "Gagadu", "gbv": "Gbanu", "gbw": "Gabi-Gabi", "gbx": "Eastern Xwla Gbe", "gby": "Gbari", "gbz": "Zoroastrian Dari", "gcc": "Mali", "gcd": "Ganggalida", "gce": "Galice", "gcf": "Guadeloupean Creole French", "gcl": "Grenadian Creole English", "gcn": "Gaina", "gcr": "Guianese Creole French", "gct": "Colonia Tovar German", "gd": "Scottish Gaelic; Gaelic", "gda": "Gade Lohar", "gdb": "Pottangi Ollar Gadaba", "gdc": "Gugu Badhun", "gdd": "Gedaged", "gde": "Gude", "gdf": "Guduf-Gava", "gdg": "Ga'dang", "gdh": "Gadjerawang; Gajirrabeng", "gdi": "Gundi", "gdj": "Gurdjar", "gdk": "Gadang", "gdl": "Dirasha", "gdm": 
"Laal", "gdn": "Umanakaina", "gdo": "Ghodoberi", "gdq": "Mehri", "gdr": "Wipi", "gds": "Ghandruk Sign Language", "gdt": "Kungardutyi", "gdu": "Gudu", "gdx": "Godwari", "gea": "Geruma", "geb": "Kire", "gec": "Gboloo Grebo", "ged": "Gade", "gef": "Gerai", "geg": "Gengle", "geh": "Hutterite German; Hutterisch", "gei": "Gebe", "gej": "Gen", "gek": "Ywom", "gel": "ut-Ma'in", "gem": "Germanic languages", "geq": "Geme", "ges": "Geser-Gorom", "gev": "Eviya", "gew": "Gera", "gex": "Garre", "gey": "Enya", "gez": "Geez", "gfk": "Patpatar", "gft": "Gafat", "gga": "Gao", "ggb": "Gbii", "ggd": "Gugadj", "gge": "Gurr-goni", "ggg": "Gurgula", "ggk": "Kungarakany", "ggl": "Ganglau", "ggt": "Gitua", "ggu": "Gagu; Gban", "ggw": "Gogodala", "gha": "Ghadamès", "ghc": "Hiberno-Scottish Gaelic", "ghe": "Southern Ghale", "ghh": "Northern Ghale", "ghk": "Geko Karen", "ghl": "Ghulfan", "ghn": "Ghanongga", "gho": "Ghomara", "ghr": "Ghera", "ghs": "Guhu-Samane", "ght": "Kuke; Kutang Ghale", "gia": "Kija", "gib": "Gibanawa", "gic": "Gail", "gid": "Gidar", "gie": "Gaɓogbo; Guébie", "gig": "Goaria", "gih": "Githabul", "gii": "Girirra", "gil": "Gilbertese", "gim": "Gimi (Eastern Highlands)", "gin": "Hinukh", "gip": "Gimi (West New Britain)", "giq": "Green Gelao", "gir": "Red Gelao", "gis": "North Giziga", "git": "Gitxsan", "giu": "Mulao", "giw": "White Gelao", "gix": "Gilima", "giy": "Giyug", "giz": "South Giziga", "gjk": "Kachi Koli", "gjm": "Gunditjmara", "gjn": "Gonja", "gjr": "Gurindji Kriol", "gju": "Gujari", "gka": "Guya", "gkd": "Magɨ (Madang Province)", "gke": "Ndai", "gkn": "Gokana", "gko": "Kok-Nar", "gkp": "Guinea Kpelle", "gku": "ǂUngkue", "gl": "Galician", "glb": "Belning", "glc": "Bon Gula", "gld": "Nanai", "glh": "Northwest Pashai; Northwest Pashayi", "glj": "Gula Iro", "glk": "Gilaki", "gll": "Garlali", "glo": "Galambu", "glr": "Glaro-Twabo", "glu": "Gula (Chad)", "glw": "Glavda", "gly": "Gule", "gma": "Gambera", "gmb": "Gula'alaa", "gmd": "Mághdì", "gme": "East Germanic languages", "gmg": "Magɨyi", "gmh": "Middle High German (ca. 1050-1500)", "gml": "Middle Low German", "gmm": "Gbaya-Mbodomo", "gmn": "Gimnime", "gmq": "North Germanic languages", "gmr": "Mirning; Mirniny", "gmu": "Gumalu", "gmv": "Gamo", "gmw": "West Germanic languages", "gmx": "Magoma", "gmy": "Mycenaean Greek", "gmz": "Mgbolizhia", "gn": "Guarani", "gna": "Kaansa", "gnb": "Gangte", "gnc": "Guanche", "gnd": "Zulgo-Gemzek", "gne": "Ganang", "gng": "Ngangam", "gnh": "Lere", "gni": "Gooniyandi", "gnj": "Ngen", "gnk": "ǁGana", "gnl": "Gangulu", "gnm": "Ginuman", "gnn": "Gumatj", "gno": "Northern Gondi", "gnq": "Gana", "gnr": "Gureng Gureng", "gnt": "Guntai", "gnu": "Gnau", "gnw": "Western Bolivian Guaraní", "gnz": "Ganzi", "goa": "Guro", "gob": "Playero", "goc": "Gorakor", "god": "Godié", "goe": "Gongduk", "gof": "Gofa", "gog": "Gogo", "goh": "Old High German (ca. 
750-1050)", "goi": "Gobasi", "goj": "Gowlan", "gok": "Gowli", "gol": "Gola", "gom": "Goan Konkani", "gon": "Gondi", "goo": "Gone Dau", "gop": "Yeretuar", "goq": "Gorap", "gor": "Gorontalo", "gos": "Gronings", "got": "Gothic", "gou": "Gavar", "gov": "Goo", "gow": "Gorowa", "gox": "Gobu", "goy": "Goundo", "goz": "Gozarkhani", "gpa": "Gupa-Abawa", "gpe": "Ghanaian Pidgin English", "gpn": "Taiap", "gqa": "Ga'anda", "gqi": "Guiqiong", "gqn": "Guana (Brazil)", "gqr": "Gor", "gqu": "Qau", "gra": "Rajput Garasia", "grb": "Grebo", "grc": "Ancient Greek (to 1453)", "grd": "Guruntum-Mbaaru", "grg": "Madi", "grh": "Gbiri-Niragu", "gri": "Ghari", "grj": "Southern Grebo", "grk": "Greek languages", "grm": "Kota Marudu Talantang", "gro": "Groma", "grq": "Gorovu", "grr": "Taznatit", "grs": "Gresi", "grt": "Garo", "gru": "Kistane", "grv": "Central Grebo", "grw": "Gweda", "grx": "Guriaso", "gry": "Barclayville Grebo", "grz": "Guramalum", "gse": "Ghanaian Sign Language", "gsg": "German Sign Language", "gsl": "Gusilay", "gsm": "Guatemalan Sign Language", "gsn": "Nema; Gusan", "gso": "Southwest Gbaya", "gsp": "Wasembo", "gss": "Greek Sign Language", "gsw": "Swiss German; Alemannic; Alsatian", "gta": "Guató", "gtu": "Aghu-Tharnggala", "gu": "Gujarati", "gua": "Shiki", "gub": "Guajajára", "guc": "Wayuu", "gud": "Yocoboué Dida", "gue": "Gurindji", "guf": "Gupapuyngu", "gug": "Paraguayan Guaraní", "guh": "Guahibo", "gui": "Eastern Bolivian Guaraní", "guk": "Gumuz", "gul": "Sea Island Creole English", "gum": "Guambiano", "gun": "Mbyá Guaraní", "guo": "Guayabero", "gup": "Gunwinggu", "guq": "Aché", "gur": "Farefare", "gus": "Guinean Sign Language", "gut": "Maléku Jaíka", "guu": "Yanomamö", "guw": "Gun", "gux": "Gourmanchéma", "guz": "Gusii; Ekegusii", "gv": "Manx", "gva": "Guana (Paraguay)", "gvc": "Guanano", "gve": "Duwet", "gvf": "Golin", "gvj": "Guajá", "gvl": "Gulay", "gvm": "Gurmana", "gvn": "Kuku-Yalanji", "gvo": "Gavião Do Jiparaná", "gvp": "Pará Gavião", "gvr": "Gurung", "gvs": "Gumawana", "gvy": "Guyani", "gwa": "Mbato", "gwb": "Gwa", "gwc": "Gawri; Kalami", "gwd": "Gawwada", "gwe": "Gweno", "gwf": "Gowro", "gwg": "Moo", "gwi": "Gwichʼin", "gwj": "ǀGwi", "gwm": "Awngthim", "gwn": "Gwandara", "gwr": "Gwere", "gwt": "Gawar-Bati", "gwu": "Guwamu", "gww": "Kwini", "gwx": "Gua", "gxx": "Wè Southern", "gya": "Northwest Gbaya", "gyb": "Garus", "gyd": "Kayardild", "gye": "Gyem", "gyf": "Gungabula", "gyg": "Gbayi", "gyi": "Gyele", "gyl": "Gayil", "gym": "Ngäbere", "gyn": "Guyanese Creole English", "gyo": "Gyalsumdo", "gyr": "Guarayu", "gyy": "Gunya", "gyz": "Geji; Gyaazi", "gza": "Ganza", "gzi": "Gazi", "gzn": "Gane", "ha": "Hausa", "haa": "Han", "hab": "Hanoi Sign Language", "hac": "Gurani", "had": "Hatam", "hae": "Eastern Oromo", "haf": "Haiphong Sign Language", "hag": "Hanga", "hah": "Hahon", "hai": "Haida", "haj": "Hajong", "hak": "Hakka Chinese", "hal": "Halang", "ham": "Hewa", "han": "Hangaza", "hao": "Hakö", "hap": "Hupla", "haq": "Ha", "har": "Harari", "has": "Haisla", "hav": "Havu", "haw": "Hawaiian", "hax": "Southern Haida", "hay": "Haya", "haz": "Hazaragi", "hba": "Hamba", "hbb": "Huba", "hbn": "Heiban", "hbo": "Ancient Hebrew", "hbu": "Habu", "hca": "Andaman Creole Hindi", "hch": "Huichol", "hdn": "Northern Haida", "hds": "Honduras Sign Language", "hdy": "Hadiyya", "he": "Hebrew", "hea": "Northern Qiandong Miao", "hed": "Herdé", "heg": "Helong", "heh": "Hehe", "hei": "Heiltsuk", "hem": "Hemba", "hgm": "Haiǁom", "hgw": "Haigwai", "hhi": "Hoia Hoia", "hhr": "Kerak", "hhy": "Hoyahoya", "hi": "Hindi", "hia": 
"Lamang", "hib": "Hibito", "hid": "Hidatsa", "hif": "Fiji Hindi", "hig": "Kamwe", "hih": "Pamosu", "hii": "Hinduri", "hij": "Hijuk", "hik": "Seit-Kaitetu", "hil": "Hiligaynon", "him": "Himachali languages; Western Pahari languages", "hio": "Tsoa", "hir": "Himarimã", "hit": "Hittite", "hiw": "Hiw", "hix": "Hixkaryána", "hji": "Haji", "hka": "Kahe", "hke": "Hunde", "hkh": "Khah; Poguli", "hkk": "Hunjara-Kaina Ke", "hkn": "Mel-Khaonh", "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", "hla": "Halia", "hlb": "Halbi", "hld": "Halang Doan", "hle": "Hlersu", "hlt": "Matu Chin", "hlu": "Hieroglyphic Luwian", "hma": "Southern Mashan Hmong; Southern Mashan Miao", "hmb": "Humburi Senni Songhay", "hmc": "Central Huishui Hmong; Central Huishui Miao", "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", "hmf": "Hmong Don", "hmg": "Southwestern Guiyang Hmong", "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", "hmi": "Northern Huishui Hmong; Northern Huishui Miao", "hmj": "Ge; Gejia", "hmk": "Maek", "hml": "Luopohe Hmong; Luopohe Miao", "hmm": "Central Mashan Hmong; Central Mashan Miao", "hmn": "Hmong; Mong", "hmp": "Northern Mashan Hmong; Northern Mashan Miao", "hmq": "Eastern Qiandong Miao", "hmr": "Hmar", "hms": "Southern Qiandong Miao", "hmt": "Hamtai", "hmu": "Hamap", "hmv": "Hmong Dô", "hmw": "Western Mashan Hmong; Western Mashan Miao", "hmx": "Hmong-Mien languages", "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", "hmz": "Hmong Shua; Sinicized Miao", "hna": "Mina (Cameroon)", "hnd": "Southern Hindko", "hne": "Chhattisgarhi", "hng": "Hungu", "hnh": "ǁAni", "hni": "Hani", "hnj": "Hmong Njua; Mong Leng; Mong Njua", "hnn": "Hanunoo", "hno": "Northern Hindko", "hns": "Caribbean Hindustani", "hnu": "Hung", "ho": "Hiri Motu", "hoa": "Hoava", "hob": "Mari (Madang Province)", "hoc": "Ho", "hod": "Holma", "hoe": "Horom", "hoh": "Hobyót", "hoi": "Holikachuk", "hoj": "Hadothi; Haroti", "hok": "Hokan languages", "hol": "Holu", "hom": "Homa", "hoo": "Holoholo", "hop": "Hopi", "hor": "Horo", "hos": "Ho Chi Minh City Sign Language", "hot": "Hote; Malê", "hov": "Hovongan", "how": "Honi", "hoy": "Holiya", "hoz": "Hozo", "hpo": "Hpon", "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", "hr": "Croatian", "hra": "Hrangkhol", "hrc": "Niwer Mil", "hre": "Hre", "hrk": "Haruku", "hrm": "Horned Miao", "hro": "Haroi", "hrp": "Nhirrpi", "hrt": "Hértevin", "hru": "Hruso", "hrw": "Warwar Feni", "hrx": "Hunsrik", "hrz": "Harzani", "hsb": "Upper Sorbian", "hsh": "Hungarian Sign Language", "hsl": "Hausa Sign Language", "hsn": "Xiang Chinese", "hss": "Harsusi", "ht": "Haitian; Haitian Creole", "hti": "Hoti", "hto": "Minica Huitoto", "hts": "Hadza", "htu": "Hitu", "htx": "Middle Hittite", "hu": "Hungarian", "hub": "Huambisa", "huc": "ǂHua; ǂʼAmkhoe", "hud": "Huaulu", "hue": "San Francisco Del Mar Huave", "huf": "Humene", "hug": "Huachipaeri", "huh": "Huilliche", "hui": "Huli", "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", "huk": "Hulung", "hul": "Hula", "hum": "Hungana", "huo": "Hu", "hup": "Hupa", "huq": "Tsat", "hur": "Halkomelem", "hus": "Huastec", "hut": "Humla", "huu": "Murui Huitoto", "huv": "San Mateo Del Mar Huave", "huw": "Hukumina", "hux": "Nüpode Huitoto", "huy": "Hulaulá", "huz": "Hunzib", "hvc": "Haitian Vodoun Culture Language", "hve": "San Dionisio Del Mar Huave", "hvk": "Haveke", "hvn": "Sabu", "hvv": "Santa María Del Mar Huave", "hwa": "Wané", "hwc": "Hawai'i Creole English; Hawai'i Pidgin", "hwo": "Hwana", "hy": 
"Armenian", "hya": "Hya", "hyw": "Western Armenian", "hyx": "Armenian (family)", "hz": "Herero", "ia": "Interlingua (International Auxiliary Language Association)", "iai": "Iaai", "ian": "Iatmul", "iar": "Purari", "iba": "Iban", "ibb": "Ibibio", "ibd": "Iwaidja", "ibe": "Akpes", "ibg": "Ibanag", "ibh": "Bih", "ibl": "Ibaloi", "ibm": "Agoi", "ibn": "Ibino", "ibr": "Ibuoro", "ibu": "Ibu", "iby": "Ibani", "ica": "Ede Ica", "ich": "Etkywan", "icl": "Icelandic Sign Language", "icr": "Islander Creole English", "id": "Indonesian", "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", "idb": "Indo-Portuguese", "idc": "Idon; Ajiya", "idd": "Ede Idaca", "ide": "Idere", "idi": "Idi", "idr": "Indri", "ids": "Idesa", "idt": "Idaté", "idu": "Idoma", "ie": "Interlingue; Occidental", "ifa": "Amganad Ifugao", "ifb": "Batad Ifugao; Ayangan Ifugao", "ife": "Ifè", "iff": "Ifo", "ifk": "Tuwali Ifugao", "ifm": "Teke-Fuumu", "ifu": "Mayoyao Ifugao", "ify": "Keley-I Kallahan", "ig": "Igbo", "igb": "Ebira", "ige": "Igede", "igg": "Igana", "igl": "Igala", "igm": "Kanggape", "ign": "Ignaciano", "igo": "Isebe", "igs": "Interglossa", "igw": "Igwe", "ihb": "Iha Based Pidgin", "ihi": "Ihievbe", "ihp": "Iha", "ihw": "Bidhawal", "ii": "Sichuan Yi; Nuosu", "iin": "Thiin", "iir": "Indo-Iranian languages", "ijc": "Izon", "ije": "Biseni", "ijj": "Ede Ije", "ijn": "Kalabari", "ijo": "Ijo languages", "ijs": "Southeast Ijo", "ik": "Inupiaq", "ike": "Eastern Canadian Inuktitut", "iki": "Iko", "ikk": "Ika", "ikl": "Ikulu", "iko": "Olulumo-Ikom", "ikp": "Ikpeshi", "ikr": "Ikaranggal", "iks": "Inuit Sign Language", "ikt": "Inuinnaqtun; Western Canadian Inuktitut", "ikv": "Iku-Gora-Ankwa", "ikw": "Ikwere", "ikx": "Ik", "ikz": "Ikizu", "ila": "Ile Ape", "ilb": "Ila", "ilg": "Garig-Ilgar", "ili": "Ili Turki", "ilk": "Ilongot", "ilm": "Iranun (Malaysia)", "ilo": "Iloko", "ilp": "Iranun (Philippines)", "ils": "International Sign", "ilu": "Ili'uun", "ilv": "Ilue", "ima": "Mala Malasar", "imi": "Anamgura", "iml": "Miluk", "imn": "Imonda", "imo": "Imbongu", "imr": "Imroing", "ims": "Marsian", "imt": "Imotong", "imy": "Milyan", "inb": "Inga", "inc": "Indic languages", "ine": "Indo-European languages", "ing": "Degexit'an", "inh": "Ingush", "inj": "Jungle Inga", "inl": "Indonesian Sign Language", "inm": "Minaean", "inn": "Isinai", "ino": "Inoke-Yate", "inp": "Iñapari", "ins": "Indian Sign Language", "int": "Intha", "inz": "Ineseño", "io": "Ido", "ior": "Inor", "iou": "Tuma-Irumu", "iow": "Iowa-Oto", "ipi": "Ipili", "ipo": "Ipiko", "iqu": "Iquito", "iqw": "Ikwo", "ira": "Iranian languages", "ire": "Iresim", "irh": "Irarutu", "iri": "Rigwe; Irigwe", "irk": "Iraqw", "irn": "Irántxe", "iro": "Iroquoian languages", "irr": "Ir", "iru": "Irula", "irx": "Kamberau", "iry": "Iraya", "is": "Icelandic", "isa": "Isabi", "isc": "Isconahua", "isd": "Isnag", "ise": "Italian Sign Language", "isg": "Irish Sign Language", "ish": "Esan", "isi": "Nkem-Nkum", "isk": "Ishkashimi", "ism": "Masimasi", "isn": "Isanzu", "iso": "Isoko", "isr": "Israeli Sign Language", "ist": "Istriot", "isu": "Isu (Menchum Division)", "it": "Italian", "itb": "Binongan Itneg", "itc": "Italic languages", "itd": "Southern Tidung", "ite": "Itene", "iti": "Inlaod Itneg", "itk": "Judeo-Italian", "itl": "Itelmen", "itm": "Itu Mbon Uzo", "ito": "Itonama", "itr": "Iteri", "its": "Isekiri", "itt": "Maeng Itneg", "itv": "Itawit", "itw": "Ito", "itx": "Itik", "ity": "Moyadan Itneg", "itz": "Itzá", "iu": "Inuktitut", "ium": "Iu Mien", "ivb": "Ibatan", "ivv": "Ivatan", "iwk": "I-Wak", "iwm": 
"Iwam", "iwo": "Iwur", "iws": "Sepik Iwam", "ixc": "Ixcatec", "ixl": "Ixil", "iya": "Iyayu", "iyo": "Mesaka", "iyx": "Yaka (Congo)", "izh": "Ingrian", "izr": "Izere", "izz": "Izii", "ja": "Japanese", "jaa": "Jamamadí", "jab": "Hyam", "jac": "Popti'; Jakalteko", "jad": "Jahanka", "jae": "Yabem", "jaf": "Jara", "jah": "Jah Hut", "jaj": "Zazao", "jak": "Jakun", "jal": "Yalahatan", "jam": "Jamaican Creole English", "jan": "Jandai", "jao": "Yanyuwa", "jaq": "Yaqay", "jas": "New Caledonian Javanese", "jat": "Jakati", "jau": "Yaur", "jax": "Jambi Malay", "jay": "Yan-nhangu; Nhangu", "jaz": "Jawe", "jbe": "Judeo-Berber", "jbi": "Badjiri", "jbj": "Arandai", "jbk": "Barikewa", "jbm": "Bijim", "jbn": "Nafusi", "jbo": "Lojban", "jbr": "Jofotek-Bromnya", "jbt": "Jabutí", "jbu": "Jukun Takum", "jbw": "Yawijibaya", "jcs": "Jamaican Country Sign Language", "jct": "Krymchak", "jda": "Jad", "jdg": "Jadgali", "jdt": "Judeo-Tat", "jeb": "Jebero", "jee": "Jerung", "jeh": "Jeh", "jei": "Yei", "jek": "Jeri Kuo", "jel": "Yelmek", "jen": "Dza", "jer": "Jere", "jet": "Manem", "jeu": "Jonkor Bourmataguil", "jgb": "Ngbee", "jge": "Judeo-Georgian", "jgk": "Gwak", "jgo": "Ngomba", "jhi": "Jehai", "jhs": "Jhankot Sign Language", "jia": "Jina", "jib": "Jibu", "jic": "Tol", "jid": "Bu (Kaduna State)", "jie": "Jilbe", "jig": "Jingulu; Djingili", "jih": "sTodsde; Shangzhai", "jii": "Jiiddu", "jil": "Jilim", "jim": "Jimi (Cameroon)", "jio": "Jiamao", "jiq": "Guanyinqiao; Lavrung", "jit": "Jita", "jiu": "Youle Jinuo", "jiv": "Shuar", "jiy": "Buyuan Jinuo", "jje": "Jejueo", "jjr": "Bankal", "jka": "Kaera", "jkm": "Mobwa Karen", "jko": "Kubo", "jkp": "Paku Karen", "jkr": "Koro (India)", "jks": "Amami Koniya Sign Language", "jku": "Labir", "jle": "Ngile", "jls": "Jamaican Sign Language", "jma": "Dima", "jmb": "Zumbun", "jmc": "Machame", "jmd": "Yamdena", "jmi": "Jimi (Nigeria)", "jml": "Jumli", "jmn": "Makuri Naga", "jmr": "Kamara", "jms": "Mashi (Nigeria)", "jmw": "Mouwase", "jmx": "Western Juxtlahuaca Mixtec", "jna": "Jangshung", "jnd": "Jandavra", "jng": "Yangman", "jni": "Janji", "jnj": "Yemsa", "jnl": "Rawat", "jns": "Jaunsari", "job": "Joba", "jod": "Wojenaka", "jog": "Jogi", "jor": "Jorá", "jos": "Jordanian Sign Language", "jow": "Jowulu", "jpa": "Jewish Palestinian Aramaic", "jpr": "Judeo-Persian", "jpx": "Japanese (family)", "jqr": "Jaqaru", "jra": "Jarai", "jrb": "Judeo-Arabic", "jrr": "Jiru", "jrt": "Jakattoe", "jru": "Japrería", "jsl": "Japanese Sign Language", "jua": "Júma", "jub": "Wannu", "juc": "Jurchen", "jud": "Worodougou", "juh": "Hõne", "jui": "Ngadjuri", "juk": "Wapan", "jul": "Jirel", "jum": "Jumjum", "jun": "Juang", "juo": "Jiba", "jup": "Hupdë", "jur": "Jurúna", "jus": "Jumla Sign Language", "jut": "Jutish", "juu": "Ju", "juw": "Wãpha", "juy": "Juray", "jv": "Javanese", "jvd": "Javindo", "jvn": "Caribbean Javanese", "jwi": "Jwira-Pepesa", "jya": "Jiarong", "jye": "Judeo-Yemeni Arabic", "jyy": "Jaya", "ka": "Georgian", "kaa": "Kara-Kalpak; Karakalpak", "kab": "Kabyle", "kac": "Kachin; Jingpho", "kad": "Adara", "kae": "Ketangalan", "kaf": "Katso", "kag": "Kajaman", "kah": "Kara (Central African Republic)", "kai": "Karekare", "kaj": "Jju", "kak": "Kalanguya; Kayapa Kallahan", "kam": "Kamba (Kenya)", "kao": "Xaasongaxango", "kap": "Bezhta", "kaq": "Capanahua", "kar": "Karen languages", "kav": "Katukína", "kaw": "Kawi", "kax": "Kao", "kay": "Kamayurá", "kba": "Kalarko", "kbb": "Kaxuiâna", "kbc": "Kadiwéu", "kbd": "Kabardian", "kbe": "Kanju", "kbg": "Khamba", "kbh": "Camsá", "kbi": "Kaptiau", "kbj": "Kari", 
"kbk": "Grass Koiari", "kbl": "Kanembu", "kbm": "Iwal", "kbn": "Kare (Central African Republic)", "kbo": "Keliko", "kbp": "Kabiyè", "kbq": "Kamano", "kbr": "Kafa", "kbs": "Kande", "kbt": "Abadi", "kbu": "Kabutra", "kbv": "Dera (Indonesia)", "kbw": "Kaiep", "kbx": "Ap Ma", "kby": "Manga Kanuri", "kbz": "Duhwa", "kca": "Khanty", "kcb": "Kawacha", "kcc": "Lubila", "kcd": "Ngkâlmpw Kanum", "kce": "Kaivi", "kcf": "Ukaan", "kcg": "Tyap", "kch": "Vono", "kci": "Kamantan", "kcj": "Kobiana", "kck": "Kalanga", "kcl": "Kela (Papua New Guinea); Kala", "kcm": "Gula (Central African Republic)", "kcn": "Nubi", "kco": "Kinalakna", "kcp": "Kanga", "kcq": "Kamo", "kcr": "Katla", "kcs": "Koenoem", "kct": "Kaian", "kcu": "Kami (Tanzania)", "kcv": "Kete", "kcw": "Kabwari", "kcx": "Kachama-Ganjule", "kcy": "Korandje", "kcz": "Konongo", "kda": "Worimi", "kdc": "Kutu", "kdd": "Yankunytjatjara", "kde": "Makonde", "kdf": "Mamusi", "kdg": "Seba", "kdh": "Tem", "kdi": "Kumam", "kdj": "Karamojong", "kdk": "Numèè; Kwényi", "kdl": "Tsikimba", "kdm": "Kagoma", "kdn": "Kunda", "kdo": "Kordofanian languages", "kdp": "Kaningdon-Nindem", "kdq": "Koch", "kdr": "Karaim", "kdt": "Kuy", "kdu": "Kadaru", "kdw": "Koneraw", "kdx": "Kam", "kdy": "Keder; Keijar", "kdz": "Kwaja", "kea": "Kabuverdianu", "keb": "Kélé", "kec": "Keiga", "ked": "Kerewe", "kee": "Eastern Keres", "kef": "Kpessi", "keg": "Tese", "keh": "Keak", "kei": "Kei", "kej": "Kadar", "kek": "Kekchí", "kel": "Kela (Democratic Republic of Congo)", "kem": "Kemak", "ken": "Kenyang", "keo": "Kakwa", "kep": "Kaikadi", "keq": "Kamar", "ker": "Kera", "kes": "Kugbo", "ket": "Ket", "keu": "Akebu", "kev": "Kanikkaran", "kew": "West Kewa", "kex": "Kukna", "key": "Kupia", "kez": "Kukele", "kfa": "Kodava", "kfb": "Northwestern Kolami", "kfc": "Konda-Dora", "kfd": "Korra Koraga", "kfe": "Kota (India)", "kff": "Koya", "kfg": "Kudiya", "kfh": "Kurichiya", "kfi": "Kannada Kurumba", "kfj": "Kemiehua", "kfk": "Kinnauri", "kfl": "Kung", "kfm": "Khunsari", "kfn": "Kuk", "kfo": "Koro (Côte d'Ivoire)", "kfp": "Korwa", "kfq": "Korku", "kfr": "Kachhi; Kutchi", "kfs": "Bilaspuri", "kft": "Kanjari", "kfu": "Katkari", "kfv": "Kurmukar", "kfw": "Kharam Naga", "kfx": "Kullu Pahari", "kfy": "Kumaoni", "kfz": "Koromfé", "kg": "Kongo", "kga": "Koyaga", "kgb": "Kawe", "kge": "Komering", "kgf": "Kube", "kgg": "Kusunda", "kgi": "Selangor Sign Language", "kgj": "Gamale Kham", "kgk": "Kaiwá", "kgl": "Kunggari", "kgm": "Karipúna", "kgn": "Karingani", "kgo": "Krongo", "kgp": "Kaingang", "kgq": "Kamoro", "kgr": "Abun", "kgs": "Kumbainggar", "kgt": "Somyev", "kgu": "Kobol", "kgv": "Karas", "kgw": "Karon Dori", "kgx": "Kamaru", "kgy": "Kyerung", "kha": "Khasi", "khb": "Lü", "khc": "Tukang Besi North", "khd": "Bädi Kanum", "khe": "Korowai", "khf": "Khuen", "khg": "Khams Tibetan", "khh": "Kehu", "khi": "Khoisan languages", "khj": "Kuturmi", "khk": "Halh Mongolian", "khl": "Lusi", "khn": "Khandesi", "kho": "Khotanese; Sakan", "khp": "Kapori; Kapauri", "khq": "Koyra Chiini Songhay", "khr": "Kharia", "khs": "Kasua", "kht": "Khamti", "khu": "Nkhumbi", "khv": "Khvarshi", "khw": "Khowar", "khx": "Kanu", "khy": "Kele (Democratic Republic of Congo)", "khz": "Keapara", "ki": "Kikuyu; Gikuyu", "kia": "Kim", "kib": "Koalib", "kic": "Kickapoo", "kid": "Koshin", "kie": "Kibet", "kif": "Eastern Parbate Kham", "kig": "Kimaama; Kimaghima", "kih": "Kilmeri", "kii": "Kitsai", "kij": "Kilivila", "kil": "Kariya", "kim": "Karagas", "kio": "Kiowa", "kip": "Sheshi Kham", "kiq": "Kosadle; Kosare", "kis": "Kis", "kit": "Agob", "kiu": 
"Kirmanjki (individual language)", "kiv": "Kimbu", "kiw": "Northeast Kiwai", "kix": "Khiamniungan Naga", "kiy": "Kirikiri", "kiz": "Kisi", "kj": "Kuanyama; Kwanyama", "kja": "Mlap", "kjb": "Q'anjob'al; Kanjobal", "kjc": "Coastal Konjo", "kjd": "Southern Kiwai", "kje": "Kisar", "kjg": "Khmu", "kjh": "Khakas", "kji": "Zabana", "kjj": "Khinalugh", "kjk": "Highland Konjo", "kjl": "Western Parbate Kham", "kjm": "Kháng", "kjn": "Kunjen", "kjo": "Harijan Kinnauri", "kjp": "Pwo Eastern Karen", "kjq": "Western Keres", "kjr": "Kurudu", "kjs": "East Kewa", "kjt": "Phrae Pwo Karen", "kju": "Kashaya", "kjv": "Kaikavian Literary Language", "kjx": "Ramopa", "kjy": "Erave", "kjz": "Bumthangkha", "kk": "Kazakh", "kka": "Kakanda", "kkb": "Kwerisa", "kkc": "Odoodee", "kkd": "Kinuku", "kke": "Kakabe", "kkf": "Kalaktang Monpa", "kkg": "Mabaka Valley Kalinga", "kkh": "Khün", "kki": "Kagulu", "kkj": "Kako", "kkk": "Kokota", "kkl": "Kosarek Yale", "kkm": "Kiong", "kkn": "Kon Keu", "kko": "Karko", "kkp": "Gugubera; Koko-Bera", "kkq": "Kaeku", "kkr": "Kir-Balar", "kks": "Giiwo", "kkt": "Koi", "kku": "Tumi", "kkv": "Kangean", "kkw": "Teke-Kukuya", "kkx": "Kohin", "kky": "Guugu Yimidhirr; Guguyimidjir", "kkz": "Kaska", "kl": "Kalaallisut; Greenlandic", "kla": "Klamath-Modoc", "klb": "Kiliwa", "klc": "Kolbila", "kld": "Gamilaraay", "kle": "Kulung (Nepal)", "klf": "Kendeje", "klg": "Tagakaulo", "klh": "Weliki", "kli": "Kalumpang", "klj": "Khalaj", "klk": "Kono (Nigeria)", "kll": "Kagan Kalagan", "klm": "Migum", "kln": "Kalenjin", "klo": "Kapya", "klp": "Kamasa", "klq": "Rumu", "klr": "Khaling", "kls": "Kalasha", "klt": "Nukna", "klu": "Klao", "klv": "Maskelynes", "klw": "Tado; Lindu", "klx": "Koluwawa", "kly": "Kalao", "klz": "Kabola", "km": "Khmer; Central Khmer", "kma": "Konni", "kmb": "Kimbundu", "kmc": "Southern Dong", "kmd": "Majukayang Kalinga", "kme": "Bakole", "kmf": "Kare (Papua New Guinea)", "kmg": "Kâte", "kmh": "Kalam", "kmi": "Kami (Nigeria)", "kmj": "Kumarbhag Paharia", "kmk": "Limos Kalinga", "kml": "Tanudan Kalinga", "kmm": "Kom (India)", "kmn": "Awtuw", "kmo": "Kwoma", "kmp": "Gimme", "kmq": "Kwama", "kmr": "Northern Kurdish", "kms": "Kamasau", "kmt": "Kemtuik", "kmu": "Kanite", "kmv": "Karipúna Creole French", "kmw": "Komo (Democratic Republic of Congo)", "kmx": "Waboda", "kmy": "Koma", "kmz": "Khorasani Turkish", "kn": "Kannada", "kna": "Dera (Nigeria)", "knb": "Lubuagan Kalinga", "knc": "Central Kanuri", "knd": "Konda", "kne": "Kankanaey", "knf": "Mankanya", "kng": "Koongo", "kni": "Kanufi", "knj": "Western Kanjobal", "knk": "Kuranko", "knl": "Keninjal", "knm": "Kanamarí", "knn": "Konkani (individual language)", "kno": "Kono (Sierra Leone)", "knp": "Kwanja", "knq": "Kintaq", "knr": "Kaningra", "kns": "Kensiu", "knt": "Panoan Katukína", "knu": "Kono (Guinea)", "knv": "Tabo", "knw": "Kung-Ekoka", "knx": "Kendayan; Salako", "kny": "Kanyok", "knz": "Kalamsé", "ko": "Korean", "koa": "Konomala", "koc": "Kpati", "kod": "Kodi", "koe": "Kacipo-Bale Suri", "kof": "Kubi", "kog": "Cogui; Kogi", "koh": "Koyo", "koi": "Komi-Permyak", "kok": "Konkani (macrolanguage)", "kol": "Kol (Papua New Guinea)", "koo": "Konzo", "kop": "Waube", "koq": "Kota (Gabon)", "kos": "Kosraean", "kot": "Lagwan", "kou": "Koke", "kov": "Kudu-Camo", "kow": "Kugama", "koy": "Koyukon", "koz": "Korak", "kpa": "Kutto", "kpb": "Mullu Kurumba", "kpc": "Curripaco", "kpd": "Koba", "kpe": "Kpelle", "kpf": "Komba", "kpg": "Kapingamarangi", "kph": "Kplang", "kpi": "Kofei", "kpj": "Karajá", "kpk": "Kpan", "kpl": "Kpala", "kpm": "Koho", "kpn": 
"Kepkiriwát", "kpo": "Ikposo", "kpq": "Korupun-Sela", "kpr": "Korafe-Yegha", "kps": "Tehit", "kpt": "Karata", "kpu": "Kafoa", "kpv": "Komi-Zyrian", "kpw": "Kobon", "kpx": "Mountain Koiali", "kpy": "Koryak", "kpz": "Kupsabiny", "kqa": "Mum", "kqb": "Kovai", "kqc": "Doromu-Koki", "kqd": "Koy Sanjaq Surat", "kqe": "Kalagan", "kqf": "Kakabai", "kqg": "Khe", "kqh": "Kisankasa", "kqi": "Koitabu", "kqj": "Koromira", "kqk": "Kotafon Gbe", "kql": "Kyenele", "kqm": "Khisa", "kqn": "Kaonde", "kqo": "Eastern Krahn", "kqp": "Kimré", "kqq": "Krenak", "kqr": "Kimaragang", "kqs": "Northern Kissi", "kqt": "Klias River Kadazan", "kqu": "Seroa", "kqv": "Okolod", "kqw": "Kandas", "kqx": "Mser", "kqy": "Koorete", "kqz": "Korana", "kr": "Kanuri", "kra": "Kumhali", "krb": "Karkin", "krc": "Karachay-Balkar", "krd": "Kairui-Midiki", "kre": "Panará", "krf": "Koro (Vanuatu)", "krh": "Kurama", "kri": "Krio", "krj": "Kinaray-A", "krk": "Kerek", "krl": "Karelian", "krn": "Sapo", "kro": "Kru languages", "krp": "Korop", "krr": "Krung", "krs": "Gbaya (Sudan)", "krt": "Tumari Kanuri", "kru": "Kurukh", "krv": "Kavet", "krw": "Western Krahn", "krx": "Karon", "kry": "Kryts", "krz": "Sota Kanum", "ks": "Kashmiri", "ksa": "Shuwa-Zamani", "ksb": "Shambala", "ksc": "Southern Kalinga", "ksd": "Kuanua", "kse": "Kuni", "ksf": "Bafia", "ksg": "Kusaghe", "ksh": "Kölsch", "ksi": "Krisa; I'saka", "ksj": "Uare", "ksk": "Kansa", "ksl": "Kumalu", "ksm": "Kumba", "ksn": "Kasiguranin", "kso": "Kofa", "ksp": "Kaba", "ksq": "Kwaami", "ksr": "Borong", "kss": "Southern Kisi", "kst": "Winyé", "ksu": "Khamyang", "ksv": "Kusu", "ksw": "S'gaw Karen", "ksx": "Kedang", "ksy": "Kharia Thar", "ksz": "Kodaku", "kta": "Katua", "ktb": "Kambaata", "ktc": "Kholok", "ktd": "Kokata; Kukatha", "kte": "Nubri", "ktf": "Kwami", "ktg": "Kalkutung", "kth": "Karanga", "kti": "North Muyu", "ktj": "Plapo Krumen", "ktk": "Kaniet", "ktl": "Koroshi", "ktm": "Kurti", "ktn": "Karitiâna", "kto": "Kuot", "ktp": "Kaduo", "ktq": "Katabaga", "kts": "South Muyu", "ktt": "Ketum", "ktu": "Kituba (Democratic Republic of Congo)", "ktv": "Eastern Katu", "ktw": "Kato", "ktx": "Kaxararí", "kty": "Kango (Bas-Uélé District)", "ktz": "Juǀʼhoan; Juǀʼhoansi", "ku": "Kurdish", "kub": "Kutep", "kuc": "Kwinsu", "kud": "'Auhelawa", "kue": "Kuman (Papua New Guinea)", "kuf": "Western Katu", "kug": "Kupa", "kuh": "Kushi", "kui": "Kuikúro-Kalapálo; Kalapalo", "kuj": "Kuria", "kuk": "Kepo'", "kul": "Kulere", "kum": "Kumyk", "kun": "Kunama", "kuo": "Kumukio", "kup": "Kunimaipa", "kuq": "Karipuna", "kus": "Kusaal", "kut": "Kutenai", "kuu": "Upper Kuskokwim", "kuv": "Kur", "kuw": "Kpagua", "kux": "Kukatja", "kuy": "Kuuku-Ya'u", "kuz": "Kunza", "kv": "Komi", "kva": "Bagvalal", "kvb": "Kubu", "kvc": "Kove", "kvd": "Kui (Indonesia)", "kve": "Kalabakan", "kvf": "Kabalai", "kvg": "Kuni-Boazi", "kvh": "Komodo", "kvi": "Kwang", "kvj": "Psikye", "kvk": "Korean Sign Language", "kvl": "Kayaw", "kvm": "Kendem", "kvn": "Border Kuna", "kvo": "Dobel", "kvp": "Kompane", "kvq": "Geba Karen", "kvr": "Kerinci", "kvt": "Lahta Karen; Lahta", "kvu": "Yinbaw Karen", "kvv": "Kola", "kvw": "Wersing", "kvx": "Parkari Koli", "kvy": "Yintale Karen; Yintale", "kvz": "Tsakwambo; Tsaukambo", "kw": "Cornish", "kwa": "Dâw", "kwb": "Kwa", "kwc": "Likwala", "kwd": "Kwaio", "kwe": "Kwerba", "kwf": "Kwara'ae", "kwg": "Sara Kaba Deme", "kwh": "Kowiai", "kwi": "Awa-Cuaiquer", "kwj": "Kwanga", "kwk": "Kwakiutl", "kwl": "Kofyar", "kwm": "Kwambi", "kwn": "Kwangali", "kwo": "Kwomtari", "kwp": "Kodia", "kwr": "Kwer", "kws": "Kwese", "kwt": 
"Kwesten", "kwu": "Kwakum", "kwv": "Sara Kaba Náà", "kww": "Kwinti", "kwx": "Khirwar", "kwy": "San Salvador Kongo", "kwz": "Kwadi", "kxa": "Kairiru", "kxb": "Krobu", "kxc": "Konso; Khonso", "kxd": "Brunei", "kxf": "Manumanaw Karen; Manumanaw", "kxh": "Karo (Ethiopia)", "kxi": "Keningau Murut", "kxj": "Kulfa", "kxk": "Zayein Karen", "kxm": "Northern Khmer", "kxn": "Kanowit-Tanjong Melanau", "kxo": "Kanoé", "kxp": "Wadiyara Koli", "kxq": "Smärky Kanum", "kxr": "Koro (Papua New Guinea)", "kxs": "Kangjia", "kxt": "Koiwat", "kxv": "Kuvi", "kxw": "Konai", "kxx": "Likuba", "kxy": "Kayong", "kxz": "Kerewo", "ky": "Kirghiz; Kyrgyz", "kya": "Kwaya", "kyb": "Butbut Kalinga", "kyc": "Kyaka", "kyd": "Karey", "kye": "Krache", "kyf": "Kouya", "kyg": "Keyagana", "kyh": "Karok", "kyi": "Kiput", "kyj": "Karao", "kyk": "Kamayo", "kyl": "Kalapuya", "kym": "Kpatili", "kyn": "Northern Binukidnon", "kyo": "Kelon", "kyp": "Kang", "kyq": "Kenga", "kyr": "Kuruáya", "kys": "Baram Kayan", "kyt": "Kayagar", "kyu": "Western Kayah", "kyv": "Kayort", "kyw": "Kudmali", "kyx": "Rapoisi", "kyy": "Kambaira", "kyz": "Kayabí", "kza": "Western Karaboro", "kzb": "Kaibobo", "kzc": "Bondoukou Kulango", "kzd": "Kadai", "kze": "Kosena", "kzf": "Da'a Kaili", "kzg": "Kikai", "kzi": "Kelabit", "kzk": "Kazukuru", "kzl": "Kayeli", "kzm": "Kais", "kzn": "Kokola", "kzo": "Kaningi", "kzp": "Kaidipang", "kzq": "Kaike", "kzr": "Karang", "kzs": "Sugut Dusun", "kzu": "Kayupulau", "kzv": "Komyandaret", "kzw": "Karirí-Xocó", "kzx": "Kamarian", "kzy": "Kango (Tshopo District)", "kzz": "Kalabra", "la": "Latin", "laa": "Southern Subanen", "lab": "Linear A", "lac": "Lacandon", "lad": "Ladino", "lae": "Pattani", "laf": "Lafofa", "lag": "Langi", "lah": "Lahnda", "lai": "Lambya", "laj": "Lango (Uganda)", "lal": "Lalia", "lam": "Lamba", "lan": "Laru", "lap": "Laka (Chad)", "laq": "Qabiao", "lar": "Larteh", "las": "Lama (Togo)", "lau": "Laba", "law": "Lauje", "lax": "Tiwa", "lay": "Lama Bai", "laz": "Aribwatsa", "lb": "Luxembourgish; Letzeburgesch", "lbb": "Label", "lbc": "Lakkia", "lbe": "Lak", "lbf": "Tinani", "lbg": "Laopang", "lbi": "La'bi", "lbj": "Ladakhi", "lbk": "Central Bontok", "lbl": "Libon Bikol", "lbm": "Lodhi", "lbn": "Rmeet", "lbo": "Laven", "lbq": "Wampar", "lbr": "Lohorung", "lbs": "Libyan Sign Language", "lbt": "Lachi", "lbu": "Labu", "lbv": "Lavatbura-Lamusong", "lbw": "Tolaki", "lbx": "Lawangan", "lby": "Lamalama; Lamu-Lamu", "lbz": "Lardil", "lcc": "Legenyem", "lcd": "Lola", "lce": "Loncong; Sekak", "lcf": "Lubu", "lch": "Luchazi", "lcl": "Lisela", "lcm": "Tungag", "lcp": "Western Lawa", "lcq": "Luhu", "lcs": "Lisabata-Nuniali", "lda": "Kla-Dan", "ldb": "Dũya", "ldd": "Luri", "ldg": "Lenyima", "ldh": "Lamja-Dengsa-Tola", "ldi": "Laari", "ldj": "Lemoro", "ldk": "Leelau", "ldl": "Kaan", "ldm": "Landoma", "ldn": "Láadan", "ldo": "Loo", "ldp": "Tso", "ldq": "Lufu", "lea": "Lega-Shabunda", "leb": "Lala-Bisa", "lec": "Leco", "led": "Lendu", "lee": "Lyélé", "lef": "Lelemi", "leh": "Lenje", "lei": "Lemio", "lej": "Lengola", "lek": "Leipon", "lel": "Lele (Democratic Republic of Congo)", "lem": "Nomaande", "len": "Lenca", "leo": "Leti (Cameroon)", "lep": "Lepcha", "leq": "Lembena", "ler": "Lenkau", "les": "Lese", "let": "Lesing-Gelimi; Amio-Gelimi", "leu": "Kara (Papua New Guinea)", "lev": "Lamma", "lew": "Ledo Kaili", "lex": "Luang", "ley": "Lemolang", "lez": "Lezghian", "lfa": "Lefa", "lfn": "Lingua Franca Nova", "lg": "Ganda; Luganda", "lga": "Lungga", "lgb": "Laghu", "lgg": "Lugbara", "lgh": "Laghuu", "lgi": "Lengilu", "lgk": "Lingarak; 
Neverver", "lgl": "Wala", "lgm": "Lega-Mwenga", "lgn": "T'apo; Opuuo", "lgo": "Lango (South Sudan)", "lgq": "Logba", "lgr": "Lengo", "lgt": "Pahi", "lgu": "Longgu", "lgz": "Ligenza", "lha": "Laha (Viet Nam)", "lhh": "Laha (Indonesia)", "lhi": "Lahu Shi", "lhl": "Lahul Lohar", "lhm": "Lhomi", "lhn": "Lahanan", "lhp": "Lhokpu", "lhs": "Mlahsö", "lht": "Lo-Toga", "lhu": "Lahu", "li": "Limburgan; Limburger; Limburgish", "lia": "West-Central Limba", "lib": "Likum", "lic": "Hlai", "lid": "Nyindrou", "lie": "Likila", "lif": "Limbu", "lig": "Ligbi", "lih": "Lihir", "lij": "Ligurian", "lik": "Lika", "lil": "Lillooet", "lio": "Liki", "lip": "Sekpele", "liq": "Libido", "lir": "Liberian English", "lis": "Lisu", "liu": "Logorik", "liv": "Liv", "liw": "Col", "lix": "Liabuku", "liy": "Banda-Bambari", "liz": "Libinza", "lja": "Golpa", "lje": "Rampi", "lji": "Laiyolo", "ljl": "Li'o", "ljp": "Lampung Api", "ljw": "Yirandali", "ljx": "Yuru", "lka": "Lakalei", "lkb": "Kabras; Lukabaras", "lkc": "Kucong", "lkd": "Lakondê", "lke": "Kenyi", "lkh": "Lakha", "lki": "Laki", "lkj": "Remun", "lkl": "Laeko-Libuat", "lkm": "Kalaamaya", "lkn": "Lakon; Vure", "lko": "Khayo; Olukhayo", "lkr": "Päri", "lks": "Kisa; Olushisa", "lkt": "Lakota", "lku": "Kungkari", "lky": "Lokoya", "lla": "Lala-Roba", "llb": "Lolo", "llc": "Lele (Guinea)", "lld": "Ladin", "lle": "Lele (Papua New Guinea)", "llf": "Hermit", "llg": "Lole", "llh": "Lamu", "lli": "Teke-Laali", "llj": "Ladji Ladji", "llk": "Lelak", "lll": "Lilau", "llm": "Lasalimu", "lln": "Lele (Chad)", "llp": "North Efate", "llq": "Lolak", "lls": "Lithuanian Sign Language", "llu": "Lau", "llx": "Lauan", "lma": "East Limba", "lmb": "Merei", "lmc": "Limilngan", "lmd": "Lumun", "lme": "Pévé", "lmf": "South Lembata", "lmg": "Lamogai", "lmh": "Lambichhong", "lmi": "Lombi", "lmj": "West Lembata", "lmk": "Lamkang", "lml": "Hano", "lmn": "Lambadi", "lmo": "Lombard", "lmp": "Limbum", "lmq": "Lamatuka", "lmr": "Lamalera", "lmu": "Lamenu", "lmv": "Lomaiviti", "lmw": "Lake Miwok", "lmx": "Laimbue", "lmy": "Lamboya", "ln": "Lingala", "lna": "Langbashe", "lnb": "Mbalanhu", "lnd": "Lundayeh; Lun Bawang", "lng": "Langobardic", "lnh": "Lanoh", "lni": "Daantanai'", "lnj": "Leningitij", "lnl": "South Central Banda", "lnm": "Langam", "lnn": "Lorediakarkar", "lns": "Lamnso'", "lnu": "Longuda", "lnw": "Lanima", "lnz": "Lonzo", "lo": "Lao", "loa": "Loloda", "lob": "Lobi", "loc": "Inonhan", "loe": "Saluan", "lof": "Logol", "log": "Logo", "loh": "Narim", "loi": "Loma (Côte d'Ivoire)", "loj": "Lou", "lok": "Loko", "lol": "Mongo", "lom": "Loma (Liberia)", "lon": "Malawi Lomwe", "loo": "Lombo", "lop": "Lopa", "loq": "Lobala", "lor": "Téén", "los": "Loniu", "lot": "Otuho", "lou": "Louisiana Creole", "lov": "Lopi", "low": "Tampias Lobu", "lox": "Loun", "loy": "Loke", "loz": "Lozi", "lpa": "Lelepa", "lpe": "Lepki", "lpn": "Long Phuri Naga", "lpo": "Lipo", "lpx": "Lopit", "lqr": "Logir", "lra": "Rara Bakati'", "lrc": "Northern Luri", "lre": "Laurentian", "lrg": "Laragia", "lri": "Marachi; Olumarachi", "lrk": "Loarki", "lrl": "Lari", "lrm": "Marama; Olumarama", "lrn": "Lorang", "lro": "Laro", "lrr": "Southern Yamphu", "lrt": "Larantuka Malay", "lrv": "Larevat", "lrz": "Lemerig", "lsa": "Lasgerdi", "lsb": "Burundian Sign Language; Langue des Signes Burundaise", "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", "lsd": "Lishana Deni", "lse": "Lusengo", "lsh": "Lish", "lsi": "Lashi", "lsl": "Latvian Sign Language", "lsm": "Saamia; Olusamia", "lsn": "Tibetan Sign Language", "lso": "Laos Sign Language", 
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", "lsr": "Aruop", "lss": "Lasi", "lst": "Trinidad and Tobago Sign Language", "lsv": "Sivia Sign Language", "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", "lsy": "Mauritian Sign Language", "lt": "Lithuanian", "ltc": "Late Middle Chinese", "ltg": "Latgalian", "lth": "Thur", "lti": "Leti (Indonesia)", "ltn": "Latundê", "lto": "Tsotso; Olutsotso", "lts": "Tachoni; Lutachoni", "ltu": "Latu", "lu": "Luba-Katanga", "lua": "Luba-Lulua", "luc": "Aringa", "lud": "Ludian", "lue": "Luvale", "luf": "Laua", "lui": "Luiseno", "luj": "Luna", "luk": "Lunanakha", "lul": "Olu'bo", "lum": "Luimbi", "lun": "Lunda", "luo": "Luo (Kenya and Tanzania); Dholuo", "lup": "Lumbu", "luq": "Lucumi", "lur": "Laura", "lus": "Lushai", "lut": "Lushootseed", "luu": "Lumba-Yakkha", "luv": "Luwati", "luw": "Luo (Cameroon)", "luy": "Luyia; Oluluyia", "luz": "Southern Luri", "lv": "Latvian", "lva": "Maku'a", "lvi": "Lavi", "lvk": "Lavukaleve", "lvs": "Standard Latvian", "lvu": "Levuka", "lwa": "Lwalu", "lwe": "Lewo Eleng", "lwg": "Wanga; Oluwanga", "lwh": "White Lachi", "lwl": "Eastern Lawa", "lwm": "Laomian", "lwo": "Luwo", "lws": "Malawian Sign Language", "lwt": "Lewotobi", "lwu": "Lawu", "lww": "Lewo", "lxm": "Lakurumau", "lya": "Layakha", "lyg": "Lyngngam", "lyn": "Luyana", "lzh": "Literary Chinese", "lzl": "Litzlitz", "lzn": "Leinong Naga", "lzz": "Laz", "maa": "San Jerónimo Tecóatl Mazatec", "mab": "Yutanduchi Mixtec", "mad": "Madurese", "mae": "Bo-Rukul", "maf": "Mafa", "mag": "Magahi", "mai": "Maithili", "maj": "Jalapa De Díaz Mazatec", "mak": "Makasar", "mam": "Mam", "man": "Mandingo; Manding", "map": "Austronesian languages", "maq": "Chiquihuitlán Mazatec", "mas": "Masai", "mat": "San Francisco Matlatzinca", "mau": "Huautla Mazatec", "mav": "Sateré-Mawé", "maw": "Mampruli", "max": "North Moluccan Malay", "maz": "Central Mazahua", "mba": "Higaonon", "mbb": "Western Bukidnon Manobo", "mbc": "Macushi", "mbd": "Dibabawon Manobo", "mbe": "Molale", "mbf": "Baba Malay", "mbh": "Mangseng", "mbi": "Ilianen Manobo", "mbj": "Nadëb", "mbk": "Malol", "mbl": "Maxakalí", "mbm": "Ombamba", "mbn": "Macaguán", "mbo": "Mbo (Cameroon)", "mbp": "Malayo", "mbq": "Maisin", "mbr": "Nukak Makú", "mbs": "Sarangani Manobo", "mbt": "Matigsalug Manobo", "mbu": "Mbula-Bwazza", "mbv": "Mbulungish", "mbw": "Maring", "mbx": "Mari (East Sepik Province)", "mby": "Memoni", "mbz": "Amoltepec Mixtec", "mca": "Maca", "mcb": "Machiguenga", "mcc": "Bitur", "mcd": "Sharanahua", "mce": "Itundujia Mixtec", "mcf": "Matsés", "mcg": "Mapoyo", "mch": "Maquiritari", "mci": "Mese", "mcj": "Mvanip", "mck": "Mbunda", "mcl": "Macaguaje", "mcm": "Malaccan Creole Portuguese", "mcn": "Masana", "mco": "Coatlán Mixe", "mcp": "Makaa", "mcq": "Ese", "mcr": "Menya", "mcs": "Mambai", "mct": "Mengisa", "mcu": "Cameroon Mambila", "mcv": "Minanibai", "mcw": "Mawa (Chad)", "mcx": "Mpiemo", "mcy": "South Watut", "mcz": "Mawan", "mda": "Mada (Nigeria)", "mdb": "Morigi", "mdc": "Male (Papua New Guinea)", "mdd": "Mbum", "mde": "Maba (Chad)", "mdf": "Moksha", "mdg": "Massalat", "mdh": "Maguindanaon", "mdi": "Mamvu", "mdj": "Mangbetu", "mdk": "Mangbutu", "mdl": "Maltese Sign Language", "mdm": "Mayogo", "mdn": "Mbati", "mdp": "Mbala", "mdq": "Mbole", "mdr": "Mandar", "mds": "Maria (Papua New Guinea)", "mdt": "Mbere", "mdu": "Mboko", "mdv": "Santa Lucía Monteverde Mixtec", "mdw": "Mbosi", "mdx": "Dizin", "mdy": "Male (Ethiopia)", "mdz": "Suruí Do Pará", "mea": "Menka", "meb": "Ikobi", "mec": 
"Marra", "med": "Melpa", "mee": "Mengen", "mef": "Megam", "meh": "Southwestern Tlaxiaco Mixtec", "mei": "Midob", "mej": "Meyah", "mek": "Mekeo", "mel": "Central Melanau", "mem": "Mangala", "men": "Mende (Sierra Leone)", "meo": "Kedah Malay", "mep": "Miriwoong", "meq": "Merey", "mer": "Meru", "mes": "Masmaje", "met": "Mato", "meu": "Motu", "mev": "Mano", "mew": "Maaka", "mey": "Hassaniyya", "mez": "Menominee", "mfa": "Pattani Malay", "mfb": "Bangka", "mfc": "Mba", "mfd": "Mendankwe-Nkwen", "mfe": "Morisyen", "mff": "Naki", "mfg": "Mogofin", "mfh": "Matal", "mfi": "Wandala", "mfj": "Mefele", "mfk": "North Mofu", "mfl": "Putai", "mfm": "Marghi South", "mfn": "Cross River Mbembe", "mfo": "Mbe", "mfp": "Makassar Malay", "mfq": "Moba", "mfr": "Marrithiyel", "mfs": "Mexican Sign Language", "mft": "Mokerang", "mfu": "Mbwela", "mfv": "Mandjak", "mfw": "Mulaha", "mfx": "Melo", "mfy": "Mayo", "mfz": "Mabaan", "mg": "Malagasy", "mga": "Middle Irish (900-1200)", "mgb": "Mararit", "mgc": "Morokodo", "mgd": "Moru", "mge": "Mango", "mgf": "Maklew", "mgg": "Mpumpong", "mgh": "Makhuwa-Meetto", "mgi": "Lijili", "mgj": "Abureni", "mgk": "Mawes", "mgl": "Maleu-Kilenge", "mgm": "Mambae", "mgn": "Mbangi", "mgo": "Meta'", "mgp": "Eastern Magar", "mgq": "Malila", "mgr": "Mambwe-Lungu", "mgs": "Manda (Tanzania)", "mgt": "Mongol", "mgu": "Mailu", "mgv": "Matengo", "mgw": "Matumbi", "mgy": "Mbunga", "mgz": "Mbugwe", "mh": "Marshallese", "mha": "Manda (India)", "mhb": "Mahongwe", "mhc": "Mocho", "mhd": "Mbugu", "mhe": "Besisi; Mah Meri", "mhf": "Mamaa", "mhg": "Margu", "mhi": "Ma'di", "mhj": "Mogholi", "mhk": "Mungaka", "mhl": "Mauwake", "mhm": "Makhuwa-Moniga", "mhn": "Mócheno", "mho": "Mashi (Zambia)", "mhp": "Balinese Malay", "mhq": "Mandan", "mhr": "Eastern Mari", "mhs": "Buru (Indonesia)", "mht": "Mandahuaca", "mhu": "Digaro-Mishmi; Darang Deng", "mhw": "Mbukushu", "mhx": "Maru; Lhaovo", "mhy": "Ma'anyan", "mhz": "Mor (Mor Islands)", "mi": "Maori", "mia": "Miami", "mib": "Atatláhuca Mixtec", "mic": "Mi'kmaq; Micmac", "mid": "Mandaic", "mie": "Ocotepec Mixtec", "mif": "Mofu-Gudur", "mig": "San Miguel El Grande Mixtec", "mih": "Chayuco Mixtec", "mii": "Chigmecatitlán Mixtec", "mij": "Abar; Mungbam", "mik": "Mikasuki", "mil": "Peñoles Mixtec", "mim": "Alacatlatzala Mixtec", "min": "Minangkabau", "mio": "Pinotepa Nacional Mixtec", "mip": "Apasco-Apoala Mixtec", "miq": "Mískito", "mir": "Isthmus Mixe", "mit": "Southern Puebla Mixtec", "miu": "Cacaloxtepec Mixtec", "miw": "Akoye", "mix": "Mixtepec Mixtec", "miy": "Ayutla Mixtec", "miz": "Coatzospan Mixtec", "mjb": "Makalero", "mjc": "San Juan Colorado Mixtec", "mjd": "Northwest Maidu", "mje": "Muskum", "mjg": "Tu", "mjh": "Mwera (Nyasa)", "mji": "Kim Mun", "mjj": "Mawak", "mjk": "Matukar", "mjl": "Mandeali", "mjm": "Medebur", "mjn": "Ma (Papua New Guinea)", "mjo": "Malankuravan", "mjp": "Malapandaram", "mjq": "Malaryan", "mjr": "Malavedan", "mjs": "Miship", "mjt": "Sauria Paharia", "mju": "Manna-Dora", "mjv": "Mannan", "mjw": "Karbi", "mjx": "Mahali", "mjy": "Mahican", "mjz": "Majhi", "mk": "Macedonian", "mka": "Mbre", "mkb": "Mal Paharia", "mkc": "Siliput", "mke": "Mawchi", "mkf": "Miya", "mkg": "Mak (China)", "mkh": "Mon-Khmer languages", "mki": "Dhatki", "mkj": "Mokilese", "mkk": "Byep", "mkl": "Mokole", "mkm": "Moklen", "mkn": "Kupang Malay", "mko": "Mingang Doso", "mkp": "Moikodi", "mkq": "Bay Miwok", "mkr": "Malas", "mks": "Silacayoapan Mixtec", "mkt": "Vamale", "mku": "Konyanka Maninka", "mkv": "Mafea", "mkw": "Kituba (Congo)", "mkx": "Kinamiging Manobo", "mky": 
"East Makian", "mkz": "Makasae", "ml": "Malayalam", "mla": "Malo", "mlb": "Mbule", "mlc": "Cao Lan", "mle": "Manambu", "mlf": "Mal", "mlh": "Mape", "mli": "Malimpung", "mlj": "Miltu", "mlk": "Ilwana; Kiwilwana", "mll": "Malua Bay", "mlm": "Mulam", "mln": "Malango", "mlo": "Mlomp", "mlp": "Bargam", "mlq": "Western Maninkakan", "mlr": "Vame", "mls": "Masalit", "mlu": "To'abaita", "mlv": "Motlav; Mwotlap", "mlw": "Moloko", "mlx": "Malfaxal; Naha'ai", "mlz": "Malaynon", "mma": "Mama", "mmb": "Momina", "mmc": "Michoacán Mazahua", "mmd": "Maonan", "mme": "Mae", "mmf": "Mundat", "mmg": "North Ambrym", "mmh": "Mehináku", "mmi": "Musar", "mmj": "Majhwar", "mmk": "Mukha-Dora", "mml": "Man Met", "mmm": "Maii", "mmn": "Mamanwa", "mmo": "Mangga Buang", "mmp": "Siawi", "mmq": "Musak", "mmr": "Western Xiangxi Miao", "mmt": "Malalamai", "mmu": "Mmaala", "mmv": "Miriti", "mmw": "Emae", "mmx": "Madak", "mmy": "Migaama", "mmz": "Mabaale", "mn": "Mongolian", "mna": "Mbula", "mnb": "Muna", "mnc": "Manchu", "mnd": "Mondé", "mne": "Naba", "mnf": "Mundani", "mng": "Eastern Mnong", "mnh": "Mono (Democratic Republic of Congo)", "mni": "Manipuri", "mnj": "Munji", "mnk": "Mandinka", "mnl": "Tiale", "mnm": "Mapena", "mnn": "Southern Mnong", "mno": "Manobo languages", "mnp": "Min Bei Chinese", "mnq": "Minriq", "mnr": "Mono (USA)", "mns": "Mansi", "mnu": "Mer", "mnv": "Rennell-Bellona", "mnw": "Mon", "mnx": "Manikion", "mny": "Manyawa", "mnz": "Moni", "moa": "Mwan", "moc": "Mocoví", "mod": "Mobilian", "moe": "Innu; Montagnais", "mog": "Mongondow", "moh": "Mohawk", "moi": "Mboi", "moj": "Monzombo", "mok": "Morori", "mom": "Mangue", "moo": "Monom", "mop": "Mopán Maya", "moq": "Mor (Bomberai Peninsula)", "mor": "Moro", "mos": "Mossi", "mot": "Barí", "mou": "Mogum", "mov": "Mohave", "mow": "Moi (Congo)", "mox": "Molima", "moy": "Shekkacho", "moz": "Mukulu; Gergiko", "mpa": "Mpoto", "mpb": "Malak Malak; Mullukmulluk", "mpc": "Mangarrayi", "mpd": "Machinere", "mpe": "Majang", "mpg": "Marba", "mph": "Maung", "mpi": "Mpade", "mpj": "Martu Wangka; Wangkajunga", "mpk": "Mbara (Chad)", "mpl": "Middle Watut", "mpm": "Yosondúa Mixtec", "mpn": "Mindiri", "mpo": "Miu", "mpp": "Migabac", "mpq": "Matís", "mpr": "Vangunu", "mps": "Dadibi", "mpt": "Mian", "mpu": "Makuráp", "mpv": "Mungkip", "mpw": "Mapidian", "mpx": "Misima-Panaeati", "mpy": "Mapia", "mpz": "Mpi", "mqa": "Maba (Indonesia)", "mqb": "Mbuko", "mqc": "Mangole", "mqe": "Matepi", "mqf": "Momuna", "mqg": "Kota Bangun Kutai Malay", "mqh": "Tlazoyaltepec Mixtec", "mqi": "Mariri", "mqj": "Mamasa", "mqk": "Rajah Kabunsuwan Manobo", "mql": "Mbelime", "mqm": "South Marquesan", "mqn": "Moronene", "mqo": "Modole", "mqp": "Manipa", "mqq": "Minokok", "mqr": "Mander", "mqs": "West Makian", "mqt": "Mok", "mqu": "Mandari", "mqv": "Mosimo", "mqw": "Murupi", "mqx": "Mamuju", "mqy": "Manggarai", "mqz": "Pano", "mr": "Marathi", "mra": "Mlabri", "mrb": "Marino", "mrc": "Maricopa", "mrd": "Western Magar", "mre": "Martha's Vineyard Sign Language", "mrf": "Elseng", "mrg": "Mising", "mrh": "Mara Chin", "mrj": "Western Mari", "mrk": "Hmwaveke", "mrl": "Mortlockese", "mrm": "Merlav; Mwerlap", "mrn": "Cheke Holo", "mro": "Mru", "mrp": "Morouas", "mrq": "North Marquesan", "mrr": "Maria (India)", "mrs": "Maragus", "mrt": "Marghi Central", "mru": "Mono (Cameroon)", "mrv": "Mangareva", "mrw": "Maranao", "mrx": "Maremgi; Dineor", "mry": "Mandaya", "mrz": "Marind", "ms": "Malay (macrolanguage)", "msb": "Masbatenyo", "msc": "Sankaran Maninka", "msd": "Yucatec Maya Sign Language", "mse": "Musey", "msf": 
"Mekwei", "msg": "Moraid", "msh": "Masikoro Malagasy", "msi": "Sabah Malay", "msj": "Ma (Democratic Republic of Congo)", "msk": "Mansaka", "msl": "Molof; Poule", "msm": "Agusan Manobo", "msn": "Vurës", "mso": "Mombum", "msp": "Maritsauá", "msq": "Caac", "msr": "Mongolian Sign Language", "mss": "West Masela", "msu": "Musom", "msv": "Maslam", "msw": "Mansoanka", "msx": "Moresada", "msy": "Aruamu", "msz": "Momare", "mt": "Maltese", "mta": "Cotabato Manobo", "mtb": "Anyin Morofo", "mtc": "Munit", "mtd": "Mualang", "mte": "Mono (Solomon Islands)", "mtf": "Murik (Papua New Guinea)", "mtg": "Una", "mth": "Munggui", "mti": "Maiwa (Papua New Guinea)", "mtj": "Moskona", "mtk": "Mbe'", "mtl": "Montol", "mtm": "Mator", "mtn": "Matagalpa", "mto": "Totontepec Mixe", "mtp": "Wichí Lhamtés Nocten", "mtq": "Muong", "mtr": "Mewari", "mts": "Yora", "mtt": "Mota", "mtu": "Tututepec Mixtec", "mtv": "Asaro'o", "mtw": "Southern Binukidnon", "mtx": "Tidaá Mixtec", "mty": "Nabi", "mua": "Mundang", "mub": "Mubi", "muc": "Ajumbu", "mud": "Mednyj Aleut", "mue": "Media Lengua", "mug": "Musgu", "muh": "Mündü", "mui": "Musi", "muj": "Mabire", "muk": "Mugom", "mum": "Maiwala", "mun": "Munda languages", "muo": "Nyong", "mup": "Malvi", "muq": "Eastern Xiangxi Miao", "mur": "Murle", "mus": "Creek", "mut": "Western Muria", "muu": "Yaaku", "muv": "Muthuvan", "mux": "Bo-Ung", "muy": "Muyang", "muz": "Mursi", "mva": "Manam", "mvb": "Mattole", "mvd": "Mamboru", "mve": "Marwari (Pakistan)", "mvf": "Peripheral Mongolian", "mvg": "Yucuañe Mixtec", "mvh": "Mulgi", "mvi": "Miyako", "mvk": "Mekmek", "mvl": "Mbara (Australia)", "mvn": "Minaveha", "mvo": "Marovo", "mvp": "Duri", "mvq": "Moere", "mvr": "Marau", "mvs": "Massep", "mvt": "Mpotovoro", "mvu": "Marfa", "mvv": "Tagal Murut", "mvw": "Machinga", "mvx": "Meoswar", "mvy": "Indus Kohistani", "mvz": "Mesqan", "mwa": "Mwatebu", "mwb": "Juwal", "mwc": "Are", "mwe": "Mwera (Chimwera)", "mwf": "Murrinh-Patha", "mwg": "Aiklep", "mwh": "Mouk-Aria", "mwi": "Labo; Ninde", "mwk": "Kita Maninkakan", "mwl": "Mirandese", "mwm": "Sar", "mwn": "Nyamwanga", "mwo": "Central Maewo", "mwp": "Kala Lagaw Ya", "mwq": "Mün Chin", "mwr": "Marwari", "mws": "Mwimbi-Muthambi", "mwt": "Moken", "mwu": "Mittu", "mwv": "Mentawai", "mww": "Hmong Daw", "mwz": "Moingi", "mxa": "Northwest Oaxaca Mixtec", "mxb": "Tezoatlán Mixtec", "mxc": "Manyika", "mxd": "Modang", "mxe": "Mele-Fila", "mxf": "Malgbe", "mxg": "Mbangala", "mxh": "Mvuba", "mxi": "Mozarabic", "mxj": "Miju-Mishmi; Geman Deng", "mxk": "Monumbo", "mxl": "Maxi Gbe", "mxm": "Meramera", "mxn": "Moi (Indonesia)", "mxo": "Mbowe", "mxp": "Tlahuitoltepec Mixe", "mxq": "Juquila Mixe", "mxr": "Murik (Malaysia)", "mxs": "Huitepec Mixtec", "mxt": "Jamiltepec Mixtec", "mxu": "Mada (Cameroon)", "mxv": "Metlatónoc Mixtec", "mxw": "Namo", "mxx": "Mahou; Mawukakan", "mxy": "Southeastern Nochixtlán Mixtec", "mxz": "Central Masela", "my": "Burmese", "myb": "Mbay", "myc": "Mayeka", "mye": "Myene", "myf": "Bambassi", "myg": "Manta", "myh": "Makah", "myj": "Mangayat", "myk": "Mamara Senoufo", "myl": "Moma", "mym": "Me'en", "myn": "Mayan languages", "myo": "Anfillo", "myp": "Pirahã", "myr": "Muniche", "mys": "Mesmes", "myu": "Mundurukú", "myv": "Erzya", "myw": "Muyuw", "myx": "Masaaba", "myy": "Macuna", "myz": "Classical Mandaic", "mza": "Santa María Zacatepec Mixtec", "mzb": "Tumzabt", "mzc": "Madagascar Sign Language", "mzd": "Malimba", "mze": "Morawa", "mzg": "Monastic Sign Language", "mzh": "Wichí Lhamtés Güisnay", "mzi": "Ixcatlán Mazatec", "mzj": "Manya", "mzk": "Nigeria 
Mambila", "mzl": "Mazatlán Mixe", "mzm": "Mumuye", "mzn": "Mazanderani", "mzo": "Matipuhy", "mzp": "Movima", "mzq": "Mori Atas", "mzr": "Marúbo", "mzs": "Macanese", "mzt": "Mintil", "mzu": "Inapang", "mzv": "Manza", "mzw": "Deg", "mzx": "Mawayana", "mzy": "Mozambican Sign Language", "mzz": "Maiadomu", "na": "Nauru", "naa": "Namla", "nab": "Southern Nambikuára", "nac": "Narak", "nae": "Naka'ela", "naf": "Nabak", "nag": "Naga Pidgin", "nah": "Nahuatl languages", "nai": "North American Indian languages", "naj": "Nalu", "nak": "Nakanai", "nal": "Nalik", "nam": "Ngan'gityemerri", "nan": "Min Nan Chinese", "nao": "Naaba", "nap": "Neapolitan", "naq": "Khoekhoe; Nama (Namibia)", "nar": "Iguta", "nas": "Naasioi", "nat": "Ca̱hungwa̱rya̱; Hungworo", "naw": "Nawuri", "nax": "Nakwi", "nay": "Ngarrindjeri", "naz": "Coatepec Nahuatl", "nb": "Norwegian Bokmål", "nba": "Nyemba", "nbb": "Ndoe", "nbc": "Chang Naga", "nbd": "Ngbinda", "nbe": "Konyak Naga", "nbg": "Nagarchal", "nbh": "Ngamo", "nbi": "Mao Naga", "nbj": "Ngarinyman", "nbk": "Nake", "nbm": "Ngbaka Ma'bo", "nbn": "Kuri", "nbo": "Nkukoli", "nbp": "Nnam", "nbq": "Nggem", "nbr": "Numana", "nbs": "Namibian Sign Language", "nbt": "Na", "nbu": "Rongmei Naga", "nbv": "Ngamambo", "nbw": "Southern Ngbandi", "nby": "Ningera", "nca": "Iyo", "ncb": "Central Nicobarese", "ncc": "Ponam", "ncd": "Nachering", "nce": "Yale", "ncf": "Notsi", "ncg": "Nisga'a", "nch": "Central Huasteca Nahuatl", "nci": "Classical Nahuatl", "ncj": "Northern Puebla Nahuatl", "nck": "Na-kara", "ncl": "Michoacán Nahuatl", "ncm": "Nambo", "ncn": "Nauna", "nco": "Sibe", "ncq": "Northern Katang", "ncr": "Ncane", "ncs": "Nicaraguan Sign Language", "nct": "Chothe Naga", "ncu": "Chumburung", "ncx": "Central Puebla Nahuatl", "ncz": "Natchez", "nd": "North Ndebele", "nda": "Ndasa", "ndb": "Kenswei Nsei", "ndc": "Ndau", "ndd": "Nde-Nsele-Nta", "ndf": "Nadruvian", "ndg": "Ndengereko", "ndh": "Ndali", "ndi": "Samba Leko", "ndj": "Ndamba", "ndk": "Ndaka", "ndl": "Ndolo", "ndm": "Ndam", "ndn": "Ngundi", "ndp": "Ndo", "ndq": "Ndombe", "ndr": "Ndoola", "nds": "Low German; Low Saxon", "ndt": "Ndunga", "ndu": "Dugun", "ndv": "Ndut", "ndw": "Ndobo", "ndx": "Nduga", "ndy": "Lutos", "ndz": "Ndogo", "ne": "Nepali (macrolanguage)", "nea": "Eastern Ngad'a", "neb": "Toura (Côte d'Ivoire)", "nec": "Nedebang", "ned": "Nde-Gbite", "nee": "Nêlêmwa-Nixumwak", "nef": "Nefamese", "neg": "Negidal", "neh": "Nyenkha", "nei": "Neo-Hittite", "nej": "Neko", "nek": "Neku", "nem": "Nemi", "nen": "Nengone", "neo": "Ná-Meo", "neq": "North Central Mixe", "ner": "Yahadian", "nes": "Bhoti Kinnauri", "net": "Nete", "neu": "Neo", "nev": "Nyaheun", "new": "Newari; Nepal Bhasa", "nex": "Neme", "ney": "Neyo", "nez": "Nez Perce", "nfa": "Dhao", "nfd": "Ahwai", "nfl": "Ayiwo; Äiwoo", "nfr": "Nafaanra", "nfu": "Mfumte", "ng": "Ndonga", "nga": "Ngbaka", "ngb": "Northern Ngbandi", "ngc": "Ngombe (Democratic Republic of Congo)", "ngd": "Ngando (Central African Republic)", "nge": "Ngemba", "ngf": "Trans-New Guinea languages", "ngg": "Ngbaka Manza", "ngh": "Nǁng", "ngi": "Ngizim", "ngj": "Ngie", "ngk": "Dalabon", "ngl": "Lomwe", "ngm": "Ngatik Men's Creole", "ngn": "Ngwo", "ngp": "Ngulu", "ngq": "Ngurimi; Ngoreme", "ngr": "Engdewu", "ngs": "Gvoko", "ngt": "Kriang; Ngeq", "ngu": "Guerrero Nahuatl", "ngv": "Nagumi", "ngw": "Ngwaba", "ngx": "Nggwahyi", "ngy": "Tibea", "ngz": "Ngungwel", "nha": "Nhanda", "nhb": "Beng", "nhc": "Tabasco Nahuatl", "nhd": "Chiripá; Ava Guaraní", "nhe": "Eastern Huasteca Nahuatl", "nhf": "Nhuwala", "nhg": "Tetelcingo 
Nahuatl", "nhh": "Nahari", "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", "nhk": "Isthmus-Cosoleacaque Nahuatl", "nhm": "Morelos Nahuatl", "nhn": "Central Nahuatl", "nho": "Takuu", "nhp": "Isthmus-Pajapan Nahuatl", "nhq": "Huaxcaleca Nahuatl", "nhr": "Naro", "nht": "Ometepec Nahuatl", "nhu": "Noone", "nhv": "Temascaltepec Nahuatl", "nhw": "Western Huasteca Nahuatl", "nhx": "Isthmus-Mecayapan Nahuatl", "nhy": "Northern Oaxaca Nahuatl", "nhz": "Santa María La Alta Nahuatl", "nia": "Nias", "nib": "Nakame", "nic": "Niger-Kordofanian languages", "nid": "Ngandi", "nie": "Niellim", "nif": "Nek", "nig": "Ngalakgan", "nih": "Nyiha (Tanzania)", "nii": "Nii", "nij": "Ngaju", "nik": "Southern Nicobarese", "nil": "Nila", "nim": "Nilamba", "nin": "Ninzo", "nio": "Nganasan", "niq": "Nandi", "nir": "Nimboran", "nis": "Nimi", "nit": "Southeastern Kolami", "niu": "Niuean", "niv": "Gilyak", "niw": "Nimo", "nix": "Hema", "niy": "Ngiti", "niz": "Ningil", "nja": "Nzanyi", "njb": "Nocte Naga", "njd": "Ndonde Hamba", "njh": "Lotha Naga", "nji": "Gudanji", "njj": "Njen", "njl": "Njalgulgule", "njm": "Angami Naga", "njn": "Liangmai Naga", "njo": "Ao Naga", "njr": "Njerep", "njs": "Nisa", "njt": "Ndyuka-Trio Pidgin", "nju": "Ngadjunmaya", "njx": "Kunyi", "njy": "Njyem", "njz": "Nyishi", "nka": "Nkoya", "nkb": "Khoibu Naga", "nkc": "Nkongho", "nkd": "Koireng", "nke": "Duke", "nkf": "Inpui Naga", "nkg": "Nekgini", "nkh": "Khezha Naga", "nki": "Thangal Naga", "nkj": "Nakai", "nkk": "Nokuku", "nkm": "Namat", "nkn": "Nkangala", "nko": "Nkonya", "nkp": "Niuatoputapu", "nkq": "Nkami", "nkr": "Nukuoro", "nks": "North Asmat", "nkt": "Nyika (Tanzania)", "nku": "Bouna Kulango", "nkv": "Nyika (Malawi and Zambia)", "nkw": "Nkutu", "nkx": "Nkoroo", "nkz": "Nkari", "nl": "Dutch; Flemish", "nla": "Ngombale", "nlc": "Nalca", "nle": "East Nyala", "nlg": "Gela", "nli": "Grangali", "nlj": "Nyali", "nlk": "Ninia Yali", "nll": "Nihali", "nlm": "Mankiyali", "nlo": "Ngul", "nlq": "Lao Naga", "nlu": "Nchumbulu", "nlv": "Orizaba Nahuatl", "nlw": "Walangama", "nlx": "Nahali", "nly": "Nyamal", "nlz": "Nalögo", "nma": "Maram Naga", "nmb": "Big Nambas; V'ënen Taut", "nmc": "Ngam", "nmd": "Ndumu", "nme": "Mzieme Naga", "nmf": "Tangkhul Naga (India)", "nmg": "Kwasio", "nmh": "Monsang Naga", "nmi": "Nyam", "nmj": "Ngombe (Central African Republic)", "nmk": "Namakura", "nml": "Ndemli", "nmm": "Manangba", "nmn": "ǃXóõ", "nmo": "Moyon Naga", "nmp": "Nimanbur", "nmq": "Nambya", "nmr": "Nimbari", "nms": "Letemboi", "nmt": "Namonuito", "nmu": "Northeast Maidu", "nmv": "Ngamini", "nmw": "Nimoa; Rifao", "nmx": "Nama (Papua New Guinea)", "nmy": "Namuyi", "nmz": "Nawdm", "nn": "Norwegian Nynorsk", "nna": "Nyangumarta", "nnb": "Nande", "nnc": "Nancere", "nnd": "West Ambae", "nne": "Ngandyera", "nnf": "Ngaing", "nng": "Maring Naga", "nnh": "Ngiemboon", "nni": "North Nuaulu", "nnj": "Nyangatom", "nnk": "Nankina", "nnl": "Northern Rengma Naga", "nnm": "Namia", "nnn": "Ngete", "nnp": "Wancho Naga", "nnq": "Ngindo", "nnr": "Narungga", "nnt": "Nanticoke", "nnu": "Dwang", "nnv": "Nugunu (Australia)", "nnw": "Southern Nuni", "nny": "Nyangga", "nnz": "Nda'nda'", "no": "Norwegian", "noa": "Woun Meu", "noc": "Nuk", "nod": "Northern Thai", "noe": "Nimadi", "nof": "Nomane", "nog": "Nogai", "noh": "Nomu", "noi": "Noiri", "noj": "Nonuya", "nok": "Nooksack", "nol": "Nomlaki", "nom": "Nocamán", "non": "Old Norse", "nop": "Numanggang", "noq": "Ngongo", "nos": "Eastern Nisu", "not": "Nomatsiguenga", "nou": "Ewage-Notu", "nov": "Novial", "now": "Nyambo", "noy": "Noy", "noz": 
"Nayi", "npa": "Nar Phu", "npb": "Nupbikha", "npg": "Ponyo-Gongwang Naga", "nph": "Phom Naga", "npi": "Nepali (individual language)", "npl": "Southeastern Puebla Nahuatl", "npn": "Mondropolon", "npo": "Pochuri Naga", "nps": "Nipsan", "npu": "Puimei Naga", "npx": "Noipx", "npy": "Napu", "nqg": "Southern Nago", "nqk": "Kura Ede Nago", "nql": "Ngendelengo", "nqm": "Ndom", "nqn": "Nen", "nqo": "N'Ko; N’Ko", "nqq": "Kyan-Karyaw Naga", "nqt": "Nteng", "nqy": "Akyaung Ari Naga", "nr": "South Ndebele", "nra": "Ngom", "nrb": "Nara", "nrc": "Noric", "nre": "Southern Rengma Naga", "nrf": "Jèrriais; Guernésiais", "nrg": "Narango", "nri": "Chokri Naga", "nrk": "Ngarla", "nrl": "Ngarluma", "nrm": "Narom", "nrn": "Norn", "nrp": "North Picene", "nrr": "Norra; Nora", "nrt": "Northern Kalapuya", "nru": "Narua", "nrx": "Ngurmbur", "nrz": "Lala", "nsa": "Sangtam Naga", "nsb": "Lower Nossob", "nsc": "Nshi", "nsd": "Southern Nisu", "nse": "Nsenga", "nsf": "Northwestern Nisu", "nsg": "Ngasa", "nsh": "Ngoshie", "nsi": "Nigerian Sign Language", "nsk": "Naskapi", "nsl": "Norwegian Sign Language", "nsm": "Sumi Naga", "nsn": "Nehan", "nso": "Pedi; Northern Sotho; Sepedi", "nsp": "Nepalese Sign Language", "nsq": "Northern Sierra Miwok", "nsr": "Maritime Sign Language", "nss": "Nali", "nst": "Tase Naga", "nsu": "Sierra Negra Nahuatl", "nsv": "Southwestern Nisu", "nsw": "Navut", "nsx": "Nsongo", "nsy": "Nasal", "nsz": "Nisenan", "ntd": "Northern Tidung", "nte": "Nathembo", "ntg": "Ngantangarra", "nti": "Natioro", "ntj": "Ngaanyatjarra", "ntk": "Ikoma-Nata-Isenye", "ntm": "Nateni", "nto": "Ntomba", "ntp": "Northern Tepehuan", "ntr": "Delo", "ntu": "Natügu", "ntw": "Nottoway", "ntx": "Tangkhul Naga (Myanmar)", "nty": "Mantsi", "ntz": "Natanzi", "nua": "Yuanga", "nub": "Nubian languages", "nuc": "Nukuini", "nud": "Ngala", "nue": "Ngundu", "nuf": "Nusu", "nug": "Nungali", "nuh": "Ndunda", "nui": "Ngumbi", "nuj": "Nyole", "nuk": "Nuu-chah-nulth; Nuuchahnulth", "nul": "Nusa Laut", "num": "Niuafo'ou", "nun": "Anong", "nuo": "Nguôn", "nup": "Nupe-Nupe-Tako", "nuq": "Nukumanu", "nur": "Nukuria", "nus": "Nuer", "nut": "Nung (Viet Nam)", "nuu": "Ngbundu", "nuv": "Northern Nuni", "nuw": "Nguluwan", "nux": "Mehek", "nuy": "Nunggubuyu", "nuz": "Tlamacazapa Nahuatl", "nv": "Navajo; Navaho", "nvh": "Nasarian", "nvm": "Namiae", "nvo": "Nyokon", "nwa": "Nawathinehena", "nwb": "Nyabwa", "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", "nwe": "Ngwe", "nwg": "Ngayawung", "nwi": "Southwest Tanna", "nwm": "Nyamusa-Molo", "nwo": "Nauo", "nwr": "Nawaru", "nww": "Ndwewe", "nwx": "Middle Newar", "nwy": "Nottoway-Meherrin", "nxa": "Nauete", "nxd": "Ngando (Democratic Republic of Congo)", "nxe": "Nage", "nxg": "Ngad'a", "nxi": "Nindi", "nxk": "Koki Naga", "nxl": "South Nuaulu", "nxm": "Numidian", "nxn": "Ngawun", "nxo": "Ndambomo", "nxq": "Naxi", "nxr": "Ninggerum", "nxx": "Nafri", "ny": "Nyanja; Chewa; Chichewa", "nyb": "Nyangbo", "nyc": "Nyanga-li", "nyd": "Nyore; Olunyole", "nye": "Nyengo", "nyf": "Giryama; Kigiryama", "nyg": "Nyindu", "nyh": "Nyikina", "nyi": "Ama (Sudan)", "nyj": "Nyanga", "nyk": "Nyaneka", "nyl": "Nyeu", "nym": "Nyamwezi", "nyn": "Nyankole", "nyo": "Nyoro", "nyp": "Nyang'i", "nyq": "Nayini", "nyr": "Nyiha (Malawi)", "nys": "Nyungar", "nyt": "Nyawaygi", "nyu": "Nyungwe", "nyv": "Nyulnyul", "nyw": "Nyaw", "nyx": "Nganyaywana", "nyy": "Nyakyusa-Ngonde", "nza": "Tigon Mbembe", "nzb": "Njebi", "nzd": "Nzadi", "nzi": "Nzima", "nzk": "Nzakara", "nzm": "Zeme Naga", "nzs": "New Zealand Sign Language", "nzu": 
"Teke-Nzikou", "nzy": "Nzakambay", "nzz": "Nanga Dama Dogon", "oaa": "Orok", "oac": "Oroch", "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", "oav": "Old Avar", "obi": "Obispeño", "obk": "Southern Bontok", "obl": "Oblo", "obm": "Moabite", "obo": "Obo Manobo", "obr": "Old Burmese", "obt": "Old Breton", "obu": "Obulom", "oc": "Occitan (post 1500)", "oca": "Ocaina", "och": "Old Chinese", "ocm": "Old Cham", "oco": "Old Cornish", "ocu": "Atzingo Matlatzinca", "oda": "Odut", "odk": "Od", "odt": "Old Dutch", "odu": "Odual", "ofo": "Ofo", "ofs": "Old Frisian", "ofu": "Efutop", "ogb": "Ogbia", "ogc": "Ogbah", "oge": "Old Georgian", "ogg": "Ogbogolo", "ogo": "Khana", "ogu": "Ogbronuagum", "oht": "Old Hittite", "ohu": "Old Hungarian", "oia": "Oirata", "oie": "Okolie", "oin": "Inebu One", "oj": "Ojibwa", "ojb": "Northwestern Ojibwa", "ojc": "Central Ojibwa", "ojg": "Eastern Ojibwa", "ojp": "Old Japanese", "ojs": "Severn Ojibwa", "ojv": "Ontong Java", "ojw": "Western Ojibwa", "oka": "Okanagan", "okb": "Okobo", "okc": "Kobo", "okd": "Okodia", "oke": "Okpe (Southwestern Edo)", "okg": "Koko Babangk", "okh": "Koresh-e Rostam", "oki": "Okiek", "okj": "Oko-Juwoi", "okk": "Kwamtim One", "okl": "Old Kentish Sign Language", "okm": "Middle Korean (10th-16th cent.)", "okn": "Oki-No-Erabu", "oko": "Old Korean (3rd-9th cent.)", "okr": "Kirike", "oks": "Oko-Eni-Osayen", "oku": "Oku", "okv": "Orokaiva", "okx": "Okpe (Northwestern Edo)", "okz": "Old Khmer", "ola": "Walungge", "old": "Mochi", "ole": "Olekha", "olk": "Olkol", "olm": "Oloma", "olo": "Livvi", "olr": "Olrat", "olt": "Old Lithuanian", "olu": "Kuvale", "om": "Oromo", "oma": "Omaha-Ponca", "omb": "East Ambae", "omc": "Mochica", "omg": "Omagua", "omi": "Omi", "omk": "Omok", "oml": "Ombo", "omn": "Minoan", "omo": "Utarmbung", "omp": "Old Manipuri", "omq": "Oto-Manguean languages", "omr": "Old Marathi", "omt": "Omotik", "omu": "Omurano", "omv": "Omotic languages", "omw": "South Tairora", "omx": "Old Mon", "omy": "Old Malay", "ona": "Ona", "onb": "Lingao", "one": "Oneida", "ong": "Olo", "oni": "Onin", "onj": "Onjob", "onk": "Kabore One", "onn": "Onobasulu", "ono": "Onondaga", "onp": "Sartang", "onr": "Northern One", "ons": "Ono", "ont": "Ontenu", "onu": "Unua", "onw": "Old Nubian", "onx": "Onin Based Pidgin", "ood": "Tohono O'odham", "oog": "Ong", "oon": "Önge", "oor": "Oorlams", "oos": "Old Ossetic", "opa": "Okpamheri", "opk": "Kopkaka", "opm": "Oksapmin", "opo": "Opao", "opt": "Opata", "opy": "Ofayé", "or": "Oriya (macrolanguage); Odia (macrolanguage)", "ora": "Oroha", "orc": "Orma", "ore": "Orejón", "org": "Oring", "orh": "Oroqen", "orn": "Orang Kanaq", "oro": "Orokolo", "orr": "Oruma", "ors": "Orang Seletar", "ort": "Adivasi Oriya", "oru": "Ormuri", "orv": "Old Russian", "orw": "Oro Win", "orx": "Oro", "ory": "Odia (individual language); Oriya (individual language)", "orz": "Ormu", "os": "Ossetian; Ossetic", "osa": "Osage", "osc": "Oscan", "osi": "Osing", "osn": "Old Sundanese", "oso": "Ososo", "osp": "Old Spanish", "ost": "Osatu", "osu": "Southern One", "osx": "Old Saxon", "ota": "Ottoman Turkish (1500-1928)", "otb": "Old Tibetan", "otd": "Ot Danum", "ote": "Mezquital Otomi", "oti": "Oti", "otk": "Old Turkish", "otl": "Tilapa Otomi", "otm": "Eastern Highland Otomi", "otn": "Tenango Otomi", "oto": "Otomian languages", "otq": "Querétaro Otomi", "otr": "Otoro", "ots": "Estado de México Otomi", "ott": "Temoaya Otomi", "otu": "Otuke", "otw": "Ottawa", "otx": "Texcatepec Otomi", "oty": "Old Tamil", "otz": "Ixtenco Otomi", "oua": "Tagargrent", 
"oub": "Glio-Oubi", "oue": "Oune", "oui": "Old Uighur", "oum": "Ouma", "ovd": "Elfdalian; Övdalian", "owi": "Owiniga", "owl": "Old Welsh", "oyb": "Oy", "oyd": "Oyda", "oym": "Wayampi", "oyy": "Oya'oya", "ozm": "Koonzime", "pa": "Panjabi; Punjabi", "paa": "Papuan languages", "pab": "Parecís", "pac": "Pacoh", "pad": "Paumarí", "pae": "Pagibete", "paf": "Paranawát", "pag": "Pangasinan", "pah": "Tenharim", "pai": "Pe", "pak": "Parakanã", "pal": "Pahlavi", "pam": "Pampanga; Kapampangan", "pao": "Northern Paiute", "pap": "Papiamento", "paq": "Parya", "par": "Panamint; Timbisha", "pas": "Papasena", "pau": "Palauan", "pav": "Pakaásnovos", "paw": "Pawnee", "pax": "Pankararé", "pay": "Pech", "paz": "Pankararú", "pbb": "Páez", "pbc": "Patamona", "pbe": "Mezontla Popoloca", "pbf": "Coyotepec Popoloca", "pbg": "Paraujano", "pbh": "E'ñapa Woromaipu", "pbi": "Parkwa", "pbl": "Mak (Nigeria)", "pbm": "Puebla Mazatec", "pbn": "Kpasam", "pbo": "Papel", "pbp": "Badyara", "pbr": "Pangwa", "pbs": "Central Pame", "pbt": "Southern Pashto", "pbu": "Northern Pashto", "pbv": "Pnar", "pby": "Pyu (Papua New Guinea)", "pca": "Santa Inés Ahuatempan Popoloca", "pcb": "Pear", "pcc": "Bouyei", "pcd": "Picard", "pce": "Ruching Palaung", "pcf": "Paliyan", "pcg": "Paniya", "pch": "Pardhan", "pci": "Duruwa", "pcj": "Parenga", "pck": "Paite Chin", "pcl": "Pardhi", "pcm": "Nigerian Pidgin", "pcn": "Piti", "pcp": "Pacahuara", "pcw": "Pyapun", "pda": "Anam", "pdc": "Pennsylvania German", "pdi": "Pa Di", "pdn": "Podena; Fedan", "pdo": "Padoe", "pdt": "Plautdietsch", "pdu": "Kayan", "pea": "Peranakan Indonesian", "peb": "Eastern Pomo", "ped": "Mala (Papua New Guinea)", "pee": "Taje", "pef": "Northeastern Pomo", "peg": "Pengo", "peh": "Bonan", "pei": "Chichimeca-Jonaz", "pej": "Northern Pomo", "pek": "Penchal", "pel": "Pekal", "pem": "Phende", "peo": "Old Persian (ca. 
600-400 B.C.)", "pep": "Kunja", "peq": "Southern Pomo", "pes": "Iranian Persian", "pev": "Pémono", "pex": "Petats", "pey": "Petjo", "pez": "Eastern Penan", "pfa": "Pááfang", "pfe": "Pere", "pfl": "Pfaelzisch", "pga": "Sudanese Creole Arabic", "pgd": "Gāndhārī", "pgg": "Pangwali", "pgi": "Pagi", "pgk": "Rerep", "pgl": "Primitive Irish", "pgn": "Paelignian", "pgs": "Pangseng", "pgu": "Pagu", "pgz": "Papua New Guinean Sign Language", "pha": "Pa-Hng", "phd": "Phudagi", "phg": "Phuong", "phh": "Phukha", "phi": "Philippine languages", "phj": "Pahari", "phk": "Phake", "phl": "Phalura; Palula", "phm": "Phimbi", "phn": "Phoenician", "pho": "Phunoi", "phq": "Phana'", "phr": "Pahari-Potwari", "pht": "Phu Thai", "phu": "Phuan", "phv": "Pahlavani", "phw": "Phangduwali", "pi": "Pali", "pia": "Pima Bajo", "pib": "Yine", "pic": "Pinji", "pid": "Piaroa", "pie": "Piro", "pif": "Pingelapese", "pig": "Pisabo", "pih": "Pitcairn-Norfolk", "pij": "Pijao", "pil": "Yom", "pim": "Powhatan", "pin": "Piame", "pio": "Piapoco", "pip": "Pero", "pir": "Piratapuyo", "pis": "Pijin", "pit": "Pitta Pitta", "piu": "Pintupi-Luritja", "piv": "Pileni; Vaeakau-Taumako", "piw": "Pimbwe", "pix": "Piu", "piy": "Piya-Kwonci", "piz": "Pije", "pjt": "Pitjantjatjara", "pka": "Ardhamāgadhī Prākrit", "pkb": "Pokomo; Kipfokomo", "pkc": "Paekche", "pkg": "Pak-Tong", "pkh": "Pankhu", "pkn": "Pakanha", "pko": "Pökoot", "pkp": "Pukapuka", "pkr": "Attapady Kurumba", "pks": "Pakistan Sign Language", "pkt": "Maleng", "pku": "Paku", "pl": "Polish", "pla": "Miani", "plb": "Polonombauk", "plc": "Central Palawano", "pld": "Polari", "ple": "Palu'e", "plf": "Central Malayo-Polynesian languages", "plg": "Pilagá", "plh": "Paulohi", "plj": "Polci", "plk": "Kohistani Shina", "pll": "Shwe Palaung", "pln": "Palenquero", "plo": "Oluta Popoluca", "plq": "Palaic", "plr": "Palaka Senoufo", "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", "plt": "Plateau Malagasy", "plu": "Palikúr", "plv": "Southwest Palawano", "plw": "Brooke's Point Palawano", "ply": "Bolyu", "plz": "Paluan", "pma": "Paama", "pmb": "Pambia", "pmd": "Pallanganmiddang", "pme": "Pwaamei", "pmf": "Pamona", "pmh": "Māhārāṣṭri Prākrit", "pmi": "Northern Pumi", "pmj": "Southern Pumi", "pmk": "Pamlico", "pml": "Lingua Franca", "pmm": "Pomo", "pmn": "Pam", "pmo": "Pom", "pmq": "Northern Pame", "pmr": "Paynamar", "pms": "Piemontese", "pmt": "Tuamotuan", "pmw": "Plains Miwok", "pmx": "Poumei Naga", "pmy": "Papuan Malay", "pmz": "Southern Pame", "pna": "Punan Bah-Biau", "pnb": "Western Panjabi", "pnc": "Pannei", "pnd": "Mpinda", "pne": "Western Penan", "png": "Pangu; Pongu", "pnh": "Penrhyn", "pni": "Aoheng", "pnj": "Pinjarup", "pnk": "Paunaka", "pnl": "Paleni", "pnm": "Punan Batu 1", "pnn": "Pinai-Hagahai", "pno": "Panobo", "pnp": "Pancana", "pnq": "Pana (Burkina Faso)", "pnr": "Panim", "pns": "Ponosakan", "pnt": "Pontic", "pnu": "Jiongnai Bunu", "pnv": "Pinigura", "pnw": "Banyjima; Panytyima", "pnx": "Phong-Kniang", "pny": "Pinyin", "pnz": "Pana (Central African Republic)", "poc": "Poqomam", "poe": "San Juan Atzingo Popoloca", "pof": "Poke", "pog": "Potiguára", "poh": "Poqomchi'", "poi": "Highland Popoluca", "pok": "Pokangá", "pom": "Southeastern Pomo", "pon": "Pohnpeian", "poo": "Central Pomo", "pop": "Pwapwâ", "poq": "Texistepec Popoluca", "pos": "Sayula Popoluca", "pot": "Potawatomi", "pov": "Upper Guinea Crioulo", "pow": "San Felipe Otlaltepec Popoloca", "pox": "Polabian", "poy": "Pogolo", "poz": "Malayo-Polynesian languages", "ppe": "Papi", "ppi": "Paipai", "ppk": "Uma", 
"ppl": "Pipil; Nicarao", "ppm": "Papuma", "ppn": "Papapana", "ppo": "Folopa", "ppp": "Pelende", "ppq": "Pei", "pps": "San Luís Temalacayuca Popoloca", "ppt": "Pare", "ppu": "Papora", "pqa": "Pa'a", "pqe": "Eastern Malayo-Polynesian languages", "pqm": "Malecite-Passamaquoddy", "pqw": "Western Malayo-Polynesian languages", "pra": "Prakrit languages", "prc": "Parachi", "prd": "Parsi-Dari", "pre": "Principense", "prf": "Paranan", "prg": "Prussian", "prh": "Porohanon", "pri": "Paicî", "prk": "Parauk", "prl": "Peruvian Sign Language", "prm": "Kibiri", "prn": "Prasuni", "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", "prp": "Parsi", "prq": "Ashéninka Perené", "prr": "Puri", "prs": "Dari; Afghan Persian", "prt": "Phai", "pru": "Puragi", "prw": "Parawen", "prx": "Purik", "prz": "Providencia Sign Language", "ps": "Pushto; Pashto", "psa": "Asue Awyu", "psc": "Iranian Sign Language; Persian Sign Language", "psd": "Plains Indian Sign Language", "pse": "Central Malay", "psg": "Penang Sign Language", "psh": "Southwest Pashai; Southwest Pashayi", "psi": "Southeast Pashai; Southeast Pashayi", "psl": "Puerto Rican Sign Language", "psm": "Pauserna", "psn": "Panasuan", "pso": "Polish Sign Language", "psp": "Philippine Sign Language", "psq": "Pasi", "psr": "Portuguese Sign Language", "pss": "Kaulong", "pst": "Central Pashto", "psu": "Sauraseni Prākrit", "psw": "Port Sandwich", "psy": "Piscataway", "pt": "Portuguese", "pta": "Pai Tavytera", "pth": "Pataxó Hã-Ha-Hãe", "pti": "Pindiini; Wangkatha", "ptn": "Patani", "pto": "Zo'é", "ptp": "Patep", "ptq": "Pattapu", "ptr": "Piamatsina", "ptt": "Enrekang", "ptu": "Bambam", "ptv": "Port Vato", "ptw": "Pentlatch", "pty": "Pathiya", "pua": "Western Highland Purepecha", "pub": "Purum", "puc": "Punan Merap", "pud": "Punan Aput", "pue": "Puelche", "puf": "Punan Merah", "pug": "Phuie", "pui": "Puinave", "puj": "Punan Tubu", "pum": "Puma", "puo": "Puoc", "pup": "Pulabu", "puq": "Puquina", "pur": "Puruborá", "put": "Putoh", "puu": "Punu", "puw": "Puluwatese", "pux": "Puare", "puy": "Purisimeño", "pwa": "Pawaia", "pwb": "Panawa", "pwg": "Gapapaiwa", "pwi": "Patwin", "pwm": "Molbog", "pwn": "Paiwan", "pwo": "Pwo Western Karen", "pwr": "Powari", "pww": "Pwo Northern Karen", "pxm": "Quetzaltepec Mixe", "pye": "Pye Krumen", "pym": "Fyam", "pyn": "Poyanáwa", "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", "pyu": "Puyuma", "pyx": "Pyu (Myanmar)", "pyy": "Pyen", "pzh": "Pazeh", "pzn": "Jejara Naga; Para Naga", "qu": "Quechua", "qua": "Quapaw", "qub": "Huallaga Huánuco Quechua", "quc": "K'iche'; Quiché", "qud": "Calderón Highland Quichua", "quf": "Lambayeque Quechua", "qug": "Chimborazo Highland Quichua", "quh": "South Bolivian Quechua", "qui": "Quileute", "quk": "Chachapoyas Quechua", "qul": "North Bolivian Quechua", "qum": "Sipacapense", "qun": "Quinault", "qup": "Southern Pastaza Quechua", "quq": "Quinqui", "qur": "Yanahuanca Pasco Quechua", "qus": "Santiago del Estero Quichua", "quv": "Sacapulteco", "quw": "Tena Lowland Quichua", "qux": "Yauyos Quechua", "quy": "Ayacucho Quechua", "quz": "Cusco Quechua", "qva": "Ambo-Pasco Quechua", "qvc": "Cajamarca Quechua", "qve": "Eastern Apurímac Quechua", "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", "qvi": "Imbabura Highland Quichua", "qvj": "Loja Highland Quichua", "qvl": "Cajatambo North Lima Quechua", "qvm": "Margos-Yarowilca-Lauricocha Quechua", "qvn": "North Junín Quechua", "qvo": "Napo Lowland Quechua", "qvp": "Pacaraos Quechua", "qvs": "San Martín Quechua", "qvw": "Huaylla Wanca Quechua", "qvy": "Queyu", 
"qvz": "Northern Pastaza Quichua", "qwa": "Corongo Ancash Quechua", "qwc": "Classical Quechua", "qwe": "Quechuan (family)", "qwh": "Huaylas Ancash Quechua", "qwm": "Kuman (Russia)", "qws": "Sihuas Ancash Quechua", "qwt": "Kwalhioqua-Tlatskanai", "qxa": "Chiquián Ancash Quechua", "qxc": "Chincha Quechua", "qxh": "Panao Huánuco Quechua", "qxl": "Salasaca Highland Quichua", "qxn": "Northern Conchucos Ancash Quechua", "qxo": "Southern Conchucos Ancash Quechua", "qxp": "Puno Quechua", "qxq": "Qashqa'i", "qxr": "Cañar Highland Quichua", "qxs": "Southern Qiang", "qxt": "Santa Ana de Tusi Pasco Quechua", "qxu": "Arequipa-La Unión Quechua", "qxw": "Jauja Wanca Quechua", "qya": "Quenya", "qyp": "Quiripi", "raa": "Dungmali", "rab": "Camling", "rac": "Rasawa", "rad": "Rade", "raf": "Western Meohang", "rag": "Logooli; Lulogooli", "rah": "Rabha", "rai": "Ramoaaina", "raj": "Rajasthani", "rak": "Tulu-Bohuai", "ral": "Ralte", "ram": "Canela", "ran": "Riantana", "rao": "Rao", "rap": "Rapanui", "raq": "Saam", "rar": "Rarotongan; Cook Islands Maori", "ras": "Tegali", "rat": "Razajerdi", "rau": "Raute", "rav": "Sampang", "raw": "Rawang", "rax": "Rang", "ray": "Rapa", "raz": "Rahambuu", "rbb": "Rumai Palaung", "rbk": "Northern Bontok", "rbl": "Miraya Bikol", "rbp": "Barababaraba", "rcf": "Réunion Creole French", "rdb": "Rudbari", "rea": "Rerau", "reb": "Rembong", "ree": "Rejang Kayan", "reg": "Kara (Tanzania)", "rei": "Reli", "rej": "Rejang", "rel": "Rendille", "rem": "Remo", "ren": "Rengao", "rer": "Rer Bare", "res": "Reshe", "ret": "Retta", "rey": "Reyesano", "rga": "Roria", "rge": "Romano-Greek", "rgk": "Rangkas", "rgn": "Romagnol", "rgr": "Resígaro", "rgs": "Southern Roglai", "rgu": "Ringgou", "rhg": "Rohingya", "rhp": "Yahang", "ria": "Riang (India)", "rib": "Bribri Sign Language", "rif": "Tarifit", "ril": "Riang Lang; Riang (Myanmar)", "rim": "Nyaturu", "rin": "Nungu", "rir": "Ribun", "rit": "Ritharrngu", "riu": "Riung", "rjg": "Rajong", "rji": "Raji", "rjs": "Rajbanshi", "rka": "Kraol", "rkb": "Rikbaktsa", "rkh": "Rakahanga-Manihiki", "rki": "Rakhine", "rkm": "Marka", "rkt": "Rangpuri; Kamta", "rkw": "Arakwal", "rm": "Romansh", "rma": "Rama", "rmb": "Rembarrnga", "rmc": "Carpathian Romani", "rmd": "Traveller Danish", "rme": "Angloromani", "rmf": "Kalo Finnish Romani", "rmg": "Traveller Norwegian", "rmh": "Murkim", "rmi": "Lomavren", "rmk": "Romkun", "rml": "Baltic Romani", "rmm": "Roma", "rmn": "Balkan Romani", "rmo": "Sinte Romani", "rmp": "Rempi", "rmq": "Caló", "rms": "Romanian Sign Language", "rmt": "Domari", "rmu": "Tavringer Romani", "rmv": "Romanova", "rmw": "Welsh Romani", "rmx": "Romam", "rmy": "Vlax Romani", "rmz": "Marma", "rn": "Rundi", "rnb": "Brunca Sign Language", "rnd": "Ruund", "rng": "Ronga", "rnl": "Ranglong", "rnn": "Roon", "rnp": "Rongpo", "rnr": "Nari Nari", "rnw": "Rungwa", "ro": "Romanian; Moldavian; Moldovan", "roa": "Romance languages", "rob": "Tae'", "roc": "Cacgia Roglai", "rod": "Rogo", "roe": "Ronji", "rof": "Rombo", "rog": "Northern Roglai", "rol": "Romblomanon", "rom": "Romany", "roo": "Rotokas", "rop": "Kriol", "ror": "Rongga", "rou": "Runga", "row": "Dela-Oenale", "rpn": "Repanbitip", "rpt": "Rapting", "rri": "Ririo", "rro": "Waima", "rrt": "Arritinngithigh", "rsb": "Romano-Serbian", "rsk": "Ruthenian; Rusyn", "rsl": "Russian Sign Language", "rsm": "Miriwoong Sign Language", "rsn": "Rwandan Sign Language", "rtc": "Rungtu Chin", "rth": "Ratahan", "rtm": "Rotuman", "rts": "Yurats", "rtw": "Rathawi", "ru": "Russian", "rub": "Gungu", "ruc": "Ruuli", "rue": "Rusyn", "ruf": 
"Luguru", "rug": "Roviana", "ruh": "Ruga", "rui": "Rufiji", "ruk": "Che", "ruo": "Istro Romanian", "rup": "Macedo-Romanian; Aromanian; Arumanian", "ruq": "Megleno Romanian", "rut": "Rutul", "ruu": "Lanas Lobu", "ruy": "Mala (Nigeria)", "ruz": "Ruma", "rw": "Kinyarwanda", "rwa": "Rawo", "rwk": "Rwa", "rwl": "Ruwila", "rwm": "Amba (Uganda)", "rwo": "Rawa", "rwr": "Marwari (India)", "rxd": "Ngardi", "rxw": "Karuwali; Garuwali", "ryn": "Northern Amami-Oshima", "rys": "Yaeyama", "ryu": "Central Okinawan", "rzh": "Rāziḥī", "sa": "Sanskrit", "saa": "Saba", "sab": "Buglere", "sac": "Meskwaki", "sad": "Sandawe", "sae": "Sabanê", "saf": "Safaliba", "sah": "Yakut", "sai": "South American Indian languages", "saj": "Sahu", "sak": "Sake", "sal": "Salishan languages", "sam": "Samaritan Aramaic", "sao": "Sause", "saq": "Samburu", "sar": "Saraveca", "sas": "Sasak", "sat": "Santali", "sau": "Saleman", "sav": "Saafi-Saafi", "saw": "Sawi", "sax": "Sa", "say": "Saya", "saz": "Saurashtra", "sba": "Ngambay", "sbb": "Simbo", "sbc": "Kele (Papua New Guinea)", "sbd": "Southern Samo", "sbe": "Saliba", "sbf": "Chabu; Shabo", "sbg": "Seget", "sbh": "Sori-Harengan", "sbi": "Seti", "sbj": "Surbakhal", "sbk": "Safwa", "sbl": "Botolan Sambal", "sbm": "Sagala", "sbn": "Sindhi Bhil", "sbo": "Sabüm", "sbp": "Sangu (Tanzania)", "sbq": "Sileibi", "sbr": "Sembakung Murut", "sbs": "Subiya", "sbt": "Kimki", "sbu": "Stod Bhoti", "sbv": "Sabine", "sbw": "Simba", "sbx": "Seberuang", "sby": "Soli", "sbz": "Sara Kaba", "sc": "Sardinian", "scb": "Chut", "sce": "Dongxiang", "scf": "San Miguel Creole French", "scg": "Sanggau", "sch": "Sakachep", "sci": "Sri Lankan Creole Malay", "sck": "Sadri", "scl": "Shina", "scn": "Sicilian", "sco": "Scots", "scp": "Hyolmo; Helambu Sherpa", "scq": "Sa'och", "scs": "North Slavey", "sct": "Southern Katang", "scu": "Shumcho", "scv": "Sheni", "scw": "Sha", "scx": "Sicel", "sd": "Sindhi", "sda": "Toraja-Sa'dan", "sdb": "Shabak", "sdc": "Sassarese Sardinian", "sde": "Surubu", "sdf": "Sarli", "sdg": "Savi", "sdh": "Southern Kurdish", "sdj": "Suundi", "sdk": "Sos Kundi", "sdl": "Saudi Arabian Sign Language", "sdn": "Gallurese Sardinian", "sdo": "Bukar-Sadung Bidayuh", "sdp": "Sherdukpen", "sdq": "Semandang", "sdr": "Oraon Sadri", "sds": "Sened", "sdt": "Shuadit", "sdu": "Sarudu", "sdv": "Eastern Sudanic languages", "sdx": "Sibu Melanau", "sdz": "Sallands", "se": "Northern Sami", "sea": "Semai", "seb": "Shempire Senoufo", "sec": "Sechelt", "sed": "Sedang", "see": "Seneca", "sef": "Cebaara Senoufo", "seg": "Segeju", "seh": "Sena", "sei": "Seri", "sej": "Sene", "sek": "Sekani", "sel": "Selkup", "sem": "Semitic languages", "sen": "Nanerigé Sénoufo", "seo": "Suarmin", "sep": "Sìcìté Sénoufo", "seq": "Senara Sénoufo", "ser": "Serrano", "ses": "Koyraboro Senni Songhai", "set": "Sentani", "seu": "Serui-Laut", "sev": "Nyarafolo Senoufo", "sew": "Sewa Bay", "sey": "Secoya", "sez": "Senthang Chin", "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", "sfe": "Eastern Subanen", "sfm": "Small Flowery Miao", "sfs": "South African Sign Language", "sfw": "Sehwi", "sg": "Sango", "sga": "Old Irish (to 900)", "sgb": "Mag-antsi Ayta", "sgc": "Kipsigis", "sgd": "Surigaonon", "sge": "Segai", "sgg": "Swiss-German Sign Language", "sgh": "Shughni", "sgi": "Suga", "sgj": "Surgujia", "sgk": "Sangkong", "sgm": "Singa", "sgn": "Sign languages", "sgp": "Singpho", "sgr": "Sangisari", "sgs": "Samogitian", "sgt": "Brokpake", "sgu": "Salas", "sgw": "Sebat Bet Gurage", "sgx": "Sierra Leone Sign Language", "sgy": 
"Sanglechi", "sgz": "Sursurunga", "sh": "Serbo-Croatian", "sha": "Shall-Zwall", "shb": "Ninam", "shc": "Sonde", "shd": "Kundal Shahi", "she": "Sheko", "shg": "Shua", "shh": "Shoshoni", "shi": "Tachelhit", "shj": "Shatt", "shk": "Shilluk", "shl": "Shendu", "shm": "Shahrudi", "shn": "Shan", "sho": "Shanga", "shp": "Shipibo-Conibo", "shq": "Sala", "shr": "Shi", "shs": "Shuswap", "sht": "Shasta", "shu": "Chadian Arabic", "shv": "Shehri", "shw": "Shwai", "shx": "She", "shy": "Tachawit", "shz": "Syenara Senoufo", "si": "Sinhala; Sinhalese", "sia": "Akkala Sami", "sib": "Sebop", "sid": "Sidamo", "sie": "Simaa", "sif": "Siamou", "sig": "Paasaal", "sih": "Zire; Sîshëë", "sii": "Shom Peng", "sij": "Numbami", "sik": "Sikiana", "sil": "Tumulung Sisaala", "sim": "Mende (Papua New Guinea)", "sio": "Siouan languages", "sip": "Sikkimese", "siq": "Sonia", "sir": "Siri", "sis": "Siuslaw", "sit": "Sino-Tibetan languages", "siu": "Sinagen", "siv": "Sumariup", "siw": "Siwai", "six": "Sumau", "siy": "Sivandi", "siz": "Siwi", "sja": "Epena", "sjb": "Sajau Basap", "sjd": "Kildin Sami", "sje": "Pite Sami", "sjg": "Assangori", "sjk": "Kemi Sami", "sjl": "Sajalong; Miji", "sjm": "Mapun", "sjn": "Sindarin", "sjo": "Xibe", "sjp": "Surjapuri", "sjr": "Siar-Lak", "sjs": "Senhaja De Srair", "sjt": "Ter Sami", "sju": "Ume Sami", "sjw": "Shawnee", "sk": "Slovak", "ska": "Skagit", "skb": "Saek", "skc": "Ma Manda", "skd": "Southern Sierra Miwok", "ske": "Seke (Vanuatu)", "skf": "Sakirabiá", "skg": "Sakalava Malagasy", "skh": "Sikule", "ski": "Sika", "skj": "Seke (Nepal)", "skm": "Kutong", "skn": "Kolibugan Subanon", "sko": "Seko Tengah", "skp": "Sekapan", "skq": "Sininkere", "skr": "Saraiki; Seraiki", "sks": "Maia", "skt": "Sakata", "sku": "Sakao", "skv": "Skou", "skw": "Skepi Creole Dutch", "skx": "Seko Padang", "sky": "Sikaiana", "skz": "Sekar", "sl": "Slovenian", "sla": "Slavic languages", "slc": "Sáliba", "sld": "Sissala", "sle": "Sholaga", "slf": "Swiss-Italian Sign Language", "slg": "Selungai Murut", "slh": "Southern Puget Sound Salish", "sli": "Lower Silesian", "slj": "Salumá", "sll": "Salt-Yui", "slm": "Pangutaran Sama", "sln": "Salinan", "slp": "Lamaholot", "slq": "Salchuq", "slr": "Salar", "sls": "Singapore Sign Language", "slt": "Sila", "slu": "Selaru", "slw": "Sialum", "slx": "Salampasu", "sly": "Selayar", "slz": "Ma'ya", "sm": "Samoan", "sma": "Southern Sami", "smb": "Simbari", "smc": "Som", "smf": "Auwe", "smg": "Simbali", "smh": "Samei", "smi": "Sami languages", "smj": "Lule Sami", "smk": "Bolinao", "sml": "Central Sama", "smm": "Musasa", "smn": "Inari Sami", "smp": "Samaritan", "smq": "Samo", "smr": "Simeulue", "sms": "Skolt Sami", "smt": "Simte", "smu": "Somray", "smv": "Samvedi", "smw": "Sumbawa", "smx": "Samba", "smy": "Semnani", "smz": "Simeku", "sn": "Shona", "snc": "Sinaugoro", "sne": "Bau Bidayuh", "snf": "Noon", "sng": "Sanga (Democratic Republic of Congo)", "sni": "Sensi", "snj": "Riverain Sango", "snk": "Soninke", "snl": "Sangil", "snm": "Southern Ma'di", "snn": "Siona", "sno": "Snohomish", "snp": "Siane", "snq": "Sangu (Gabon)", "snr": "Sihan", "sns": "South West Bay; Nahavaq", "snu": "Senggi; Viid", "snv": "Sa'ban", "snw": "Selee", "snx": "Sam", "sny": "Saniyo-Hiyewe", "snz": "Kou", "so": "Somali", "soa": "Thai Song", "sob": "Sobei", "soc": "So (Democratic Republic of Congo)", "sod": "Songoora", "soe": "Songomeno", "sog": "Sogdian", "soh": "Aka", "soi": "Sonha", "soj": "Soi", "sok": "Sokoro", "sol": "Solos", "son": "Songhai languages", "soo": "Songo", "sop": "Songe", "soq": "Kanasi", "sor": 
"Somrai", "sos": "Seeku", "sou": "Southern Thai", "sov": "Sonsorol", "sow": "Sowanda", "sox": "Swo", "soy": "Miyobe", "soz": "Temi", "spb": "Sepa (Indonesia)", "spc": "Sapé", "spd": "Saep", "spe": "Sepa (Papua New Guinea)", "spg": "Sian", "spi": "Saponi", "spk": "Sengo", "spl": "Selepet", "spm": "Akukem", "spn": "Sanapaná", "spo": "Spokane", "spp": "Supyire Senoufo", "spq": "Loreto-Ucayali Spanish", "spr": "Saparua", "sps": "Saposa", "spt": "Spiti Bhoti", "spu": "Sapuan", "spv": "Sambalpuri; Kosli", "spx": "South Picene", "spy": "Sabaot", "sq": "Albanian", "sqa": "Shama-Sambuga", "sqh": "Shau", "sqj": "Albanian languages", "sqk": "Albanian Sign Language", "sqm": "Suma", "sqn": "Susquehannock", "sqo": "Sorkhei", "sqq": "Sou", "sqr": "Siculo Arabic", "sqs": "Sri Lankan Sign Language", "sqt": "Soqotri", "squ": "Squamish", "sqx": "Kufr Qassem Sign Language (KQSL)", "sr": "Serbian", "sra": "Saruga", "srb": "Sora", "src": "Logudorese Sardinian", "sre": "Sara", "srf": "Nafi", "srg": "Sulod", "srh": "Sarikoli", "sri": "Siriano", "srk": "Serudung Murut", "srl": "Isirawa", "srm": "Saramaccan", "srn": "Sranan Tongo", "sro": "Campidanese Sardinian", "srq": "Sirionó", "srr": "Serer", "srs": "Sarsi", "srt": "Sauri", "sru": "Suruí", "srv": "Southern Sorsoganon", "srw": "Serua", "srx": "Sirmauri", "sry": "Sera", "srz": "Shahmirzadi", "ss": "Swati", "ssa": "Nilo-Saharan languages", "ssb": "Southern Sama", "ssc": "Suba-Simbiti", "ssd": "Siroi", "sse": "Balangingi; Bangingih Sama", "ssf": "Thao", "ssg": "Seimat", "ssh": "Shihhi Arabic", "ssi": "Sansi", "ssj": "Sausi", "ssk": "Sunam", "ssl": "Western Sisaala", "ssm": "Semnam", "ssn": "Waata", "sso": "Sissano", "ssp": "Spanish Sign Language", "ssq": "So'a", "ssr": "Swiss-French Sign Language", "sss": "Sô", "sst": "Sinasina", "ssu": "Susuami", "ssv": "Shark Bay", "ssx": "Samberigi", "ssy": "Saho", "ssz": "Sengseng", "st": "Southern Sotho", "sta": "Settla", "stb": "Northern Subanen", "std": "Sentinel", "ste": "Liana-Seti", "stf": "Seta", "stg": "Trieng", "sth": "Shelta", "sti": "Bulo Stieng", "stj": "Matya Samo", "stk": "Arammba", "stl": "Stellingwerfs", "stm": "Setaman", "stn": "Owa", "sto": "Stoney", "stp": "Southeastern Tepehuan", "stq": "Saterfriesisch", "str": "Straits Salish", "sts": "Shumashti", "stt": "Budeh Stieng", "stu": "Samtao", "stv": "Silt'e", "stw": "Satawalese", "sty": "Siberian Tatar", "su": "Sundanese", "sua": "Sulka", "sub": "Suku", "suc": "Western Subanon", "sue": "Suena", "sug": "Suganga", "sui": "Suki", "suj": "Shubi", "suk": "Sukuma", "suo": "Bouni", "suq": "Tirmaga-Chai Suri; Suri", "sur": "Mwaghavul", "sus": "Susu", "sut": "Subtiaba", "suv": "Puroik", "suw": "Sumbwa", "sux": "Sumerian", "suy": "Suyá", "suz": "Sunwar", "sv": "Swedish", "sva": "Svan", "svb": "Ulau-Suain", "svc": "Vincentian Creole English", "sve": "Serili", "svk": "Slovakian Sign Language", "svm": "Slavomolisano", "svs": "Savosavo", "svx": "Skalvian", "sw": "Swahili (macrolanguage)", "swb": "Maore Comorian", "swc": "Congo Swahili", "swf": "Sere", "swg": "Swabian", "swh": "Swahili (individual language); Kiswahili", "swi": "Sui", "swj": "Sira", "swk": "Malawi Sena", "swl": "Swedish Sign Language", "swm": "Samosa", "swn": "Sawknah", "swo": "Shanenawa", "swp": "Suau", "swq": "Sharwa", "swr": "Saweru", "sws": "Seluwasan", "swt": "Sawila", "swu": "Suwawa", "swv": "Shekhawati", "sww": "Sowa", "swx": "Suruahá", "swy": "Sarua", "sxb": "Suba", "sxc": "Sicanian", "sxe": "Sighu", "sxg": "Shuhi; Shixing", "sxk": "Southern Kalapuya", "sxl": "Selian", "sxm": "Samre", "sxn": "Sangir", 
"sxo": "Sorothaptic", "sxr": "Saaroa", "sxs": "Sasaru", "sxu": "Upper Saxon", "sxw": "Saxwe Gbe", "sya": "Siang", "syb": "Central Subanen", "syc": "Classical Syriac", "syd": "Samoyedic languages", "syi": "Seki", "syk": "Sukur", "syl": "Sylheti", "sym": "Maya Samo", "syn": "Senaya", "syo": "Suoy", "syr": "Syriac", "sys": "Sinyar", "syw": "Kagate", "syx": "Samay", "syy": "Al-Sayyid Bedouin Sign Language", "sza": "Semelai", "szb": "Ngalum", "szc": "Semaq Beri", "szd": "Seru", "sze": "Seze", "szg": "Sengele", "szl": "Silesian", "szn": "Sula", "szp": "Suabo", "szs": "Solomon Islands Sign Language", "szv": "Isu (Fako Division)", "szw": "Sawai", "szy": "Sakizaya", "ta": "Tamil", "taa": "Lower Tanana", "tab": "Tabassaran", "tac": "Lowland Tarahumara", "tad": "Tause", "tae": "Tariana", "taf": "Tapirapé", "tag": "Tagoi", "tai": "Tai languages", "taj": "Eastern Tamang", "tak": "Tala", "tal": "Tal", "tan": "Tangale", "tao": "Yami", "tap": "Taabwa", "taq": "Tamasheq", "tar": "Central Tarahumara", "tas": "Tay Boi", "tau": "Upper Tanana", "tav": "Tatuyo", "taw": "Tai", "tax": "Tamki", "tay": "Atayal", "taz": "Tocho", "tba": "Aikanã", "tbc": "Takia", "tbd": "Kaki Ae", "tbe": "Tanimbili", "tbf": "Mandara", "tbg": "North Tairora", "tbh": "Dharawal; Thurawal", "tbi": "Gaam", "tbj": "Tiang", "tbk": "Calamian Tagbanwa", "tbl": "Tboli", "tbm": "Tagbu", "tbn": "Barro Negro Tunebo", "tbo": "Tawala", "tbp": "Taworta; Diebroud", "tbq": "Tibeto-Burman languages", "tbr": "Tumtum", "tbs": "Tanguat", "tbt": "Tembo (Kitembo)", "tbu": "Tubar", "tbv": "Tobo", "tbw": "Tagbanwa", "tbx": "Kapin", "tby": "Tabaru", "tbz": "Ditammari", "tca": "Ticuna", "tcb": "Tanacross", "tcc": "Datooga", "tcd": "Tafi", "tce": "Southern Tutchone", "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", "tcg": "Tamagario", "tch": "Turks And Caicos Creole English", "tci": "Wára", "tck": "Tchitchege", "tcl": "Taman (Myanmar)", "tcm": "Tanahmerah", "tcn": "Tichurong", "tco": "Taungyo", "tcp": "Tawr Chin", "tcq": "Kaiy", "tcs": "Torres Strait Creole; Yumplatok", "tct": "T'en", "tcu": "Southeastern Tarahumara", "tcw": "Tecpatlán Totonac", "tcx": "Toda", "tcy": "Tulu", "tcz": "Thado Chin", "tda": "Tagdal", "tdb": "Panchpargania", "tdc": "Emberá-Tadó", "tdd": "Tai Nüa", "tde": "Tiranige Diga Dogon", "tdf": "Talieng", "tdg": "Western Tamang", "tdh": "Thulung", "tdi": "Tomadino", "tdj": "Tajio", "tdk": "Tambas", "tdl": "Sur", "tdm": "Taruma", "tdn": "Tondano", "tdo": "Teme", "tdq": "Tita", "tdr": "Todrah", "tds": "Doutai", "tdt": "Tetun Dili", "tdv": "Toro", "tdx": "Tandroy-Mahafaly Malagasy", "tdy": "Tadyawan", "te": "Telugu", "tea": "Temiar", "teb": "Tetete", "tec": "Terik", "ted": "Tepo Krumen", "tee": "Huehuetla Tepehua", "tef": "Teressa", "teg": "Teke-Tege", "teh": "Tehuelche", "tei": "Torricelli", "tek": "Ibali Teke", "tem": "Timne", "ten": "Tama (Colombia)", "teo": "Teso", "tep": "Tepecano", "teq": "Temein", "ter": "Tereno", "tes": "Tengger", "tet": "Tetum", "teu": "Soo", "tev": "Teor", "tew": "Tewa (USA)", "tex": "Tennet", "tey": "Tulishi", "tez": "Tetserret", "tfi": "Tofin Gbe", "tfn": "Tanaina", "tfo": "Tefaro", "tfr": "Teribe", "tft": "Ternate", "tg": "Tajik", "tga": "Sagalla", "tgb": "Tobilung", "tgc": "Tigak", "tgd": "Ciwogai", "tge": "Eastern Gorkha Tamang", "tgf": "Chalikha", "tgh": "Tobagonian Creole English", "tgi": "Lawunuia", "tgj": "Tagin", "tgn": "Tandaganon", "tgo": "Sudest", "tgp": "Tangoa", "tgq": "Tring", "tgr": "Tareng", "tgs": "Nume", "tgt": "Central Tagbanwa", "tgu": "Tanggu", "tgv": "Tingui-Boto", "tgw": "Tagwana Senoufo", 
"tgx": "Tagish", "tgy": "Togoyo", "tgz": "Tagalaka", "th": "Thai", "thd": "Kuuk Thaayorre; Thayore", "the": "Chitwania Tharu", "thf": "Thangmi", "thh": "Northern Tarahumara", "thi": "Tai Long", "thk": "Tharaka; Kitharaka", "thl": "Dangaura Tharu", "thm": "Aheu", "thn": "Thachanadan", "thp": "Thompson", "thq": "Kochila Tharu", "thr": "Rana Tharu", "ths": "Thakali", "tht": "Tahltan", "thu": "Thuri", "thv": "Tahaggart Tamahaq", "thy": "Tha", "thz": "Tayart Tamajeq", "ti": "Tigrinya", "tia": "Tidikelt Tamazight", "tic": "Tira", "tif": "Tifal", "tig": "Tigre", "tih": "Timugon Murut", "tii": "Tiene", "tij": "Tilung", "tik": "Tikar", "til": "Tillamook", "tim": "Timbe", "tin": "Tindi", "tio": "Teop", "tip": "Trimuris", "tiq": "Tiéfo", "tis": "Masadiit Itneg", "tit": "Tinigua", "tiu": "Adasen", "tiv": "Tiv", "tiw": "Tiwi", "tix": "Southern Tiwa", "tiy": "Tiruray", "tiz": "Tai Hongjin", "tja": "Tajuasohn", "tjg": "Tunjung", "tji": "Northern Tujia", "tjj": "Tjungundji", "tjl": "Tai Laing", "tjm": "Timucua", "tjn": "Tonjon", "tjo": "Temacine Tamazight", "tjp": "Tjupany", "tjs": "Southern Tujia", "tju": "Tjurruru", "tjw": "Djabwurrung", "tk": "Turkmen", "tka": "Truká", "tkb": "Buksa", "tkd": "Tukudede", "tke": "Takwane", "tkf": "Tukumanféd", "tkg": "Tesaka Malagasy", "tkl": "Tokelau", "tkm": "Takelma", "tkn": "Toku-No-Shima", "tkp": "Tikopia", "tkq": "Tee", "tkr": "Tsakhur", "tks": "Takestani", "tkt": "Kathoriya Tharu", "tku": "Upper Necaxa Totonac", "tkv": "Mur Pano", "tkw": "Teanu", "tkx": "Tangko", "tkz": "Takua", "tl": "Tagalog", "tla": "Southwestern Tepehuan", "tlb": "Tobelo", "tlc": "Yecuatla Totonac", "tld": "Talaud", "tlf": "Telefol", "tlg": "Tofanma", "tlh": "Klingon; tlhIngan Hol", "tli": "Tlingit", "tlj": "Talinga-Bwisi", "tlk": "Taloki", "tll": "Tetela", "tlm": "Tolomako", "tln": "Talondo'", "tlo": "Talodi", "tlp": "Filomena Mata-Coahuitlán Totonac", "tlq": "Tai Loi", "tlr": "Talise", "tls": "Tambotalo", "tlt": "Sou Nama; Teluti", "tlu": "Tulehu", "tlv": "Taliabu", "tlx": "Khehek", "tly": "Talysh", "tma": "Tama (Chad)", "tmb": "Katbol; Avava", "tmc": "Tumak", "tmd": "Haruai", "tme": "Tremembé", "tmf": "Toba-Maskoy", "tmg": "Ternateño", "tmh": "Tamashek", "tmi": "Tutuba", "tmj": "Samarokena", "tmk": "Northwestern Tamang", "tml": "Tamnim Citak", "tmm": "Tai Thanh", "tmn": "Taman (Indonesia)", "tmo": "Temoq", "tmq": "Tumleo", "tmr": "Jewish Babylonian Aramaic (ca. 
200-1200 CE)", "tms": "Tima", "tmt": "Tasmate", "tmu": "Iau", "tmv": "Tembo (Motembo)", "tmw": "Temuan", "tmy": "Tami", "tmz": "Tamanaku", "tn": "Tswana", "tna": "Tacana", "tnb": "Western Tunebo", "tnc": "Tanimuca-Retuarã", "tnd": "Angosturas Tunebo", "tng": "Tobanga", "tnh": "Maiani", "tni": "Tandia", "tnk": "Kwamera", "tnl": "Lenakel", "tnm": "Tabla", "tnn": "North Tanna", "tno": "Toromono", "tnp": "Whitesands", "tnq": "Taino", "tnr": "Ménik", "tns": "Tenis", "tnt": "Tontemboan", "tnu": "Tay Khang", "tnv": "Tangchangya", "tnw": "Tonsawang", "tnx": "Tanema", "tny": "Tongwe", "tnz": "Ten'edn", "to": "Tonga (Tonga Islands)", "tob": "Toba", "toc": "Coyutla Totonac", "tod": "Toma", "tof": "Gizrra", "tog": "Tonga (Nyasa)", "toh": "Gitonga", "toi": "Tonga (Zambia)", "toj": "Tojolabal", "tok": "Toki Pona", "tol": "Tolowa", "tom": "Tombulu", "too": "Xicotepec De Juárez Totonac", "top": "Papantla Totonac", "toq": "Toposa", "tor": "Togbo-Vara Banda", "tos": "Highland Totonac", "tou": "Tho", "tov": "Upper Taromi", "tow": "Jemez", "tox": "Tobian", "toy": "Topoiyo", "toz": "To", "tpa": "Taupota", "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", "tpe": "Tippera", "tpf": "Tarpia", "tpg": "Kula", "tpi": "Tok Pisin", "tpj": "Tapieté", "tpk": "Tupinikin", "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", "tpm": "Tampulma", "tpn": "Tupinambá", "tpo": "Tai Pao", "tpp": "Pisaflores Tepehua", "tpq": "Tukpa", "tpr": "Tuparí", "tpt": "Tlachichilco Tepehua", "tpu": "Tampuan", "tpv": "Tanapag", "tpw": "Tupí", "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", "tpy": "Trumai", "tpz": "Tinputz", "tqb": "Tembé", "tql": "Lehali", "tqm": "Turumsa", "tqn": "Tenino", "tqo": "Toaripi", "tqp": "Tomoip", "tqq": "Tunni", "tqr": "Torona", "tqt": "Western Totonac", "tqu": "Touo", "tqw": "Tonkawa", "tr": "Turkish", "tra": "Tirahi", "trb": "Terebu", "trc": "Copala Triqui", "trd": "Turi", "tre": "East Tarangan", "trf": "Trinidadian Creole English", "trg": "Lishán Didán", "trh": "Turaka", "tri": "Trió", "trj": "Toram", "trk": "Turkic languages", "trl": "Traveller Scottish", "trm": "Tregami", "trn": "Trinitario", "tro": "Tarao Naga", "trp": "Kok Borok", "trq": "San Martín Itunyoso Triqui", "trr": "Taushiro", "trs": "Chicahuaxtla Triqui", "trt": "Tunggare", "tru": "Turoyo; Surayt", "trv": "Sediq; Seediq; Taroko", "trw": "Torwali", "trx": "Tringgus-Sembaan Bidayuh", "try": "Turung", "trz": "Torá", "ts": "Tsonga", "tsa": "Tsaangi", "tsb": "Tsamai", "tsc": "Tswa", "tsd": "Tsakonian", "tse": "Tunisian Sign Language", "tsg": "Tausug", "tsh": "Tsuvan", "tsi": "Tsimshian", "tsj": "Tshangla", "tsk": "Tseku", "tsl": "Ts'ün-Lao", "tsm": "Turkish Sign Language; Türk İşaret Dili", "tsp": "Northern Toussian", "tsq": "Thai Sign Language", "tsr": "Akei", "tss": "Taiwan Sign Language", "tst": "Tondi Songway Kiini", "tsu": "Tsou", "tsv": "Tsogo", "tsw": "Tsishingini", "tsx": "Mubami", "tsy": "Tebul Sign Language", "tsz": "Purepecha", "tt": "Tatar", "tta": "Tutelo", "ttb": "Gaa", "ttc": "Tektiteko", "ttd": "Tauade", "tte": "Bwanabwana", "ttf": "Tuotomb", "ttg": "Tutong", "tth": "Upper Ta'oih", "tti": "Tobati", "ttj": "Tooro", "ttk": "Totoro", "ttl": "Totela", "ttm": "Northern Tutchone", "ttn": "Towei", "tto": "Lower Ta'oih", "ttp": "Tombelala", "ttq": "Tawallammat Tamajaq", "ttr": "Tera", "tts": "Northeastern Thai", "ttt": "Muslim Tat", "ttu": "Torau", "ttv": "Titan", "ttw": "Long Wat", "tty": "Sikaritai", "ttz": "Tsum", "tua": "Wiarumus", "tub": "Tübatulabal", "tuc": "Mutu", "tud": "Tuxá", "tue": "Tuyuca", "tuf": "Central Tunebo", "tug": "Tunia", "tuh": "Taulil", 
"tui": "Tupuri", "tuj": "Tugutil", "tul": "Tula", "tum": "Tumbuka", "tun": "Tunica", "tuo": "Tucano", "tup": "Tupi languages", "tuq": "Tedaga", "tus": "Tuscarora", "tut": "Altaic languages", "tuu": "Tututni", "tuv": "Turkana", "tuw": "Tungus languages", "tux": "Tuxináwa", "tuy": "Tugen", "tuz": "Turka", "tva": "Vaghua", "tvd": "Tsuvadi", "tve": "Te'un", "tvk": "Southeast Ambrym", "tvl": "Tuvalu", "tvm": "Tela-Masbuar", "tvn": "Tavoyan", "tvo": "Tidore", "tvs": "Taveta", "tvt": "Tutsa Naga", "tvu": "Tunen", "tvw": "Sedoa", "tvx": "Taivoan", "tvy": "Timor Pidgin", "tw": "Twi", "twa": "Twana", "twb": "Western Tawbuid", "twc": "Teshenawa", "twd": "Twents", "twe": "Tewa (Indonesia)", "twf": "Northern Tiwa", "twg": "Tereweng", "twh": "Tai Dón", "twl": "Tawara", "twm": "Tawang Monpa", "twn": "Twendi", "two": "Tswapong", "twp": "Ere", "twq": "Tasawaq", "twr": "Southwestern Tarahumara", "twt": "Turiwára", "twu": "Termanu", "tww": "Tuwari", "twx": "Tewe", "twy": "Tawoyan", "txa": "Tombonuo", "txb": "Tokharian B", "txc": "Tsetsaut", "txe": "Totoli", "txg": "Tangut", "txh": "Thracian", "txi": "Ikpeng", "txj": "Tarjumo", "txm": "Tomini", "txn": "West Tarangan", "txo": "Toto", "txq": "Tii", "txr": "Tartessian", "txs": "Tonsea", "txt": "Citak", "txu": "Kayapó", "txx": "Tatana", "txy": "Tanosy Malagasy", "ty": "Tahitian", "tya": "Tauya", "tye": "Kyanga", "tyh": "O'du", "tyi": "Teke-Tsaayi", "tyj": "Tai Do; Tai Yo", "tyl": "Thu Lao", "tyn": "Kombai", "typ": "Thaypan", "tyr": "Tai Daeng", "tys": "Tày Sa Pa", "tyt": "Tày Tac", "tyu": "Kua", "tyv": "Tuvinian", "tyx": "Teke-Tyee", "tyy": "Tiyaa", "tyz": "Tày", "tza": "Tanzanian Sign Language", "tzh": "Tzeltal", "tzj": "Tz'utujil", "tzl": "Talossan", "tzm": "Central Atlas Tamazight", "tzn": "Tugun", "tzo": "Tzotzil", "tzx": "Tabriak", "uam": "Uamué", "uan": "Kuan", "uar": "Tairuma", "uba": "Ubang", "ubi": "Ubi", "ubl": "Buhi'non Bikol", "ubr": "Ubir", "ubu": "Umbu-Ungu", "uby": "Ubykh", "uda": "Uda", "ude": "Udihe", "udg": "Muduga", "udi": "Udi", "udj": "Ujir", "udl": "Wuzlam", "udm": "Udmurt", "udu": "Uduk", "ues": "Kioko", "ufi": "Ufim", "ug": "Uighur; Uyghur", "uga": "Ugaritic", "ugb": "Kuku-Ugbanh", "uge": "Ughele", "ugh": "Kubachi", "ugn": "Ugandan Sign Language", "ugo": "Ugong", "ugy": "Uruguayan Sign Language", "uha": "Uhami", "uhn": "Damal", "uis": "Uisai", "uiv": "Iyive", "uji": "Tanjijili", "uk": "Ukrainian", "uka": "Kaburi", "ukg": "Ukuriguma", "ukh": "Ukhwejo", "uki": "Kui (India)", "ukk": "Muak Sa-aak", "ukl": "Ukrainian Sign Language", "ukp": "Ukpe-Bayobiri", "ukq": "Ukwa", "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", "uku": "Ukue", "ukv": "Kuku", "ukw": "Ukwuani-Aboh-Ndoni", "uky": "Kuuk-Yak", "ula": "Fungwa", "ulb": "Ulukwumi", "ulc": "Ulch", "ule": "Lule", "ulf": "Usku; Afra", "uli": "Ulithian", "ulk": "Meriam Mir", "ull": "Ullatan", "ulm": "Ulumanda'", "uln": "Unserdeutsch", "ulu": "Uma' Lung", "ulw": "Ulwa", "uma": "Umatilla", "umb": "Umbundu", "umc": "Marrucinian", "umd": "Umbindhamu", "umg": "Morrobalama; Umbuygamu", "umi": "Ukit", "umm": "Umon", "umn": "Makyan Naga", "umo": "Umotína", "ump": "Umpila", "umr": "Umbugarla", "ums": "Pendau", "umu": "Munsee", "una": "North Watut", "und": "Undetermined", "une": "Uneme", "ung": "Ngarinyin", "uni": "Uni", "unk": "Enawené-Nawé", "unm": "Unami", "unn": "Kurnai", "unr": "Mundari", "unu": "Unubahe", "unx": "Munda", "unz": "Unde Kaili", "uon": "Kulon", "upi": "Umeda", "upv": "Uripiv-Wala-Rano-Atchin", "ur": "Urdu", "ura": "Urarina", "urb": "Urubú-Kaapor; Kaapor", "urc": "Urningangg", 
"ure": "Uru", "urf": "Uradhi", "urg": "Urigina", "urh": "Urhobo", "uri": "Urim", "urj": "Uralic languages", "urk": "Urak Lawoi'", "url": "Urali", "urm": "Urapmin", "urn": "Uruangnirin", "uro": "Ura (Papua New Guinea)", "urp": "Uru-Pa-In", "urr": "Lehalurup; Löyöp", "urt": "Urat", "uru": "Urumi", "urv": "Uruava", "urw": "Sop", "urx": "Urimo", "ury": "Orya", "urz": "Uru-Eu-Wau-Wau", "usa": "Usarufa", "ush": "Ushojo", "usi": "Usui", "usk": "Usaghade", "usp": "Uspanteco", "uss": "us-Saare", "usu": "Uya", "uta": "Otank", "ute": "Ute-Southern Paiute", "uth": "ut-Hun", "utp": "Amba (Solomon Islands)", "utr": "Etulo", "utu": "Utu", "uum": "Urum", "uur": "Ura (Vanuatu)", "uuu": "U", "uve": "West Uvean; Fagauvea", "uvh": "Uri", "uvl": "Lote", "uwa": "Kuku-Uwanh", "uya": "Doko-Uyanga", "uz": "Uzbek", "uzn": "Northern Uzbek", "uzs": "Southern Uzbek", "vaa": "Vaagri Booli", "vae": "Vale", "vaf": "Vafsi", "vag": "Vagla", "vah": "Varhadi-Nagpuri", "vai": "Vai", "vaj": "Sekele; Northwestern ǃKung; Vasekele", "val": "Vehes", "vam": "Vanimo", "van": "Valman", "vao": "Vao", "vap": "Vaiphei", "var": "Huarijio", "vas": "Vasavi", "vau": "Vanuma", "vav": "Varli", "vay": "Wayu", "vbb": "Southeast Babar", "vbk": "Southwestern Bontok", "ve": "Venda", "vec": "Venetian", "ved": "Veddah", "vel": "Veluws", "vem": "Vemgo-Mabas", "veo": "Ventureño", "vep": "Veps", "ver": "Mom Jango", "vgr": "Vaghri", "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", "vi": "Vietnamese", "vic": "Virgin Islands Creole English", "vid": "Vidunda", "vif": "Vili", "vig": "Viemo", "vil": "Vilela", "vin": "Vinza", "vis": "Vishavan", "vit": "Viti", "viv": "Iduna", "vka": "Kariyarra", "vkj": "Kujarge", "vkk": "Kaur", "vkl": "Kulisusu", "vkm": "Kamakan", "vkn": "Koro Nulu", "vko": "Kodeoha", "vkp": "Korlai Creole Portuguese", "vkt": "Tenggarong Kutai Malay", "vku": "Kurrama", "vkz": "Koro Zuba", "vlp": "Valpei", "vls": "Vlaams", "vma": "Martuyhunira", "vmb": "Barbaram", "vmc": "Juxtlahuaca Mixtec", "vmd": "Mudu Koraga", "vme": "East Masela", "vmf": "Mainfränkisch", "vmg": "Lungalunga", "vmh": "Maraghei", "vmi": "Miwa", "vmj": "Ixtayutla Mixtec", "vmk": "Makhuwa-Shirima", "vml": "Malgana", "vmm": "Mitlatongo Mixtec", "vmp": "Soyaltepec Mazatec", "vmq": "Soyaltepec Mixtec", "vmr": "Marenje", "vms": "Moksela", "vmu": "Muluridyi", "vmv": "Valley Maidu", "vmw": "Makhuwa", "vmx": "Tamazola Mixtec", "vmy": "Ayautla Mazatec", "vmz": "Mazatlán Mazatec", "vnk": "Vano; Lovono", "vnm": "Vinmavis; Neve'ei", "vnp": "Vunapu", "vo": "Volapük", "vor": "Voro", "vot": "Votic", "vra": "Vera'a", "vro": "Võro", "vrs": "Varisi", "vrt": "Burmbar; Banam Bay", "vsi": "Moldova Sign Language", "vsl": "Venezuelan Sign Language", "vsv": "Valencian Sign Language; Llengua de signes valenciana", "vto": "Vitou", "vum": "Vumbu", "vun": "Vunjo", "vut": "Vute", "vwa": "Awa (China)", "wa": "Walloon", "waa": "Walla Walla", "wab": "Wab", "wac": "Wasco-Wishram", "wad": "Wamesa; Wondama", "wae": "Walser", "waf": "Wakoná", "wag": "Wa'ema", "wah": "Watubela", "wai": "Wares", "waj": "Waffa", "wak": "Wakashan languages", "wal": "Wolaytta; Wolaitta", "wam": "Wampanoag", "wan": "Wan", "wao": "Wappo", "wap": "Wapishana", "waq": "Wagiman", "war": "Waray (Philippines)", "was": "Washo", "wat": "Kaninuwa", "wau": "Waurá", "wav": "Waka", "waw": "Waiwai", "wax": "Watam; Marangis", "way": "Wayana", "waz": "Wampur", "wba": "Warao", "wbb": "Wabo", "wbe": "Waritai", "wbf": "Wara", "wbh": "Wanda", "wbi": "Vwanji", "wbj": "Alagwa", "wbk": "Waigali", "wbl": "Wakhi", "wbm": "Wa", "wbp": "Warlpiri", "wbq": 
"Waddar", "wbr": "Wagdi", "wbs": "West Bengal Sign Language", "wbt": "Warnman", "wbv": "Wajarri", "wbw": "Woi", "wca": "Yanomámi", "wci": "Waci Gbe", "wdd": "Wandji", "wdg": "Wadaginam", "wdj": "Wadjiginy", "wdk": "Wadikali", "wdt": "Wendat", "wdu": "Wadjigu", "wdy": "Wadjabangayi", "wea": "Wewaw", "wec": "Wè Western", "wed": "Wedau", "weg": "Wergaia", "weh": "Weh", "wei": "Kiunum", "wem": "Weme Gbe", "wen": "Sorbian languages", "weo": "Wemale", "wep": "Westphalien", "wer": "Weri", "wes": "Cameroon Pidgin", "wet": "Perai", "weu": "Rawngtu Chin", "wew": "Wejewa", "wfg": "Yafi; Zorop", "wga": "Wagaya", "wgb": "Wagawaga", "wgg": "Wangkangurru; Wangganguru", "wgi": "Wahgi", "wgo": "Waigeo", "wgu": "Wirangu", "wgy": "Warrgamay", "wha": "Sou Upaa; Manusela", "whg": "North Wahgi", "whk": "Wahau Kenyah", "whu": "Wahau Kayan", "wib": "Southern Toussian", "wic": "Wichita", "wie": "Wik-Epa", "wif": "Wik-Keyangan", "wig": "Wik Ngathan", "wih": "Wik-Me'anha", "wii": "Minidien", "wij": "Wik-Iiyanh", "wik": "Wikalkan", "wil": "Wilawila", "wim": "Wik-Mungkan", "win": "Ho-Chunk", "wir": "Wiraféd", "wiu": "Wiru", "wiv": "Vitu", "wiy": "Wiyot", "wja": "Waja", "wji": "Warji", "wka": "Kw'adza", "wkb": "Kumbaran", "wkd": "Wakde; Mo", "wkl": "Kalanadi", "wkr": "Keerray-Woorroong", "wku": "Kunduvadi", "wkw": "Wakawaka", "wky": "Wangkayutyuru", "wla": "Walio", "wlc": "Mwali Comorian", "wle": "Wolane", "wlg": "Kunbarlang", "wlh": "Welaun", "wli": "Waioli", "wlk": "Wailaki", "wll": "Wali (Sudan)", "wlm": "Middle Welsh", "wlo": "Wolio", "wlr": "Wailapa", "wls": "Wallisian", "wlu": "Wuliwuli", "wlv": "Wichí Lhamtés Vejoz", "wlw": "Walak", "wlx": "Wali (Ghana)", "wly": "Waling", "wma": "Mawa (Nigeria)", "wmb": "Wambaya", "wmc": "Wamas", "wmd": "Mamaindé", "wme": "Wambule", "wmg": "Western Minyag", "wmh": "Waima'a", "wmi": "Wamin", "wmm": "Maiwa (Indonesia)", "wmn": "Waamwang", "wmo": "Wom (Papua New Guinea)", "wms": "Wambon", "wmt": "Walmajarri", "wmw": "Mwani", "wmx": "Womo", "wnb": "Wanambre", "wnc": "Wantoat", "wnd": "Wandarang", "wne": "Waneci", "wng": "Wanggom", "wni": "Ndzwani Comorian", "wnk": "Wanukaka", "wnm": "Wanggamala", "wnn": "Wunumara", "wno": "Wano", "wnp": "Wanap", "wnu": "Usan", "wnw": "Wintu", "wny": "Wanyi; Waanyi", "wo": "Wolof", "woa": "Kuwema; Tyaraity", "wob": "Wè Northern", "woc": "Wogeo", "wod": "Wolani", "woe": "Woleaian", "wof": "Gambian Wolof", "wog": "Wogamusin", "woi": "Kamang", "wok": "Longto", "wom": "Wom (Nigeria)", "won": "Wongo", "woo": "Manombai", "wor": "Woria", "wos": "Hanga Hundi", "wow": "Wawonii", "woy": "Weyto", "wpc": "Maco", "wrb": "Waluwarra; Warluwara", "wrg": "Warungu; Gudjal", "wrh": "Wiradjuri", "wri": "Wariyangga", "wrk": "Garrwa", "wrl": "Warlmanpa", "wrm": "Warumungu", "wrn": "Warnang", "wro": "Worrorra", "wrp": "Waropen", "wrr": "Wardaman", "wrs": "Waris", "wru": "Waru", "wrv": "Waruna", "wrw": "Gugu Warra", "wrx": "Wae Rana", "wry": "Merwari", "wrz": "Waray (Australia)", "wsa": "Warembori", "wsg": "Adilabad Gondi", "wsi": "Wusi", "wsk": "Waskia", "wsr": "Owenia", "wss": "Wasa", "wsu": "Wasu", "wsv": "Wotapuri-Katarqalai", "wtf": "Watiwa", "wth": "Wathawurrung", "wti": "Berta", "wtk": "Watakataui", "wtm": "Mewati", "wtw": "Wotu", "wua": "Wikngenchera", "wub": "Wunambal", "wud": "Wudu", "wuh": "Wutunhua", "wul": "Silimo", "wum": "Wumbvu", "wun": "Bungu", "wur": "Wurrugu", "wut": "Wutung", "wuu": "Wu Chinese", "wuv": "Wuvulu-Aua", "wux": "Wulna", "wuy": "Wauyai", "wwa": "Waama", "wwb": "Wakabunga", "wwo": "Wetamut; Dorig", "wwr": "Warrwa", "www": "Wawa", "wxa": 
"Waxianghua", "wxw": "Wardandi", "wyb": "Wangaaybuwan-Ngiyambaa", "wyi": "Woiwurrung", "wym": "Wymysorys", "wyn": "Wyandot", "wyr": "Wayoró", "wyy": "Western Fijian", "xaa": "Andalusian Arabic", "xab": "Sambe", "xac": "Kachari", "xad": "Adai", "xae": "Aequian", "xag": "Aghwan", "xai": "Kaimbé", "xaj": "Ararandewára", "xak": "Máku", "xal": "Kalmyk; Oirat", "xam": "ǀXam", "xan": "Xamtanga", "xao": "Khao", "xap": "Apalachee", "xaq": "Aquitanian", "xar": "Karami", "xas": "Kamas", "xat": "Katawixi", "xau": "Kauwera", "xav": "Xavánte", "xaw": "Kawaiisu", "xay": "Kayan Mahakam", "xbb": "Lower Burdekin", "xbc": "Bactrian", "xbd": "Bindal", "xbe": "Bigambal", "xbg": "Bunganditj", "xbi": "Kombio", "xbj": "Birrpayi", "xbm": "Middle Breton", "xbn": "Kenaboi", "xbo": "Bolgarian", "xbp": "Bibbulman", "xbr": "Kambera", "xbw": "Kambiwá", "xby": "Batjala; Batyala", "xcb": "Cumbric", "xcc": "Camunic", "xce": "Celtiberian", "xcg": "Cisalpine Gaulish", "xch": "Chemakum; Chimakum", "xcl": "Classical Armenian", "xcm": "Comecrudo", "xcn": "Cotoname", "xco": "Chorasmian", "xcr": "Carian", "xct": "Classical Tibetan", "xcu": "Curonian", "xcv": "Chuvantsy", "xcw": "Coahuilteco", "xcy": "Cayuse", "xda": "Darkinyung", "xdc": "Dacian", "xdk": "Dharuk", "xdm": "Edomite", "xdo": "Kwandu", "xdq": "Kaitag", "xdy": "Malayic Dayak", "xeb": "Eblan", "xed": "Hdi", "xeg": "ǁXegwi", "xel": "Kelo", "xem": "Kembayan", "xep": "Epi-Olmec", "xer": "Xerénte", "xes": "Kesawai", "xet": "Xetá", "xeu": "Keoru-Ahia", "xfa": "Faliscan", "xga": "Galatian", "xgb": "Gbin", "xgd": "Gudang", "xgf": "Gabrielino-Fernandeño", "xgg": "Goreng", "xgi": "Garingbal", "xgl": "Galindan", "xgm": "Dharumbal; Guwinmal", "xgn": "Mongolian languages", "xgr": "Garza", "xgu": "Unggumi", "xgw": "Guwa", "xh": "Xhosa", "xha": "Harami", "xhc": "Hunnic", "xhd": "Hadrami", "xhe": "Khetrani", "xhm": "Middle Khmer (1400 to 1850 CE)", "xhr": "Hernican", "xht": "Hattic", "xhu": "Hurrian", "xhv": "Khua", "xib": "Iberian", "xii": "Xiri", "xil": "Illyrian", "xin": "Xinca", "xir": "Xiriâna", "xis": "Kisan", "xiv": "Indus Valley Language", "xiy": "Xipaya", "xjb": "Minjungbal", "xjt": "Jaitmatang", "xka": "Kalkoti", "xkb": "Northern Nago", "xkc": "Kho'ini", "xkd": "Mendalam Kayan", "xke": "Kereho", "xkf": "Khengkha", "xkg": "Kagoro", "xki": "Kenyan Sign Language", "xkj": "Kajali", "xkk": "Kachok; Kaco'", "xkl": "Mainstream Kenyah", "xkn": "Kayan River Kayan", "xko": "Kiorr", "xkp": "Kabatei", "xkq": "Koroni", "xkr": "Xakriabá", "xks": "Kumbewaha", "xkt": "Kantosi", "xku": "Kaamba", "xkv": "Kgalagadi", "xkw": "Kembra", "xkx": "Karore", "xky": "Uma' Lasan", "xkz": "Kurtokha", "xla": "Kamula", "xlb": "Loup B", "xlc": "Lycian", "xld": "Lydian", "xle": "Lemnian", "xlg": "Ligurian (Ancient)", "xli": "Liburnian", "xln": "Alanic", "xlo": "Loup A", "xlp": "Lepontic", "xls": "Lusitanian", "xlu": "Cuneiform Luwian", "xly": "Elymian", "xma": "Mushungulu", "xmb": "Mbonga", "xmc": "Makhuwa-Marrevone", "xmd": "Mbudum", "xme": "Median", "xmf": "Mingrelian", "xmg": "Mengaka", "xmh": "Kugu-Muminh", "xmj": "Majera", "xmk": "Ancient Macedonian", "xml": "Malaysian Sign Language", "xmm": "Manado Malay", "xmn": "Manichaean Middle Persian", "xmo": "Morerebi", "xmp": "Kuku-Mu'inh", "xmq": "Kuku-Mangk", "xmr": "Meroitic", "xms": "Moroccan Sign Language", "xmt": "Matbat", "xmu": "Kamu", "xmv": "Antankarana Malagasy; Tankarana Malagasy", "xmw": "Tsimihety Malagasy", "xmx": "Salawati; Maden", "xmy": "Mayaguduna", "xmz": "Mori Bawah", "xna": "Ancient North Arabian", "xnb": "Kanakanabu", "xnd": "Na-Dene 
languages", "xng": "Middle Mongolian", "xnh": "Kuanhua", "xni": "Ngarigu", "xnj": "Ngoni (Tanzania)", "xnk": "Nganakarti", "xnm": "Ngumbarl", "xnn": "Northern Kankanay", "xno": "Anglo-Norman", "xnq": "Ngoni (Mozambique)", "xnr": "Kangri", "xns": "Kanashi", "xnt": "Narragansett", "xnu": "Nukunul", "xny": "Nyiyaparli", "xnz": "Kenzi; Mattoki", "xoc": "O'chi'chi'", "xod": "Kokoda", "xog": "Soga", "xoi": "Kominimung", "xok": "Xokleng", "xom": "Komo (Sudan)", "xon": "Konkomba", "xoo": "Xukurú", "xop": "Kopar", "xor": "Korubo", "xow": "Kowaki", "xpa": "Pirriya", "xpb": "Northeastern Tasmanian; Pyemmairrener", "xpc": "Pecheneg", "xpd": "Oyster Bay Tasmanian", "xpe": "Liberia Kpelle", "xpf": "Southeast Tasmanian; Nuenonne", "xpg": "Phrygian", "xph": "North Midlands Tasmanian; Tyerrenoterpanner", "xpi": "Pictish", "xpj": "Mpalitjanh", "xpk": "Kulina Pano", "xpl": "Port Sorell Tasmanian", "xpm": "Pumpokol", "xpn": "Kapinawá", "xpo": "Pochutec", "xpp": "Puyo-Paekche", "xpq": "Mohegan-Pequot", "xpr": "Parthian", "xps": "Pisidian", "xpt": "Punthamara", "xpu": "Punic", "xpv": "Northern Tasmanian; Tommeginne", "xpw": "Northwestern Tasmanian; Peerapper", "xpx": "Southwestern Tasmanian; Toogee", "xpy": "Puyo", "xpz": "Bruny Island Tasmanian", "xqa": "Karakhanid", "xqt": "Qatabanian", "xra": "Krahô", "xrb": "Eastern Karaboro", "xrd": "Gundungurra", "xre": "Kreye", "xrg": "Minang", "xri": "Krikati-Timbira", "xrm": "Armazic", "xrn": "Arin", "xrr": "Raetic", "xrt": "Aranama-Tamique", "xru": "Marriammu", "xrw": "Karawa", "xsa": "Sabaean", "xsb": "Sambal", "xsc": "Scythian", "xsd": "Sidetic", "xse": "Sempan", "xsh": "Shamang", "xsi": "Sio", "xsj": "Subi", "xsl": "South Slavey", "xsm": "Kasem", "xsn": "Sanga (Nigeria)", "xso": "Solano", "xsp": "Silopi", "xsq": "Makhuwa-Saka", "xsr": "Sherpa", "xss": "Assan", "xsu": "Sanumá", "xsv": "Sudovian", "xsy": "Saisiyat", "xta": "Alcozauca Mixtec", "xtb": "Chazumba Mixtec", "xtc": "Katcha-Kadugli-Miri", "xtd": "Diuxi-Tilantongo Mixtec", "xte": "Ketengban", "xtg": "Transalpine Gaulish", "xth": "Yitha Yitha", "xti": "Sinicahua Mixtec", "xtj": "San Juan Teita Mixtec", "xtl": "Tijaltepec Mixtec", "xtm": "Magdalena Peñasco Mixtec", "xtn": "Northern Tlaxiaco Mixtec", "xto": "Tokharian A", "xtp": "San Miguel Piedras Mixtec", "xtq": "Tumshuqese", "xtr": "Early Tripuri", "xts": "Sindihui Mixtec", "xtt": "Tacahua Mixtec", "xtu": "Cuyamecalco Mixtec", "xtv": "Thawa", "xtw": "Tawandê", "xty": "Yoloxochitl Mixtec", "xua": "Alu Kurumba", "xub": "Betta Kurumba", "xud": "Umiida", "xug": "Kunigami", "xuj": "Jennu Kurumba", "xul": "Ngunawal; Nunukul", "xum": "Umbrian", "xun": "Unggaranggu", "xuo": "Kuo", "xup": "Upper Umpqua", "xur": "Urartian", "xut": "Kuthant", "xuu": "Kxoe; Khwedam", "xve": "Venetic", "xvi": "Kamviri", "xvn": "Vandalic", "xvo": "Volscian", "xvs": "Vestinian", "xwa": "Kwaza", "xwc": "Woccon", "xwd": "Wadi Wadi", "xwe": "Xwela Gbe", "xwg": "Kwegu", "xwj": "Wajuk", "xwk": "Wangkumara", "xwl": "Western Xwla Gbe", "xwo": "Written Oirat", "xwr": "Kwerba Mamberamo", "xwt": "Wotjobaluk", "xww": "Wemba Wemba", "xxb": "Boro (Ghana)", "xxk": "Ke'o", "xxm": "Minkin", "xxr": "Koropó", "xxt": "Tambora", "xya": "Yaygir", "xyb": "Yandjibara", "xyj": "Mayi-Yapi", "xyk": "Mayi-Kulan", "xyl": "Yalakalore", "xyt": "Mayi-Thakurti", "xyy": "Yorta Yorta", "xzh": "Zhang-Zhung", "xzm": "Zemgalian", "xzp": "Ancient Zapotec", "yaa": "Yaminahua", "yab": "Yuhup", "yac": "Pass Valley Yali", "yad": "Yagua", "yae": "Pumé", "yaf": "Yaka (Democratic Republic of Congo)", "yag": "Yámana", "yah": 
"Yazgulyam", "yai": "Yagnobi", "yaj": "Banda-Yangere", "yak": "Yakama", "yal": "Yalunka", "yam": "Yamba", "yan": "Mayangna", "yao": "Yao", "yap": "Yapese", "yaq": "Yaqui", "yar": "Yabarana", "yas": "Nugunu (Cameroon)", "yat": "Yambeta", "yau": "Yuwana", "yav": "Yangben", "yaw": "Yawalapití", "yax": "Yauma", "yay": "Agwagwune", "yaz": "Lokaa", "yba": "Yala", "ybb": "Yemba", "ybe": "West Yugur", "ybh": "Yakha", "ybi": "Yamphu", "ybj": "Hasha", "ybk": "Bokha", "ybl": "Yukuben", "ybm": "Yaben", "ybn": "Yabaâna", "ybo": "Yabong", "ybx": "Yawiyo", "yby": "Yaweyuha", "ych": "Chesu", "ycl": "Lolopo", "ycn": "Yucuna", "ycp": "Chepya", "yda": "Yanda", "ydd": "Eastern Yiddish", "yde": "Yangum Dey", "ydg": "Yidgha", "ydk": "Yoidik", "yea": "Ravula", "yec": "Yeniche", "yee": "Yimas", "yei": "Yeni", "yej": "Yevanic", "yel": "Yela", "yer": "Tarok", "yes": "Nyankpa", "yet": "Yetfa", "yeu": "Yerukula", "yev": "Yapunda", "yey": "Yeyi", "yga": "Malyangapa", "ygi": "Yiningayi", "ygl": "Yangum Gel", "ygm": "Yagomi", "ygp": "Gepo", "ygr": "Yagaria", "ygs": "Yolŋu Sign Language", "ygu": "Yugul", "ygw": "Yagwoia", "yha": "Baha Buyang", "yhd": "Judeo-Iraqi Arabic", "yhl": "Hlepho Phowa", "yhs": "Yan-nhaŋu Sign Language", "yi": "Yiddish", "yia": "Yinggarda", "yif": "Ache", "yig": "Wusa Nasu", "yih": "Western Yiddish", "yii": "Yidiny", "yij": "Yindjibarndi", "yik": "Dongshanba Lalo", "yil": "Yindjilandji", "yim": "Yimchungru Naga", "yin": "Riang Lai; Yinchia", "yip": "Pholo", "yiq": "Miqie", "yir": "North Awyu", "yis": "Yis", "yit": "Eastern Lalu", "yiu": "Awu", "yiv": "Northern Nisu", "yix": "Axi Yi", "yiz": "Azhe", "yka": "Yakan", "ykg": "Northern Yukaghir", "yki": "Yoke", "ykk": "Yakaikeke", "ykl": "Khlula", "ykm": "Kap", "ykn": "Kua-nsi", "yko": "Yasa", "ykr": "Yekora", "ykt": "Kathu", "yku": "Kuamasi", "yky": "Yakoma", "yla": "Yaul", "ylb": "Yaleba", "yle": "Yele", "ylg": "Yelogu", "yli": "Angguruk Yali", "yll": "Yil", "ylm": "Limi", "yln": "Langnian Buyang", "ylo": "Naluo Yi", "ylr": "Yalarnnga", "ylu": "Aribwaung", "yly": "Nyâlayu; Nyelâyu", "ymb": "Yambes", "ymc": "Southern Muji", "ymd": "Muda", "yme": "Yameo", "ymg": "Yamongeri", "ymh": "Mili", "ymi": "Moji", "ymk": "Makwe", "yml": "Iamalele", "ymm": "Maay", "ymn": "Yamna; Sunum", "ymo": "Yangum Mon", "ymp": "Yamap", "ymq": "Qila Muji", "ymr": "Malasar", "yms": "Mysian", "ymx": "Northern Muji", "ymz": "Muzi", "yna": "Aluo", "ynd": "Yandruwandha", "yne": "Lang'e", "yng": "Yango", "ynk": "Naukan Yupik", "ynl": "Yangulam", "ynn": "Yana", "yno": "Yong", "ynq": "Yendang", "yns": "Yansi", "ynu": "Yahuna", "yo": "Yoruba", "yob": "Yoba", "yog": "Yogad", "yoi": "Yonaguni", "yok": "Yokuts", "yol": "Yola", "yom": "Yombe", "yon": "Yongkom", "yot": "Yotti", "yox": "Yoron", "yoy": "Yoy", "ypa": "Phala", "ypb": "Labo Phowa", "ypg": "Phola", "yph": "Phupha", "ypk": "Yupik languages", "ypm": "Phuma", "ypn": "Ani Phowa", "ypo": "Alo Phola", "ypp": "Phupa", "ypz": "Phuza", "yra": "Yerakai", "yrb": "Yareba", "yre": "Yaouré", "yrk": "Nenets", "yrl": "Nhengatu", "yrm": "Yirrk-Mel", "yrn": "Yerong", "yro": "Yaroamë", "yrs": "Yarsun", "yrw": "Yarawata", "yry": "Yarluyandi", "ysc": "Yassic", "ysd": "Samatao", "ysg": "Sonaga", "ysl": "Yugoslavian Sign Language", "ysm": "Myanmar Sign Language", "ysn": "Sani", "yso": "Nisi (China)", "ysp": "Southern Lolopo", "ysr": "Sirenik Yupik", "yss": "Yessan-Mayo", "ysy": "Sanie", "yta": "Talu", "ytl": "Tanglang", "ytp": "Thopho", "ytw": "Yout Wam", "yty": "Yatay", "yua": "Yucateco; Yucatec Maya", "yub": "Yugambal", "yuc": "Yuchi", "yud": 
"Judeo-Tripolitanian Arabic", "yue": "Yue Chinese; Cantonese", "yuf": "Havasupai-Walapai-Yavapai", "yug": "Yug", "yui": "Yurutí", "yuj": "Karkar-Yuri", "yuk": "Yuki", "yul": "Yulu", "yum": "Quechan", "yun": "Bena (Nigeria)", "yup": "Yukpa", "yuq": "Yuqui", "yur": "Yurok", "yut": "Yopno", "yuw": "Yau (Morobe Province)", "yux": "Southern Yukaghir", "yuy": "East Yugur", "yuz": "Yuracare", "yva": "Yawa", "yvt": "Yavitero", "ywa": "Kalou", "ywg": "Yinhawangka", "ywl": "Western Lalu", "ywn": "Yawanawa", "ywq": "Wuding-Luquan Yi", "ywr": "Yawuru", "ywt": "Xishanba Lalo; Central Lalo", "ywu": "Wumeng Nasu", "yww": "Yawarawarga", "yxa": "Mayawali", "yxg": "Yagara", "yxl": "Yardliyawarra", "yxm": "Yinwum", "yxu": "Yuyu", "yxy": "Yabula Yabula", "yyr": "Yir Yoront", "yyu": "Yau (Sandaun Province)", "yyz": "Ayizi", "yzg": "E'ma Buyang", "yzk": "Zokhuo", "za": "Zhuang; Chuang", "zaa": "Sierra de Juárez Zapotec", "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", "zac": "Ocotlán Zapotec", "zad": "Cajonos Zapotec", "zae": "Yareni Zapotec", "zaf": "Ayoquesco Zapotec", "zag": "Zaghawa", "zah": "Zangwal", "zai": "Isthmus Zapotec", "zaj": "Zaramo", "zak": "Zanaki", "zal": "Zauzou", "zam": "Miahuatlán Zapotec", "zao": "Ozolotepec Zapotec", "zap": "Zapotec", "zaq": "Aloápam Zapotec", "zar": "Rincón Zapotec", "zas": "Santo Domingo Albarradas Zapotec", "zat": "Tabaa Zapotec", "zau": "Zangskari", "zav": "Yatzachi Zapotec", "zaw": "Mitla Zapotec", "zax": "Xadani Zapotec", "zay": "Zayse-Zergulla; Zaysete", "zaz": "Zari", "zba": "Balaibalan", "zbc": "Central Berawan", "zbe": "East Berawan", "zbl": "Blissymbols; Bliss; Blissymbolics", "zbt": "Batui", "zbu": "Bu (Bauchi State)", "zbw": "West Berawan", "zca": "Coatecas Altas Zapotec", "zcd": "Las Delicias Zapotec", "zch": "Central Hongshuihe Zhuang", "zdj": "Ngazidja Comorian", "zea": "Zeeuws", "zeg": "Zenag", "zeh": "Eastern Hongshuihe Zhuang", "zen": "Zenaga", "zga": "Kinga", "zgb": "Guibei Zhuang", "zgh": "Standard Moroccan Tamazight", "zgm": "Minz Zhuang", "zgn": "Guibian Zhuang", "zgr": "Magori", "zh": "Chinese", "zhb": "Zhaba", "zhd": "Dai Zhuang", "zhi": "Zhire", "zhn": "Nong Zhuang", "zhw": "Zhoa", "zhx": "Chinese (family)", "zia": "Zia", "zib": "Zimbabwe Sign Language", "zik": "Zimakani", "zil": "Zialo", "zim": "Mesme", "zin": "Zinza", "ziw": "Zigula", "ziz": "Zizilivakan", "zka": "Kaimbulawa", "zkb": "Koibal", "zkd": "Kadu", "zkg": "Koguryo", "zkh": "Khorezmian", "zkk": "Karankawa", "zkn": "Kanan", "zko": "Kott", "zkp": "São Paulo Kaingáng", "zkr": "Zakhring", "zkt": "Kitan", "zku": "Kaurna", "zkv": "Krevinian", "zkz": "Khazar", "zla": "Zula", "zle": "East Slavic languages", "zlj": "Liujiang Zhuang", "zlm": "Malay (individual language)", "zln": "Lianshan Zhuang", "zlq": "Liuqian Zhuang", "zls": "South Slavic languages", "zlw": "West Slavic languages", "zma": "Manda (Australia)", "zmb": "Zimba", "zmc": "Margany", "zmd": "Maridan", "zme": "Mangerr", "zmf": "Mfinu", "zmg": "Marti Ke", "zmh": "Makolkol", "zmi": "Negeri Sembilan Malay", "zmj": "Maridjabin", "zmk": "Mandandanyi", "zml": "Matngala", "zmm": "Marimanindji; Marramaninyshi", "zmn": "Mbangwe", "zmo": "Molo", "zmp": "Mpuono", "zmq": "Mituku", "zmr": "Maranunggu", "zms": "Mbesa", "zmt": "Maringarr", "zmu": "Muruwari", "zmv": "Mbariman-Gudhinma", "zmw": "Mbo (Democratic Republic of Congo)", "zmx": "Bomitaba", "zmy": "Mariyedi", "zmz": "Mbandja", "zna": "Zan Gula", "znd": "Zande languages", "zne": "Zande (individual language)", "zng": "Mang", "znk": "Manangkari", "zns": "Mangas", "zoc": 
"Copainalá Zoque", "zoh": "Chimalapa Zoque", "zom": "Zou", "zoo": "Asunción Mixtepec Zapotec", "zoq": "Tabasco Zoque", "zor": "Rayón Zoque", "zos": "Francisco León Zoque", "zpa": "Lachiguiri Zapotec", "zpb": "Yautepec Zapotec", "zpc": "Choapan Zapotec", "zpd": "Southeastern Ixtlán Zapotec", "zpe": "Petapa Zapotec", "zpf": "San Pedro Quiatoni Zapotec", "zpg": "Guevea De Humboldt Zapotec", "zph": "Totomachapan Zapotec", "zpi": "Santa María Quiegolani Zapotec", "zpj": "Quiavicuzas Zapotec", "zpk": "Tlacolulita Zapotec", "zpl": "Lachixío Zapotec", "zpm": "Mixtepec Zapotec", "zpn": "Santa Inés Yatzechi Zapotec", "zpo": "Amatlán Zapotec", "zpp": "El Alto Zapotec", "zpq": "Zoogocho Zapotec", "zpr": "Santiago Xanica Zapotec", "zps": "Coatlán Zapotec", "zpt": "San Vicente Coatlán Zapotec", "zpu": "Yalálag Zapotec", "zpv": "Chichicapan Zapotec", "zpw": "Zaniza Zapotec", "zpx": "San Baltazar Loxicha Zapotec", "zpy": "Mazaltepec Zapotec", "zpz": "Texmelucan Zapotec", "zqe": "Qiubei Zhuang", "zra": "Kara (Korea)", "zrg": "Mirgan", "zrn": "Zerenkel", "zro": "Záparo", "zrp": "Zarphatic", "zrs": "Mairasi", "zsa": "Sarasira", "zsk": "Kaskean", "zsl": "Zambian Sign Language", "zsm": "Standard Malay", "zsr": "Southern Rincon Zapotec", "zsu": "Sukurum", "zte": "Elotepec Zapotec", "ztg": "Xanaguía Zapotec", "ztl": "Lapaguía-Guivini Zapotec", "ztm": "San Agustín Mixtepec Zapotec", "ztn": "Santa Catarina Albarradas Zapotec", "ztp": "Loxicha Zapotec", "ztq": "Quioquitani-Quierí Zapotec", "zts": "Tilquiapan Zapotec", "ztt": "Tejalapan Zapotec", "ztu": "Güilá Zapotec", "ztx": "Zaachila Zapotec", "zty": "Yatee Zapotec", "zu": "Zulu", "zua": "Zeem", "zuh": "Tokano", "zum": "Kumzari", "zun": "Zuni", "zuy": "Zumaya", "zwa": "Zay", "zyb": "Yongbei Zhuang", "zyg": "Yang Zhuang", "zyj": "Youjiang Zhuang", "zyn": "Yongnan Zhuang", "zyp": "Zyphe Chin", "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", "zzj": "Zuojiang Zhuang" }
datasets/src/datasets/utils/resources/languages.json/0
{ "file_path": "datasets/src/datasets/utils/resources/languages.json", "repo_id": "datasets", "token_count": 111198 }
110
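For orientation, the resource above is a flat JSON mapping from ISO 639 language codes to display names. A minimal sketch of how such a table could be loaded and queried, assuming the repository path shown in the metadata entry (the lookup keys are taken from the data above; everything else is illustrative):

import json

# Load the ISO 639 code -> language name mapping shipped as a resource file.
with open("datasets/src/datasets/utils/resources/languages.json", encoding="utf-8") as f:
    languages = json.load(f)

# Direct lookups for codes present in the table.
print(languages["ta"])    # "Tamil"
print(languages["zpm"])   # "Mixtepec Zapotec"

# `.get` avoids a KeyError for codes that are not in the table.
print(languages.get("not-a-code", "unknown code"))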
import pytest

from huggingface_hub import snapshot_download


@pytest.fixture
def dataset_dir(tmp_path):
    dataset_dir = tmp_path / "test_command_dataset_dir"
    snapshot_download("hf-internal-testing/ner-jsonl", repo_type="dataset", local_dir=dataset_dir)
    return str(dataset_dir)

datasets/tests/commands/conftest.py/0
{ "file_path": "datasets/tests/commands/conftest.py", "repo_id": "datasets", "token_count": 106 }
111
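A hypothetical companion test (not part of the original conftest) showing how the dataset_dir fixture above might be consumed; the test name and the assertions are illustrative only and make no claims about the exact files in the snapshot:

import os


def test_dataset_dir_contains_files(dataset_dir):
    # `dataset_dir` is the fixture defined above: the path of a local snapshot
    # of the hf-internal-testing/ner-jsonl dataset repository.
    assert os.path.isdir(dataset_dir)
    # Only check that the snapshot produced at least one file; the exact
    # file names are intentionally not asserted here.
    assert len(os.listdir(dataset_dir)) > 0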
import datetime from unittest import TestCase from unittest.mock import MagicMock, patch import numpy as np import pandas as pd import pyarrow as pa import pytest from datasets import Array2D from datasets.arrow_dataset import Column, Dataset from datasets.features import Audio, ClassLabel, Features, Image, LargeList, List, Sequence, Value from datasets.features.features import ( _align_features, _arrow_to_datasets_dtype, _cast_to_python_objects, _check_if_features_can_be_aligned, _check_non_null_non_empty_recursive, _visit, cast_to_python_objects, decode_nested_example, encode_nested_example, generate_from_arrow_type, generate_from_dict, get_nested_type, require_decoding, require_storage_cast, require_storage_embed, string_to_arrow, ) from datasets.features.translation import Translation, TranslationVariableLanguages from datasets.info import DatasetInfo from datasets.utils.py_utils import asdict from ..utils import require_jax, require_numpy1_on_windows, require_tf, require_torch def list_with(item): return [item] class FeaturesTest(TestCase): def test_from_arrow_schema_simple(self): data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10} original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")}) dset = Dataset.from_dict(data, features=original_features) new_features = dset.features new_dset = Dataset.from_dict(data, features=new_features) self.assertEqual(original_features.type, new_features.type) self.assertDictEqual(dset[0], new_dset[0]) self.assertDictEqual(dset[:], new_dset[:]) def test_from_arrow_schema_with_sequence(self): data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10} original_features = Features({"a": {"b": {"c": List(Value("string"))}}, "foo": Value("int64")}) dset = Dataset.from_dict(data, features=original_features) new_features = dset.features new_dset = Dataset.from_dict(data, features=new_features) self.assertEqual(original_features.type, new_features.type) self.assertDictEqual(dset[0], new_dset[0]) self.assertDictEqual(dset[:], new_dset[:]) def test_string_to_arrow_bijection_for_primitive_types(self): supported_pyarrow_datatypes = [ pa.time32("s"), pa.time64("us"), pa.timestamp("s"), pa.timestamp("ns", tz="America/New_York"), pa.date32(), pa.date64(), pa.duration("s"), pa.decimal128(10, 2), pa.decimal256(40, -3), pa.string(), pa.int32(), pa.float64(), pa.array([datetime.time(1, 1, 1)]).type, # arrow type: DataType(time64[us]) ] for dt in supported_pyarrow_datatypes: self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt))) unsupported_pyarrow_datatypes = [pa.list_(pa.float64())] for dt in unsupported_pyarrow_datatypes: with self.assertRaises(ValueError): string_to_arrow(_arrow_to_datasets_dtype(dt)) supported_datasets_dtypes = [ "time32[s]", "timestamp[ns]", "timestamp[ns, tz=+07:30]", "duration[us]", "decimal128(30, -4)", "int32", "float64", ] for sdt in supported_datasets_dtypes: self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt))) unsupported_datasets_dtypes = [ "time32[ns]", "timestamp[blob]", "timestamp[[ns]]", "timestamp[ns, tz=[ns]]", "duration[[us]]", "decimal20(30, -4)", "int", ] for sdt in unsupported_datasets_dtypes: with self.assertRaises(ValueError): string_to_arrow(sdt) def test_categorical_one_way(self): # Categorical types (aka dictionary types) need special handling as there isn't a bijection categorical_type = pa.dictionary(pa.int32(), pa.string()) self.assertEqual("string", _arrow_to_datasets_dtype(categorical_type)) def test_feature_named_type(self): """reference: issue #1110""" 
features = Features({"_type": Value("string")}) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_feature_named_self_as_kwarg(self): """reference: issue #5641""" features = Features(self=Value("string")) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_class_label_feature_with_no_labels(self): """reference: issue #4681""" features = Features({"label": ClassLabel(names=[])}) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_reorder_fields_as(self): features = Features( { "id": Value("string"), "document": { "title": Value("string"), "url": Value("string"), "html": Value("string"), "tokens": {"token": List(Value("string")), "is_html": List(Value("bool"))}, }, "question": { "text": Value("string"), "tokens": List(Value("string")), }, "annotations": { "id": List(Value("string")), "long_answer": List( { "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), "end_byte": Value("int64"), } ), "short_answers": List( { "start_token": List(Value("int64")), "end_token": List(Value("int64")), "start_byte": List(Value("int64")), "end_byte": List(Value("int64")), "text": List(Value("string")), } ), "yes_no_answer": List(ClassLabel(names=["NO", "YES"])), }, } ) other = Features( # same but with a shuffled fields order { "id": Value("string"), "document": { "tokens": {"token": List(Value("string")), "is_html": List(Value("bool"))}, "title": Value("string"), "url": Value("string"), "html": Value("string"), }, "question": { "text": Value("string"), "tokens": List(Value("string")), }, "annotations": { "yes_no_answer": List(ClassLabel(names=["NO", "YES"])), "id": List(Value("string")), "long_answer": List( { "end_byte": Value("int64"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), } ), "short_answers": List( { "text": List(Value("string")), "start_token": List(Value("int64")), "end_token": List(Value("int64")), "start_byte": List(Value("int64")), "end_byte": List(Value("int64")), } ), }, } ) expected = Features( { "id": Value("string"), "document": { "tokens": {"token": List(Value("string")), "is_html": List(Value("bool"))}, "title": Value("string"), "url": Value("string"), "html": Value("string"), }, "question": { "text": Value("string"), "tokens": List(Value("string")), }, "annotations": { "yes_no_answer": List(ClassLabel(names=["NO", "YES"])), "id": List(Value("string")), "long_answer": List( { "end_byte": Value("int64"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), } ), "short_answers": List( { "text": List(Value("string")), "start_token": List(Value("int64")), "end_token": List(Value("int64")), "start_byte": List(Value("int64")), "end_byte": List(Value("int64")), } ), }, } ) reordered_features = features.reorder_fields_as(other) self.assertDictEqual(reordered_features, expected) self.assertEqual(reordered_features.type, other.type) self.assertEqual(reordered_features.type, expected.type) self.assertNotEqual(reordered_features.type, features.type) def test_flatten(self): features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}}) _features = features.copy() flattened_features = features.flatten() assert flattened_features == {"foo.bar1": 
Value("int32"), "foo.bar2.foobar": Value("string")} assert features == _features, "calling flatten shouldn't alter the current features" def test_flatten_with_sequence(self): features = Features({"foo": {"bar": List({"my_value": Value("int32")})}}) _features = features.copy() flattened_features = features.flatten() assert flattened_features == {"foo.bar": List({"my_value": Value("int32")})} assert features == _features, "calling flatten shouldn't alter the current features" def test_features_dicts_are_synced(self): def assert_features_dicts_are_synced(features: Features): assert ( hasattr(features, "_column_requires_decoding") and features.keys() == features._column_requires_decoding.keys() ) features = Features({"foo": {"bar": List({"my_value": Value("int32")})}}) assert_features_dicts_are_synced(features) features["barfoo"] = Image() assert_features_dicts_are_synced(features) del features["barfoo"] assert_features_dicts_are_synced(features) features.update({"foobar": Value("string")}) assert_features_dicts_are_synced(features) features.pop("foobar") assert_features_dicts_are_synced(features) features.popitem() assert_features_dicts_are_synced(features) features.setdefault("xyz", Value("bool")) assert_features_dicts_are_synced(features) features.clear() assert_features_dicts_are_synced(features) def test_classlabel_init(tmp_path_factory): names = ["negative", "positive"] names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") with open(names_file, "w", encoding="utf-8") as f: f.write("\n".join(names)) classlabel = ClassLabel(names=names) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(names_file=names_file) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(num_classes=len(names), names=names) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(num_classes=len(names)) assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names) with pytest.raises(ValueError): classlabel = ClassLabel(num_classes=len(names) + 1, names=names) with pytest.raises(ValueError): classlabel = ClassLabel(names=names, names_file=names_file) with pytest.raises(ValueError): classlabel = ClassLabel() with pytest.raises(TypeError): classlabel = ClassLabel(names=np.array(names)) def test_classlabel_str2int(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) for label in names: assert classlabel.str2int(label) == names.index(label) with pytest.raises(ValueError): classlabel.str2int("__bad_label_name__") with pytest.raises(ValueError): classlabel.str2int(1) with pytest.raises(ValueError): classlabel.str2int(None) def test_classlabel_int2str(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) for i in range(len(names)): assert classlabel.int2str(i) == names[i] with pytest.raises(ValueError): classlabel.int2str(len(names)) with pytest.raises(ValueError): classlabel.int2str(-1) with pytest.raises(ValueError): classlabel.int2str(None) def test_classlabel_cast_storage(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) # from integers arr = pa.array([0, 1, -1, -100], type=pa.int64()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1, -1, -100] arr = pa.array([0, 1, -1, -100], type=pa.int32()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1, -1, -100] arr = 
pa.array([3]) with pytest.raises(ValueError): classlabel.cast_storage(arr) # from strings arr = pa.array(["negative", "positive"]) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1] arr = pa.array(["__label_that_doesnt_exist__"]) with pytest.raises(ValueError): classlabel.cast_storage(arr) # from nulls arr = pa.array([None]) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [None] # from empty arr = pa.array([], pa.int64()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [] arr = pa.array([], pa.string()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [] @pytest.mark.parametrize("class_label_arg", ["names", "names_file"]) def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory): names = ["negative", "positive"] names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") with open(names_file, "w", encoding="utf-8") as f: f.write("\n".join(names)) if class_label_arg == "names": class_label = ClassLabel(names=names) elif class_label_arg == "names_file": class_label = ClassLabel(names_file=names_file) generated_class_label = generate_from_dict(asdict(class_label)) assert generated_class_label == class_label @pytest.mark.parametrize( "schema", [LargeList(Audio()), List(Audio())], ) def test_decode_nested_example_with_list_types(schema, monkeypatch): mock_decode_example = MagicMock() monkeypatch.setattr(Audio, "decode_example", mock_decode_example) audio_example = {"path": "dummy_audio_path"} _ = decode_nested_example(schema, [audio_example]) assert mock_decode_example.called assert mock_decode_example.call_args.args[0] == audio_example @pytest.mark.parametrize( "schema", [List(ClassLabel(names=["a", "b"])), LargeList(ClassLabel(names=["a", "b"]))], ) def test_encode_nested_example_with_list_types(schema): result = encode_nested_example(schema, ["b"]) assert result == [1] @pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}]) def test_encode_nested_example_sequence_with_none(inner_type): schema = List(inner_type) obj = None result = encode_nested_example(schema, obj) assert result is None @pytest.mark.parametrize( "features_dict, example, expected_encoded_example", [ ({"col_1": ClassLabel(names=["a", "b"])}, {"col_1": "b"}, {"col_1": 1}), ({"col_1": List(ClassLabel(names=["a", "b"]))}, {"col_1": ["b"]}, {"col_1": [1]}), ({"col_1": LargeList(ClassLabel(names=["a", "b"]))}, {"col_1": ["b"]}, {"col_1": [1]}), ({"col_1": List(ClassLabel(names=["a", "b"]))}, {"col_1": ["b"]}, {"col_1": [1]}), ], ) def test_encode_example(features_dict, example, expected_encoded_example): features = Features(features_dict) encoded_example = features.encode_example(example) assert encoded_example == expected_encoded_example def test_encode_batch_with_example_with_empty_first_elem(): features = Features( { "x": List(List(ClassLabel(names=["a", "b"]))), } ) encoded_batch = features.encode_batch( { "x": [ [["a"], ["b"]], [[], ["b"]], ] } ) assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]} def test_encode_column_dict_with_none(): features = Features( { "x": {"a": ClassLabel(names=["a", "b"]), "b": Value("int32")}, } ) encoded_column = features.encode_column([{"a": "a", "b": 1}, None], "x") assert encoded_column == [{"a": 0, "b": 1}, None] @pytest.mark.parametrize( "feature", [ Value("int32"), ClassLabel(num_classes=2), Translation(languages=["en", 
"fr"]), TranslationVariableLanguages(languages=["en", "fr"]), ], ) def test_dataset_feature_with_none(feature): data = {"col": [None]} features = Features({"col": feature}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"col"} assert item["col"] is None batch = dset[:1] assert len(batch) == 1 assert batch.keys() == {"col"} assert isinstance(batch["col"], list) and all(item is None for item in batch["col"]) column = dset["col"] assert len(column) == 1 assert isinstance(column, Column) and all(item is None for item in column) # nested tests data = {"col": [[None]]} features = Features({"col": List(feature)}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"col"} assert all(i is None for i in item["col"]) data = {"nested": [{"col": None}]} features = Features({"nested": {"col": feature}}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"nested"} assert item["nested"].keys() == {"col"} assert item["nested"]["col"] is None def iternumpy(key1, value1, value2): if value1.dtype != value2.dtype: # check only for dtype raise AssertionError( f"dtype of '{key1}' key for casted object: {value1.dtype} and expected object: {value2.dtype} not matching" ) def dict_diff(d1: dict, d2: dict): # check if 2 dictionaries are equal np.testing.assert_equal(d1, d2) # sanity check if dict values are equal or not for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()): # check if their values have same dtype or not if isinstance(v1, dict): # nested dictionary case dict_diff(v1, v2) elif isinstance(v1, np.ndarray): # checks if dtype and value of np.ndarray is equal iternumpy(k1, v1, v2) elif isinstance(v1, list): for element1, element2 in zip(v1, v2): # iterates over all elements of list if isinstance(element1, dict): dict_diff(element1, element2) elif isinstance(element1, np.ndarray): iternumpy(k1, element1, element2) class CastToPythonObjectsTest(TestCase): def test_cast_to_python_objects_list(self): obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_tuple(self): obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} expected_obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_or_numpy(self): obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)} expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) def test_cast_to_python_objects_series(self): obj = { "col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3), "col_2": pd.Series([[1, 2], [3, 4], [5, 6]]), } expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_dataframe(self): obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}) expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 
2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_pandas_timestamp(self): obj = pd.Timestamp(2020, 1, 1) expected_obj = obj.to_pydatetime() casted_obj = cast_to_python_objects(obj) self.assertEqual(casted_obj, expected_obj) casted_obj = cast_to_python_objects(pd.Series([obj])) self.assertListEqual(casted_obj, [expected_obj]) casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) self.assertDictEqual(casted_obj, {"a": [expected_obj]}) def test_cast_to_python_objects_pandas_timedelta(self): obj = pd.Timedelta(seconds=1) expected_obj = obj.to_pytimedelta() casted_obj = cast_to_python_objects(obj) self.assertEqual(casted_obj, expected_obj) casted_obj = cast_to_python_objects(pd.Series([obj])) self.assertListEqual(casted_obj, [expected_obj]) casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) self.assertDictEqual(casted_obj, {"a": [expected_obj]}) @require_numpy1_on_windows @require_torch def test_cast_to_python_objects_torch(self): import torch obj = { "col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)), } expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @require_tf def test_cast_to_python_objects_tf(self): import tensorflow as tf obj = { "col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": tf.constant(np.arange(1, 7).reshape(3, 2)), } expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @require_jax def test_cast_to_python_objects_jax(self): import jax.numpy as jnp obj = { "col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": jnp.array(np.arange(1, 7).reshape(3, 2)), } assert obj["col_2"].dtype == jnp.int32 expected_obj = { "col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects) def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast): obj = {"col_1": [[1, 2], [3, 4], [5, 6]]} cast_to_python_objects(obj) self.assertEqual(mocked_cast.call_count, 4) # 4 = depth of obj SIMPLE_FEATURES = [ Features(), Features({"a": Value("int32")}), Features({"a": Value("int32", id="my feature")}), Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}), ] CUSTOM_FEATURES = [ Features({"label": ClassLabel(names=["negative", "positive"])}), Features({"array": Array2D(dtype="float32", shape=(4, 4))}), Features({"image": Image()}), Features({"audio": Audio()}), Features({"image": Image(decode=False)}), Features({"audio": Audio(decode=False)}), Features({"translation": Translation(["en", "fr"])}), Features({"translation": TranslationVariableLanguages(["en", "fr"])}), ] NESTED_FEATURES = [ Features({"foo": {}}), Features({"foo": {"bar": Value("int32")}}), Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}), Features({"foo": List(Value("int32"))}), Features({"foo": {"bar": List(Value("int32"))}}), Features({"foo": List({"bar": Value("int32")})}), Features({"foo": LargeList(Value("int32"))}), 
Features({"foo": LargeList({"bar": Value("int32")})}), ] NESTED_CUSTOM_FEATURES = [ Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}), Features({"foo": List(ClassLabel(names=["negative", "positive"]))}), Features({"foo": List({"bar": ClassLabel(names=["negative", "positive"])})}), Features({"foo": LargeList(ClassLabel(names=["negative", "positive"]))}), Features({"foo": LargeList({"bar": ClassLabel(names=["negative", "positive"])})}), ] @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_dict_and_from_dict_round_trip(features: Features): features_dict = features.to_dict() assert isinstance(features_dict, dict) reloaded = Features.from_dict(features_dict) assert features == reloaded @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_yaml_list(features: Features): features_yaml_list = features._to_yaml_list() assert isinstance(features_yaml_list, list) reloaded = Features._from_yaml_list(features_yaml_list) assert features == reloaded @pytest.mark.parametrize( "features_dict, expected_features_dict", [ ({"col": [{"sub_col": Value("int32")}]}, {"col": [{"sub_col": Value("int32")}]}), ({"col": LargeList({"sub_col": Value("int32")})}, {"col": LargeList({"sub_col": Value("int32")})}), ({"col": {"sub_col": List(Value("int32"))}}, {"col.sub_col": List(Value("int32"))}), ], ) def test_features_flatten_with_list_types(features_dict, expected_features_dict): features = Features(features_dict) flattened_features = features.flatten() assert flattened_features == Features(expected_features_dict) @pytest.mark.parametrize( "deserialized_features_dict, expected_features_dict", [ ( {"col": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "List"}}, {"col": List(Value("int32"))}, ), ( {"col": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "LargeList"}}, {"col": LargeList(Value("int32"))}, ), ( {"col": {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "List"}}, {"col": List({"sub_col": Value("int32")})}, ), ( {"col": {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "LargeList"}}, {"col": LargeList({"sub_col": Value("int32")})}, ), ( {"col": {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "Sequence"}}, {"col": {"sub_col": List(Value("int32"))}}, ), ], ) def test_features_from_dict_with_list_types(deserialized_features_dict, expected_features_dict): features = Features.from_dict(deserialized_features_dict) assert features == Features(expected_features_dict) @pytest.mark.parametrize( "deserialized_feature_dict, expected_feature", [ ( {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "LargeList"}, LargeList(Value("int32")), ), ( {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "List"}, List(Value("int32")), ), ( {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "List"}, List({"sub_col": Value("int32")}), ), ( {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "LargeList"}, LargeList({"sub_col": Value("int32")}), ), ( {"sub_col": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "List"}}, {"sub_col": List(Value("int32"))}, ), ], ) def test_generate_from_dict_with_list_types(deserialized_feature_dict, expected_feature): feature = generate_from_dict(deserialized_feature_dict) assert feature == expected_feature @pytest.mark.parametrize( "features_dict, 
expected_features_yaml_list", [ ({"col": LargeList(Value("int32"))}, [{"name": "col", "large_list": "int32"}]), ( {"col": LargeList({"sub_col": Value("int32")})}, [{"name": "col", "large_list": [{"dtype": "int32", "name": "sub_col"}]}], ), ], ) def test_features_to_yaml_list_with_large_list(features_dict, expected_features_yaml_list): features = Features(features_dict) features_yaml_list = features._to_yaml_list() assert features_yaml_list == expected_features_yaml_list @pytest.mark.parametrize( "features_yaml_list, expected_features_dict", [ ([{"name": "col", "large_list": "int32"}], {"col": LargeList(Value("int32"))}), ( [{"name": "col", "large_list": [{"dtype": "int32", "name": "sub_col"}]}], {"col": LargeList({"sub_col": Value("int32")})}, ), ], ) def test_features_from_yaml_list_with_large_list(features_yaml_list, expected_features_dict): features = Features._from_yaml_list(features_yaml_list) assert features == Features(expected_features_dict) @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_arrow_schema(features: Features): arrow_schema = features.arrow_schema assert isinstance(arrow_schema, pa.Schema) reloaded = Features.from_arrow_schema(arrow_schema) assert features == reloaded NESTED_COMPARISON = [ [ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})], [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})], ], [ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="null", id=None)})], [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})], ], [ [ Features({"speaker": {"email": Value(dtype="string", id=None)}}), Features({"speaker": {"email": Value(dtype="string", id=None)}}), ], [ Features({"speaker": {"email": Value(dtype="string", id=None)}}), Features({"speaker": {"email": Value(dtype="string", id=None)}}), ], ], [ [ Features({"speaker": {"email": Value(dtype="string", id=None)}}), Features({"speaker": {"email": Value(dtype="null", id=None)}}), ], [ Features({"speaker": {"email": Value(dtype="string", id=None)}}), Features({"speaker": {"email": Value(dtype="string", id=None)}}), ], ], ] @pytest.mark.parametrize("features", NESTED_COMPARISON) def test_features_alignment(features: tuple[list[Features], list[Features]]): inputs, expected = features _check_if_features_can_be_aligned(inputs) # Check that we can align, will raise otherwise. 
assert _align_features(inputs) == expected @pytest.mark.parametrize("dtype", [pa.int32, pa.string]) def test_features_from_arrow_schema_primitive_data_type(dtype): schema = pa.schema([("column_name", dtype())]) assert schema == Features.from_arrow_schema(schema).arrow_schema @pytest.mark.parametrize("scalar_dtype", [pa.int32, pa.string]) @pytest.mark.parametrize("list_dtype", [pa.list_, pa.large_list]) def test_features_from_arrow_schema_list_data_type(list_dtype, scalar_dtype): schema = pa.schema([("column_name", list_dtype(scalar_dtype()))]) assert schema == Features.from_arrow_schema(schema).arrow_schema @pytest.mark.parametrize( "feature, other_feature", [ (List(Value("int64")), List(Value("int64"))), (LargeList(Value("int64")), LargeList(Value("int64"))), (List(Value("int64")), List(Value("int64"))), ( List({"sub_col_1": Value("int64"), "sub_col_2": Value("int64")}), List({"sub_col_2": Value("int64"), "sub_col_1": Value("int64")}), ), ( LargeList({"sub_col_1": Value("int64"), "sub_col_2": Value("int64")}), LargeList({"sub_col_2": Value("int64"), "sub_col_1": Value("int64")}), ), ( {"sub_col_1": List(Value("int64")), "sub_col_2": List(Value("int64"))}, {"sub_col_2": List(Value("int64")), "sub_col_1": List(Value("int64"))}, ), ], ) def test_features_reorder_fields_as_with_list_types(feature, other_feature): features = Features({"col": feature}) other_features = Features({"col": other_feature}) new_features = features.reorder_fields_as(other_features) assert new_features.type == other_features.type @pytest.mark.parametrize( "feature, expected_arrow_data_type", [(Value("int64"), pa.int64), (Value("string"), pa.string)] ) def test_get_nested_type_with_scalar_feature(feature, expected_arrow_data_type): arrow_data_type = get_nested_type(feature) assert arrow_data_type == expected_arrow_data_type() @pytest.mark.parametrize( "scalar_feature, expected_arrow_primitive_data_type", [(Value("int64"), pa.int64), (Value("string"), pa.string)] ) @pytest.mark.parametrize( "list_feature, expected_arrow_nested_data_type", [(list_with, pa.list_), (LargeList, pa.large_list), (Sequence, pa.list_)], ) def test_get_nested_type_with_list_feature( list_feature, expected_arrow_nested_data_type, scalar_feature, expected_arrow_primitive_data_type ): feature = list_feature(scalar_feature) arrow_data_type = get_nested_type(feature) assert arrow_data_type == expected_arrow_nested_data_type(expected_arrow_primitive_data_type()) @pytest.mark.parametrize( "arrow_primitive_data_type, expected_feature", [(pa.int32, Value("int32")), (pa.string, Value("string"))] ) def test_generate_from_arrow_type_with_arrow_primitive_data_type(arrow_primitive_data_type, expected_feature): arrow_data_type = arrow_primitive_data_type() feature = generate_from_arrow_type(arrow_data_type) assert feature == expected_feature @pytest.mark.parametrize( "arrow_primitive_data_type, expected_scalar_feature", [(pa.int32, Value("int32")), (pa.string, Value("string"))] ) @pytest.mark.parametrize( "arrow_nested_data_type, expected_list_feature", [(pa.list_, Sequence), (pa.large_list, LargeList)] ) def test_generate_from_arrow_type_with_arrow_nested_data_type( arrow_nested_data_type, expected_list_feature, arrow_primitive_data_type, expected_scalar_feature ): arrow_data_type = arrow_nested_data_type(arrow_primitive_data_type()) feature = generate_from_arrow_type(arrow_data_type) expected_feature = expected_list_feature(expected_scalar_feature) assert feature == expected_feature @pytest.mark.parametrize( "schema", [[ClassLabel(names=["a", "b"])], 
LargeList(ClassLabel(names=["a", "b"])), List(ClassLabel(names=["a", "b"]))], ) def test_check_non_null_non_empty_recursive_with_list_types(schema): assert _check_non_null_non_empty_recursive([], schema) is False @pytest.mark.parametrize( "schema", [ [[ClassLabel(names=["a", "b"])]], LargeList(LargeList(ClassLabel(names=["a", "b"]))), List(List(ClassLabel(names=["a", "b"]))), ], ) def test_check_non_null_non_empty_recursive_with_nested_list_types(schema): assert _check_non_null_non_empty_recursive([[]], schema) is False @pytest.mark.parametrize("feature", [LargeList(Audio()), List(Audio())]) def test_require_decoding_with_list_types(feature): assert require_decoding(feature) @pytest.mark.parametrize("feature", [LargeList(Audio()), List(Audio())]) def test_require_storage_cast_with_list_types(feature): assert require_storage_cast(feature) @pytest.mark.parametrize("feature", [LargeList(Audio()), List(Audio())]) def test_require_storage_embed_with_list_types(feature): assert require_storage_embed(feature) @pytest.mark.parametrize( "feature, expected", [(List(Value("int32")), List(1)), (LargeList(Value("int32")), LargeList(1)), (List(Value("int32")), List(1))], ) def test_visit_with_list_types(feature, expected): def func(x): return 1 if isinstance(x, Value) else x result = _visit(feature, func) assert result == expected
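
# --- Editor's illustrative sketch (not part of the original test suite) ---
# The parametrized tests above exercise the round trip between `Features` and their
# dict / YAML / Arrow-schema representations, and the mapping of `List` vs `LargeList`
# onto `pa.list_` vs `pa.large_list`. A minimal usage sketch relying on the same imports
# as the tests above; the feature names are only for illustration:
def test_features_round_trip_sketch():
    features = Features(
        {"tokens": List(Value("string")), "label": ClassLabel(names=["negative", "positive"])}
    )
    # dict round trip: to_dict() produces plain Python types that from_dict() restores
    assert Features.from_dict(features.to_dict()) == features
    # Arrow round trip: arrow_schema embeds the feature metadata, so ClassLabel survives too
    assert Features.from_arrow_schema(features.arrow_schema) == features
    # List(...) maps to pa.list_, while LargeList(...) would map to pa.large_list
    assert features.arrow_schema.field("tokens").type == pa.list_(pa.string())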
datasets/tests/features/test_features.py/0
{ "file_path": "datasets/tests/features/test_features.py", "repo_id": "datasets", "token_count": 17867 }
112
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # The text builder yields a single "text" column of dtype "string" by default;
    # an explicit `features` argument overrides the expected dtype
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
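
# --- Editor's illustrative sketch (not part of the original test suite) ---
# The tests above show the two call patterns of `TextDatasetReader`: a single path (or a
# list of paths) yields a `Dataset`, while a dict mapping split names to paths yields a
# `DatasetDict`. A minimal sketch assuming a pytest `tmp_path` fixture and the same
# imports as above; file names and row counts are only for illustration:
def test_text_reader_usage_sketch(tmp_path):
    text_file = tmp_path / "data.txt"
    text_file.write_text("first line\nsecond line\n")
    cache_dir = tmp_path / "cache"
    # A single path returns a Dataset with one "text" column of dtype "string"
    dataset = TextDatasetReader(str(text_file), cache_dir=cache_dir).read()
    assert dataset.column_names == ["text"]
    assert dataset.features["text"].dtype == "string"
    assert dataset.num_rows == 2
    # A dict of splits returns a DatasetDict keyed by split name
    dataset_dict = TextDatasetReader({"train": str(text_file)}, cache_dir=cache_dir).read()
    assert isinstance(dataset_dict, DatasetDict)
    assert dataset_dict["train"].num_rows == 2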
datasets/tests/io/test_text.py/0
{ "file_path": "datasets/tests/io/test_text.py", "repo_id": "datasets", "token_count": 1833 }
113
import asyncio import contextlib import copy import itertools import json import os import pickle import re import sys import tempfile import time from functools import partial from pathlib import Path from unittest import TestCase from unittest.mock import MagicMock, patch import numpy as np import numpy.testing as npt import pandas as pd import pyarrow as pa import pytest from absl.testing import parameterized from fsspec.core import strip_protocol from packaging import version import datasets.arrow_dataset from datasets import concatenate_datasets, interleave_datasets, load_from_disk from datasets.arrow_dataset import Dataset, transmit_format, update_metadata_with_features from datasets.dataset_dict import DatasetDict from datasets.features import ( Array2D, Array3D, ClassLabel, Features, Image, LargeList, List, Translation, TranslationVariableLanguages, Value, ) from datasets.info import DatasetInfo from datasets.iterable_dataset import IterableDataset from datasets.splits import NamedSplit from datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable from datasets.utils.logging import INFO, get_logger from datasets.utils.py_utils import temp_seed from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_dill_gt_0_3_2, require_jax, require_not_windows, require_numpy1_on_windows, require_pil, require_polars, require_pyspark, require_sqlalchemy, require_tf, require_torch, require_transformers, set_current_working_directory_to_temp_dir, ) class PickableMagicMock(MagicMock): def __reduce__(self): return MagicMock, () class Unpicklable: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __getstate__(self): raise pickle.PicklingError() def picklable_map_function(x): return {"id": int(x["filename"].split("_")[-1])} def picklable_map_function_with_indices(x, i): return {"id": i} def picklable_map_function_with_rank(x, r): return {"rank": r} def picklable_map_function_with_indices_and_rank(x, i, r): return {"id": i, "rank": r} def picklable_filter_function(x): return int(x["filename"].split("_")[-1]) < 10 def picklable_filter_function_with_rank(x, r): return r == 0 def assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset): assert dataset.data.schema.metadata is not None assert b"huggingface" in dataset.data.schema.metadata metadata = json.loads(dataset.data.schema.metadata[b"huggingface"].decode()) assert "info" in metadata features = DatasetInfo.from_dict(metadata["info"]).features assert features is not None assert features == dataset.features assert features == Features.from_arrow_schema(dataset.data.schema) assert list(features) == dataset.data.column_names assert list(features) == list(dataset.features) IN_MEMORY_PARAMETERS = [ {"testcase_name": name, "in_memory": im} for im, name in [(True, "in_memory"), (False, "on_disk")] ] @parameterized.named_parameters(IN_MEMORY_PARAMETERS) class BaseDatasetTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures(self, caplog, set_sqlalchemy_silence_uber_warning): self._caplog = caplog def _create_dummy_dataset( self, in_memory: bool, tmp_dir: str, multiple_columns=False, array_features=False, nested_features=False, int_to_float=False, ) -> Dataset: assert int(multiple_columns) + int(array_features) + int(nested_features) < 2 if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": [False, True, False, True]} dset = Dataset.from_dict(data) elif array_features: data = { "col_1": [[[True, 
False], [False, True]]] * 4, # 2D "col_2": [[[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]] * 4, # 3D array "col_3": [[3, 2, 1, 0]] * 4, # List } features = Features( { "col_1": Array2D(shape=(2, 2), dtype="bool"), "col_2": Array3D(shape=(2, 2, 2), dtype="string"), "col_3": List(Value("int64")), } ) dset = Dataset.from_dict(data, features=features) elif nested_features: data = {"nested": [{"a": i, "x": i * 10, "c": i * 100} for i in range(1, 11)]} features = Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}) dset = Dataset.from_dict(data, features=features) elif int_to_float: data = { "text": ["text1", "text2", "text3", "text4"], "labels": [[1, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 1, 0]], } dset = Dataset.from_dict(data) else: dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}) if not in_memory: dset = self._to(in_memory, tmp_dir, dset) return dset def _to(self, in_memory, tmp_dir, *datasets): if in_memory: datasets = [dataset.map(keep_in_memory=True) for dataset in datasets] else: start = 0 while os.path.isfile(os.path.join(tmp_dir, f"dataset{start}.arrow")): start += 1 datasets = [ dataset.map(cache_file_name=os.path.join(tmp_dir, f"dataset{start + i}.arrow")) for i, dataset in enumerate(datasets) ] return datasets if len(datasets) > 1 else datasets[0] def test_dummy_dataset(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: self.assertDictEqual( dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}), ) self.assertEqual(dset[0]["col_1"], 3) self.assertEqual(dset["col_1"][0], 3) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: self.assertDictEqual( dset.features, Features( { "col_1": Array2D(shape=(2, 2), dtype="bool"), "col_2": Array3D(shape=(2, 2, 2), dtype="string"), "col_3": List(Value("int64")), } ), ) self.assertEqual(dset[0]["col_2"], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) self.assertEqual(dset["col_2"][0], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) def test_dataset_getitem(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") self.assertEqual(dset[-1]["filename"], "my_name-train_29") self.assertEqual(dset["filename"][-1], "my_name-train_29") self.assertListEqual(dset[:2]["filename"], ["my_name-train_0", "my_name-train_1"]) self.assertListEqual(dset["filename"][:2], ["my_name-train_0", "my_name-train_1"]) self.assertEqual(dset[:-1]["filename"][-1], "my_name-train_28") self.assertEqual(dset["filename"][:-1][-1], "my_name-train_28") self.assertListEqual(dset[[0, -1]]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[range(0, -2, -1)]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[np.array([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[pd.Series([0, -1])]["filename"], ["my_name-train_0", 
"my_name-train_29"]) with dset.select(range(2)) as dset_subset: self.assertListEqual(dset_subset[-1:]["filename"], ["my_name-train_1"]) self.assertListEqual(dset_subset["filename"][-1:], ["my_name-train_1"]) def test_dummy_dataset_deepcopy(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset2 = copy.deepcopy(dset) # don't copy the underlying arrow data using memory self.assertEqual(len(dset2), 10) self.assertDictEqual(dset2.features, Features({"filename": Value("string")})) self.assertEqual(dset2[0]["filename"], "my_name-train_0") self.assertEqual(dset2["filename"][0], "my_name-train_0") del dset2 def test_dummy_dataset_pickle(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: tmp_file = os.path.join(tmp_dir, "dset.pt") with self._create_dummy_dataset(in_memory, tmp_dir).select(range(0, 10, 2)) as dset: with open(tmp_file, "wb") as f: pickle.dump(dset, f) with open(tmp_file, "rb") as f: with pickle.load(f) as dset: self.assertEqual(len(dset), 5) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir).select( range(0, 10, 2), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") ) as dset: if not in_memory: dset._data.table = Unpicklable() dset._indices.table = Unpicklable() with open(tmp_file, "wb") as f: pickle.dump(dset, f) with open(tmp_file, "rb") as f: with pickle.load(f) as dset: self.assertEqual(len(dset), 5) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") def test_dummy_dataset_serialize(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with set_current_working_directory_to_temp_dir(): with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = "my_dataset" # rel path dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") expected = dset.to_dict() with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = os.path.join(tmp_dir, "my_dataset") # abs path dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir).select( range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") ) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir, nested_features=True) as dset: with assert_arrow_memory_doesnt_increase(): 
dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual( dset.features, Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}), ) self.assertDictEqual(dset[0]["nested"], {"a": 1, "c": 100, "x": 10}) self.assertDictEqual(dset["nested"][0], {"a": 1, "c": 100, "x": 10}) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_shards=4) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 4) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_proc=2) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 2) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_shards=7, num_proc=2) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 7) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): max_shard_size = dset._estimate_nbytes() // 2 + 1 dset.save_to_disk(dataset_path, max_shard_size=max_shard_size) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 2) def test_dummy_dataset_load_from_disk(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = os.path.join(tmp_dir, "my_dataset") dset.save_to_disk(dataset_path) with load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") def test_restore_saved_format(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) dataset_path = os.path.join(tmp_dir, "my_dataset") dset.save_to_disk(dataset_path) with load_from_disk(dataset_path) as loaded_dset: self.assertEqual(dset.format, loaded_dset.format) def test_set_format_numpy_multiple_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint dset.set_format(type="numpy", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], np.int64) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset["col_1"][:], np.ndarray) 
self.assertListEqual(list(dset["col_1"][:].shape), [4]) np.testing.assert_array_equal(dset["col_1"][:], np.array([3, 2, 1, 0])) self.assertNotEqual(dset._fingerprint, fingerprint) dset.reset_format() with dset.formatted_as(type="numpy", columns=["col_1"]): self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], np.int64) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset["col_1"][:], np.ndarray) self.assertListEqual(list(dset["col_1"][:].shape), [4]) np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0])) self.assertEqual(dset.format["type"], None) self.assertEqual(dset.format["format_kwargs"], {}) self.assertEqual(dset.format["columns"], dset.column_names) self.assertEqual(dset.format["output_all_columns"], False) dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="numpy", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0]), 2) self.assertIsInstance(dset[0]["col_2"], np.str_) self.assertEqual(dset[0]["col_2"].item(), "a") @require_numpy1_on_windows @require_torch def test_set_format_torch(self, in_memory): import torch with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="torch", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], torch.Tensor) self.assertIsInstance(dset["col_1"][:], torch.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="torch") self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_1"], torch.Tensor) self.assertIsInstance(dset["col_1"][:], torch.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") self.assertIsInstance(dset[0]["col_3"], torch.Tensor) self.assertIsInstance(dset["col_3"][:], torch.Tensor) self.assertListEqual(list(dset[0]["col_3"].shape), []) @require_tf def test_set_format_tf(self, in_memory): import tensorflow as tf with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="tensorflow", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], tf.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].numpy().item(), 3) dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0]), 2) self.assertEqual(dset[0]["col_2"].numpy().decode("utf-8"), "a") def test_set_format_pandas(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="pandas", columns=["col_1"]) self.assertEqual(len(dset[0].columns), 1) self.assertIsInstance(dset[0], pd.DataFrame) 
self.assertListEqual(list(dset[0].shape), [1, 1]) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="pandas", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].columns), 2) self.assertEqual(dset[0]["col_2"].item(), "a") @require_polars def test_set_format_polars(self, in_memory): import polars as pl with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="polars", columns=["col_1"]) self.assertEqual(len(dset[0].columns), 1) self.assertIsInstance(dset[0], pl.DataFrame) self.assertListEqual(list(dset[0].shape), [1, 1]) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="polars", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].columns), 2) self.assertEqual(dset[0]["col_2"].item(), "a") def test_set_transform(self, in_memory): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_transform(transform=transform, columns=["col_1"]) self.assertEqual(dset.format["type"], "custom") self.assertEqual(len(dset[0].keys()), 1) self.assertEqual(dset[0]["col_1"], "3") self.assertEqual(dset[:2]["col_1"], ["3", "2"]) self.assertEqual(dset["col_1"][:2], ["3", "2"]) prev_format = dset.format dset.set_format(**dset.format) self.assertEqual(prev_format, dset.format) dset.set_transform(transform=transform, columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].keys()), 2) self.assertEqual(dset[0]["col_2"], "A") def test_transmit_format(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: transform = datasets.arrow_dataset.transmit_format(lambda x: x) # make sure identity transform doesn't apply unnecessary format self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format(**dset.format) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) # check lists comparisons dset.set_format(columns=["col_1"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format(columns=["col_1", "col_2"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format("numpy", columns=["col_1", "col_2"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) def test_cast(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: features = dset.features features["col_1"] = Value("float64") features = Features({k: features[k] for k in list(features)[::-1]}) fingerprint = dset._fingerprint # TODO: with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): with dset.cast(features) as casted_dset: self.assertEqual(casted_dset.num_columns, 3) self.assertEqual(casted_dset.features["col_1"], Value("float64")) self.assertIsInstance(casted_dset[0]["col_1"], float) self.assertNotEqual(casted_dset._fingerprint, fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) def test_class_encode_column(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with self.assertRaises(ValueError): dset.class_encode_column(column="does not exist") with dset.class_encode_column("col_1") as 
casted_dset: self.assertIsInstance(casted_dset.features["col_1"], ClassLabel) self.assertListEqual(casted_dset.features["col_1"].names, ["0", "1", "2", "3"]) self.assertListEqual(casted_dset["col_1"][:], [3, 2, 1, 0]) self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) with dset.class_encode_column("col_2") as casted_dset: self.assertIsInstance(casted_dset.features["col_2"], ClassLabel) self.assertListEqual(casted_dset.features["col_2"].names, ["a", "b", "c", "d"]) self.assertListEqual(casted_dset["col_2"][:], [0, 1, 2, 3]) self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) with dset.class_encode_column("col_3") as casted_dset: self.assertIsInstance(casted_dset.features["col_3"], ClassLabel) self.assertListEqual(casted_dset.features["col_3"].names, ["False", "True"]) self.assertListEqual(casted_dset["col_3"][:], [0, 1, 0, 1]) self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) # Test raises if feature is an array / sequence with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: for column in dset.column_names: with self.assertRaises(ValueError): dset.class_encode_column(column) def test_remove_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.remove_columns(column_names="col_1") as new_dset: self.assertEqual(new_dset.num_columns, 2) self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.remove_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: self.assertEqual(new_dset.num_columns, 0) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset._format_columns = ["col_1", "col_2", "col_3"] with dset.remove_columns(column_names=["col_1"]) as new_dset: self.assertListEqual(new_dset._format_columns, ["col_2", "col_3"]) self.assertEqual(new_dset.num_columns, 2) self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_rename_column(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.rename_column(original_column_name="col_1", new_column_name="new_name") as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_rename_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with 
self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.rename_columns({"col_1": "new_name"}) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) with dset.rename_columns({"col_1": "new_name", "col_2": "new_name2"}) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["new_name", "new_name2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) # Original column not in dataset with self.assertRaises(ValueError): dset.rename_columns({"not_there": "new_name"}) # Empty new name with self.assertRaises(ValueError): dset.rename_columns({"col_1": ""}) # Duplicates with self.assertRaises(ValueError): dset.rename_columns({"col_1": "new_name", "col_2": "new_name"}) def test_select_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.select_columns(column_names=[]) as new_dset: self.assertEqual(new_dset.num_columns, 0) self.assertListEqual(list(new_dset.column_names), []) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.select_columns(column_names="col_1") as new_dset: self.assertEqual(new_dset.num_columns, 1) self.assertListEqual(list(new_dset.column_names), ["col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.select_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.select_columns(column_names=["col_3", "col_2", "col_1"]) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["col_3", "col_2", "col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset._format_columns = ["col_1", "col_2", "col_3"] with dset.select_columns(column_names=["col_1"]) as new_dset: self.assertListEqual(new_dset._format_columns, ["col_1"]) self.assertEqual(new_dset.num_columns, 1) self.assertListEqual(list(new_dset.column_names), ["col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_concatenate(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, 
info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"][:], [0, 1, 2, 3, 4, 5, 6, 7]) self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") del dset1, dset2, dset3 def test_concatenate_formatted(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1.set_format("numpy") with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: self.assertEqual(dset_concat.format["type"], None) dset2.set_format("numpy") dset3.set_format("numpy") with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: self.assertEqual(dset_concat.format["type"], "numpy") del dset1, dset2, dset3 def test_concatenate_with_indices(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7, 8]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = dset1.select([2, 1, 0]), dset2.select([2, 1, 0]), dset3 with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"][:], [6, 7, 8, 5, 4, 3, 2, 1, 0]) # in_memory = False: # 3 cache files for the dset_concat._data table # no cache file for the indices because it's in memory # in_memory = True: # no cache files since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") dset1 = dset1.rename_columns({"id": "id1"}) dset2 = dset2.rename_columns({"id": "id2"}) dset3 = dset3.rename_columns({"id": "id3"}) with concatenate_datasets([dset1, dset2, dset3], axis=1) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) self.assertEqual(len(dset_concat), len(dset1)) self.assertListEqual(dset_concat["id1"][:], [2, 1, 0]) self.assertListEqual(dset_concat["id2"][:], [5, 4, 3]) self.assertListEqual(dset_concat["id3"][:], [6, 7, 8]) # in_memory = False: # 3 cache files for the dset_concat._data table # no cache file for the indices because it's None # in_memory = True: # no cache files since dset_concat._data is in memory and dset_concat._indices is None self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertIsNone(dset_concat._indices) self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") with concatenate_datasets([dset1], axis=1) as dset_concat: 
self.assertEqual(len(dset_concat), len(dset1)) self.assertListEqual(dset_concat["id1"][:], [2, 1, 0]) # in_memory = False: # 1 cache file for the dset_concat._data table # no cache file for the indices because it's in memory # in_memory = True: # no cache files since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 1) self.assertTrue(dset_concat._indices == dset1._indices) self.assertEqual(dset_concat.info.description, "Dataset1") del dset1, dset2, dset3 def test_concatenate_with_indices_from_disk(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = ( dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")), dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")), dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")), ) with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"][:], [7, 6, 5, 4, 3, 2, 1, 0]) # in_memory = False: # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table # There is only 1 for the indices tables (i1.arrow) # Indeed, the others are brought to memory since an offset is applied to them. 
# in_memory = True: # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") del dset1, dset2, dset3 def test_concatenate_pickle(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7], "foo": ["bar", "bar"]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) schema = dset1.data.schema # mix from in-memory and on-disk datasets dset1, dset2 = self._to(in_memory, tmp_dir, dset1, dset2) dset3 = self._to(not in_memory, tmp_dir, dset3) dset1, dset2, dset3 = ( dset1.select( [2, 1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow") if not in_memory else None, ), dset2.select( [2, 1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow") if not in_memory else None, ), dset3.select( [1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow") if not in_memory else None, ), ) dset3 = dset3.rename_column("foo", "new_foo") dset3 = dset3.remove_columns("new_foo") if in_memory: dset3._data.table = Unpicklable(schema=schema) else: dset1._data.table, dset2._data.table = Unpicklable(schema=schema), Unpicklable(schema=schema) dset1, dset2, dset3 = (pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)) with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: if not in_memory: dset_concat._data.table = Unpicklable(schema=schema) with pickle.loads(pickle.dumps(dset_concat)) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"][:], [7, 6, 5, 4, 3, 2, 1, 0]) # in_memory = True: 1 cache file for dset3 # in_memory = False: 2 caches files for dset1 and dset2, and 1 cache file for i1.arrow self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 2 + 1) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") del dset1, dset2, dset3 def test_repeat(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: repeated_dset = dset.repeat(3) column_values_dict = {col: dset[col] for col in dset.column_names} for col, single_values in column_values_dict.items(): self.assertListEqual(repeated_dset[col][:], single_values[:] * 3) del repeated_dset with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with pytest.raises(ValueError): dset.repeat(None) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: repeated_dset = dset.repeat(0) self.assertEqual(len(repeated_dset), 0) del repeated_dset with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: repeated_dset = dset.repeat(-1) self.assertEqual(len(repeated_dset), 0) del repeated_dset def test_flatten(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, 
features=Features({"a": {"b": {"c": List(Value("string"))}}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b.c", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b.c", "foo"]) self.assertDictEqual( dset.features, Features({"a.b.c": List(Value("string")), "foo": Value("int64")}) ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"en": "Thank you", "fr": "Merci"}] * 10, "foo": [1] * 10}, features=Features({"a": Translation(languages=["en", "fr"]), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.en", "a.fr", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.en", "a.fr", "foo"]) self.assertDictEqual( dset.features, Features({"a.en": Value("string"), "a.fr": Value("string"), "foo": Value("int64")}), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}] * 10, "foo": [1] * 10}, features=Features( { "a": TranslationVariableLanguages(languages=["en", "fr", "de"]), "foo": Value("int64"), } ), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.language", "a.translation", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.language", "a.translation", "foo"]) self.assertDictEqual( dset.features, Features( { "a.language": List(Value("string")), "a.translation": List(Value("string")), "foo": Value("int64"), } ), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) @require_pil def test_flatten_complex_image(self, in_memory): # decoding turned on with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, features=Features({"a": Image(), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a", "foo"]) self.assertDictEqual(dset.features, Features({"a": Image(), "foo": Value("int64")})) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned on + nesting with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Image()}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b", "foo"]) self.assertDictEqual(dset.features, Features({"a.b": Image(), "foo": Value("int64")})) self.assertNotEqual(dset._fingerprint, fingerprint) 
assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned off with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, features=Features({"a": Image(decode=False), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.bytes", "a.path", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.bytes", "a.path", "foo"]) self.assertDictEqual( dset.features, Features({"a.bytes": Value("binary"), "a.path": Value("string"), "foo": Value("int64")}), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned off + nesting with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Image(decode=False)}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b.bytes", "a.b.path", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b.bytes", "a.b.path", "foo"]) self.assertDictEqual( dset.features, Features( { "a.b.bytes": Value("binary"), "a.b.path": Value("string"), "foo": Value("int64"), } ), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) def test_map(self, in_memory): # standard with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = dset._fingerprint with dset.map( lambda x: {"name": x["filename"][:-2], "id": int(x["filename"].split("_")[-1])} ) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) # no transform with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(lambda x: None) as dset_test: self.assertEqual(len(dset_test), 30) self.assertEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) # with indices with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map( lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True ) as dset_test_with_indices: self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_with_indices.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test_with_indices["id"][:], list(range(30))) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) # interrupted with tempfile.TemporaryDirectory() as tmp_dir: with 
self._create_dummy_dataset(in_memory, tmp_dir) as dset: def func(x, i): if i == 4: raise KeyboardInterrupt() return {"name": x["filename"][:-2], "id": i} tmp_file = os.path.join(tmp_dir, "test.arrow") self.assertRaises( KeyboardInterrupt, dset.map, function=func, with_indices=True, cache_file_name=tmp_file, writer_batch_size=2, ) self.assertFalse(os.path.exists(tmp_file)) with dset.map( lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True, cache_file_name=tmp_file, writer_batch_size=2, ) as dset_test_with_indices: self.assertTrue(os.path.exists(tmp_file)) self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_with_indices.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test_with_indices["id"][:], list(range(30))) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) # formatted with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format("numpy", columns=["col_1"]) with dset.map(lambda x: {"col_1_plus_one": x["col_1"] + 1}) as dset_test: self.assertEqual(len(dset_test), 4) self.assertEqual(dset_test.format["type"], "numpy") self.assertIsInstance(dset_test["col_1"][:], np.ndarray) self.assertIsInstance(dset_test["col_1_plus_one"][:], np.ndarray) self.assertListEqual(sorted(dset_test[0].keys()), ["col_1", "col_1_plus_one"]) self.assertListEqual(sorted(dset_test.column_names), ["col_1", "col_1_plus_one", "col_2", "col_3"]) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) # casting int labels to float labels with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, int_to_float=True) as dset: def _preprocess(examples): result = {"labels": [list(map(float, labels)) for labels in examples["labels"]]} return result with dset.map( _preprocess, remove_columns=["labels", "text"], batched=True, try_original_type=True ) as dset_test: for labels in dset_test["labels"]: for label in labels: self.assertIsInstance(label, int) with dset.map( _preprocess, remove_columns=["labels", "text"], batched=True, try_original_type=False ) as dset_test: for labels in dset_test["labels"]: for label in labels: self.assertIsInstance(label, float) def test_map_multiprocessing(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # standard with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = dset._fingerprint with dset.map(picklable_map_function, num_proc=2) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) if not in_memory: self.assertIn("_of_00002.arrow", dset_test.cache_files[0]["filename"]) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # num_proc > num rows with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = 
dset._fingerprint with dset.select([0, 1], keep_in_memory=True).map(picklable_map_function, num_proc=10) as dset_test: self.assertEqual(len(dset_test), 2) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"][:], list(range(2))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_indices with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_rank with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(picklable_map_function_with_rank, num_proc=3, with_rank=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "rank": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["rank"][:], [0] * 10 + [1] * 10 + [2] * 10) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_indices AND with_rank with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map( picklable_map_function_with_indices_and_rank, num_proc=3, with_indices=True, with_rank=True ) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64"), "rank": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertListEqual(dset_test["rank"][:], [0] * 10 + [1] * 10 + [2] * 10) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # new_fingerprint new_fingerprint = "foobar" invalid_new_fingerprint = "foobar/hey" with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint self.assertRaises( ValueError, dset.map, picklable_map_function, num_proc=2, new_fingerprint=invalid_new_fingerprint ) with dset.map(picklable_map_function, num_proc=2, new_fingerprint=new_fingerprint) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": 
Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) self.assertEqual(dset_test._fingerprint, new_fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) file_names = sorted(Path(cache_file["filename"]).name for cache_file in dset_test.cache_files) for i, file_name in enumerate(file_names): self.assertIn(new_fingerprint + f"_{i:05d}", file_name) with tempfile.TemporaryDirectory() as tmp_dir: # lambda (requires multiprocess from pathos) with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(lambda x: {"id": int(x["filename"].split("_")[-1])}, num_proc=2) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"][:], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) def test_map_new_features(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: features = Features({"filename": Value("string"), "label": ClassLabel(names=["positive", "negative"])}) with dset.map( lambda x, i: {"label": i % 2}, with_indices=True, features=features ) as dset_test_with_indices: self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual( dset_test_with_indices.features, features, ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) def test_map_batched(self, in_memory): def map_batched(example): return {"filename_new": [x + "_extension" for x in example["filename"]]} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(map_batched, batched=True) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) # change batch size and drop the last batch with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: batch_size = 4 with dset.map( map_batched, batched=True, batch_size=batch_size, drop_last_batch=True ) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30 // batch_size * batch_size) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.formatted_as("numpy", columns=["filename"]): with dset.map(map_batched, batched=True) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": 
Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) def map_batched_with_indices(example, idx): return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map( map_batched_with_indices, batched=True, with_indices=True ) as dset_test_with_indices_batched: self.assertEqual(len(dset_test_with_indices_batched), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_with_indices_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices_batched) # check remove columns for even if the function modifies input in-place def map_batched_modifying_inputs_inplace(example): result = {"filename_new": [x + "_extension" for x in example["filename"]]} del example["filename"] return result with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map( map_batched_modifying_inputs_inplace, batched=True, remove_columns="filename" ) as dset_test_modifying_inputs_inplace: self.assertEqual(len(dset_test_modifying_inputs_inplace), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_modifying_inputs_inplace.features, Features({"filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_modifying_inputs_inplace) def test_map_nested(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict({"field": ["a", "b"]}) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(lambda example: {"otherfield": {"capital": example["field"].capitalize()}}) as dset: with dset.map(lambda example: {"otherfield": {"append_x": example["field"] + "x"}}) as dset: self.assertEqual(dset[0], {"field": "a", "otherfield": {"append_x": "ax"}}) def test_map_return_example_as_dict_value(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict({"en": ["aa", "bb"], "fr": ["cc", "dd"]}) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(lambda example: {"translation": example}) as dset: self.assertEqual(dset[0], {"en": "aa", "fr": "cc", "translation": {"en": "aa", "fr": "cc"}}) def test_map_fn_kwargs(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict({"id": range(10)}) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fn_kwargs = {"offset": 3} with dset.map( lambda example, offset: {"id+offset": example["id"] + offset}, fn_kwargs=fn_kwargs ) as mapped_dset: assert mapped_dset["id+offset"] == list(range(3, 13)) with dset.map( lambda id, offset: {"id+offset": id + offset}, fn_kwargs=fn_kwargs, input_columns="id" ) as mapped_dset: assert mapped_dset["id+offset"] == list(range(3, 13)) with dset.map( lambda id, i, offset: {"id+offset": i + offset}, fn_kwargs=fn_kwargs, input_columns="id", with_indices=True, ) as mapped_dset: assert mapped_dset["id+offset"] == list(range(3, 13)) def test_map_caching(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: self._caplog.clear() with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with patch( "datasets.arrow_dataset.Dataset._map_single", autospec=Dataset._map_single, 
side_effect=Dataset._map_single, ) as mock_map_single: with dset.map(lambda x: {"foo": "bar"}) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) self.assertEqual(mock_map_single.call_count, 1) with dset.map(lambda x: {"foo": "bar"}) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory)) self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory) self.assertEqual(mock_map_single.call_count, 2 if in_memory else 1) with tempfile.TemporaryDirectory() as tmp_dir: self._caplog.clear() with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(lambda x: {"foo": "bar"}) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) with dset.map(lambda x: {"foo": "bar"}, load_from_cache_file=False) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory)) self.assertNotIn("Loading cached processed dataset", self._caplog.text) with tempfile.TemporaryDirectory() as tmp_dir: self._caplog.clear() with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with patch( "datasets.arrow_dataset.Pool", new_callable=PickableMagicMock, side_effect=datasets.arrow_dataset.Pool, ) as mock_pool: with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) self.assertEqual(mock_pool.call_count, 1) with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertTrue( (len(re.findall("Loading cached processed dataset", self._caplog.text)) == 1) ^ in_memory ) self.assertEqual(mock_pool.call_count, 2 if in_memory else 1) with tempfile.TemporaryDirectory() as tmp_dir: self._caplog.clear() with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) with dset.map(lambda x: {"foo": "bar"}, num_proc=2, load_from_cache_file=False) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), (1 - int(in_memory)) * 2) self.assertNotIn("Loading cached processed dataset", self._caplog.text) if not in_memory: try: self._caplog.clear() with tempfile.TemporaryDirectory() as tmp_dir: with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: datasets.disable_caching() with dset.map(lambda x: {"foo": "bar"}) as dset_test1: with dset.map(lambda x: {"foo": "bar"}) as dset_test2: self.assertNotEqual(dset_test1.cache_files, dset_test2.cache_files) self.assertEqual(len(dset_test1.cache_files), 1) self.assertEqual(len(dset_test2.cache_files), 1) self.assertNotIn("Loading cached processed dataset", self._caplog.text) # make sure the arrow files are going to be removed self.assertIn( Path(tempfile.gettempdir()), Path(dset_test1.cache_files[0]["filename"]).parents, ) self.assertIn( Path(tempfile.gettempdir()), Path(dset_test2.cache_files[0]["filename"]).parents, ) finally: datasets.enable_caching() def test_suffix_template_format(self, in_memory): with ( tempfile.TemporaryDirectory() as tmp_dir, self._caplog.at_level(INFO, 
logger=get_logger().name), self._create_dummy_dataset(in_memory, tmp_dir) as dset, self.assertRaises(ValueError) as e, dset.map(lambda x: {"foo": "bar"}, suffix_template="_{}_of_{}"), ): self.assertIn( "suffix_template must contain exactly the fields 'rank' and 'num_proc', got: ", e.exception.args[0], ) def test_cache_file_name_no_ext_raises_error(self, in_memory): with ( tempfile.TemporaryDirectory() as tmp_dir, self._caplog.at_level(INFO, logger=get_logger().name), self._create_dummy_dataset(in_memory, tmp_dir) as dset, self.assertRaises(ValueError) as e, dset.map(lambda x: {"foo": "bar"}, cache_file_name=os.path.join(tmp_dir, "train")), ): self.assertIn("Expected cache_file_name to have an extension, but got: ", e.exception.args[0]) def test_map_caching_reuses_cache_with_different_num_proc(self, in_memory): for dset_test1_num_proc, dset_test2_num_proc in [(1, 2), (2, 1)]: with ( tempfile.TemporaryDirectory() as tmp_dir, self._caplog.at_level(INFO, logger=get_logger().name), self._create_dummy_dataset(in_memory, tmp_dir) as dset, ): # cannot mock _map_single here because mock objects aren't picklable # see: https://github.com/python/cpython/issues/100090 self._caplog.clear() with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test1_num_proc) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) self.assertFalse("Loading cached processed dataset" in self._caplog.text) self._caplog.clear() with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test2_num_proc) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), 0 if in_memory else dset_test1_num_proc) self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory) def test_map_caching_partial_remap(self, in_memory): with ( tempfile.TemporaryDirectory() as tmp_dir, self._caplog.at_level(INFO, logger=get_logger().name), self._create_dummy_dataset(in_memory, tmp_dir) as dset, ): # cannot mock _map_single here because mock objects aren't picklable # see: https://github.com/python/cpython/issues/100090 self._caplog.clear() dset_test1_num_proc = 4 with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test1_num_proc) as dset_test1: dset_test1_data_files = list(dset_test1.cache_files) self.assertFalse("Loading cached processed dataset" in self._caplog.text) num_files_to_delete = 2 expected_msg = ( f"Reprocessing {num_files_to_delete}/{dset_test1_num_proc} shards because some of them " "were missing from the cache." 
) for cache_file in dset_test1_data_files[num_files_to_delete:]: os.remove(cache_file["filename"]) self._caplog.clear() dset_test2_num_proc = None with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test2_num_proc) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), 0 if in_memory else dset_test1_num_proc) self.assertTrue((expected_msg in self._caplog.text) ^ in_memory) self.assertFalse(f"Spawning {dset_test1_num_proc} processes" in self._caplog.text) self.assertFalse(f"Spawning {dset_test2_num_proc} processes" in self._caplog.text) for cache_file in dset_test1_data_files[num_files_to_delete:]: os.remove(cache_file["filename"]) self._caplog.clear() dset_test2_num_proc = 1 with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test2_num_proc) as dset_test2: self.assertEqual(dset_test1_data_files, dset_test2.cache_files) self.assertEqual(len(dset_test2.cache_files), 0 if in_memory else dset_test1_num_proc) self.assertTrue((expected_msg in self._caplog.text) ^ in_memory) self.assertFalse(f"Spawning {dset_test1_num_proc} process" in self._caplog.text) self.assertTrue(f"Spawning {dset_test2_num_proc} process" in self._caplog.text) for cache_file in dset_test1_data_files[num_files_to_delete:]: os.remove(cache_file["filename"]) self._caplog.clear() dset_test3_num_proc = 3 with dset.map(lambda x: {"foo": "bar"}, num_proc=dset_test3_num_proc) as dset_test3: self.assertEqual(dset_test1_data_files, dset_test3.cache_files) self.assertEqual(len(dset_test3.cache_files), 0 if in_memory else dset_test1_num_proc) self.assertTrue((expected_msg in self._caplog.text) ^ in_memory) self.assertTrue(f"Spawning {dset_test3_num_proc} processes" in self._caplog.text) def test_map_return_pa_table(self, in_memory): def func_return_single_row_pa_table(x): return pa.table({"id": [0], "text": ["a"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pa_table) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Batched def func_return_single_row_pa_table_batched(x): batch_size = len(x[next(iter(x))]) return pa.table({"id": [0] * batch_size, "text": ["a"] * batch_size}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pa_table_batched, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Error when returning a table with more than one row in the non-batched mode def func_return_multi_row_pa_table(x): return pa.table({"id": [0, 1], "text": ["a", "b"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertRaises(ValueError, dset.map, func_return_multi_row_pa_table) # arrow formatted dataset def func_return_table_from_expression(t): import pyarrow.dataset as pds return pds.dataset(t).to_table( columns={"new_column": pds.field("")._call("ascii_capitalize", [pds.field("filename")])} ) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with 
dset.with_format("arrow").map(func_return_table_from_expression, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"new_column": Value("string")}), ) self.assertEqual(dset_test.with_format(None)[0]["new_column"], dset[0]["filename"].capitalize()) def test_map_return_pd_dataframe(self, in_memory): def func_return_single_row_pd_dataframe(x): return pd.DataFrame({"id": [0], "text": ["a"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pd_dataframe) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Batched def func_return_single_row_pd_dataframe_batched(x): batch_size = len(x[next(iter(x))]) return pd.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pd_dataframe_batched, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Error when returning a table with more than one row in the non-batched mode def func_return_multi_row_pd_dataframe(x): return pd.DataFrame({"id": [0, 1], "text": ["a", "b"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertRaises(ValueError, dset.map, func_return_multi_row_pd_dataframe) @require_polars def test_map_return_pl_dataframe(self, in_memory): import polars as pl def func_return_single_row_pl_dataframe(x): return pl.DataFrame({"id": [0], "text": ["a"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pl_dataframe) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("large_string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Batched def func_return_single_row_pl_dataframe_batched(x): batch_size = len(x[next(iter(x))]) return pl.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pl_dataframe_batched, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("large_string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Error when returning a table with more than one row in the non-batched mode def func_return_multi_row_pl_dataframe(x): return pl.DataFrame({"id": [0, 1], "text": ["a", "b"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertRaises(ValueError, dset.map, func_return_multi_row_pl_dataframe) @require_numpy1_on_windows @require_torch def test_map_torch(self, in_memory): import torch def func(example): return {"tensor": torch.tensor([1.0, 2, 3])} with tempfile.TemporaryDirectory() as tmp_dir: with 
self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "tensor": List(Value("float32"))}), ) self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) @require_tf def test_map_tf(self, in_memory): import tensorflow as tf def func(example): return {"tensor": tf.constant([1.0, 2, 3])} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "tensor": List(Value("float32"))}), ) self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) @require_jax def test_map_jax(self, in_memory): import jax.numpy as jnp def func(example): return {"tensor": jnp.asarray([1.0, 2, 3])} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "tensor": List(Value("float32"))}), ) self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) def test_map_numpy(self, in_memory): def func(example): return {"tensor": np.array([1.0, 2, 3])} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "tensor": List(Value("float64"))}), ) self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) @require_numpy1_on_windows @require_torch def test_map_tensor_batched(self, in_memory): import torch def func(batch): return {"tensor": torch.tensor([[1.0, 2, 3]] * len(batch["filename"]))} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "tensor": List(Value("float32"))}), ) self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) def test_map_input_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.map(lambda col_1: {"label": col_1 % 2}, input_columns="col_1") as mapped_dset: self.assertEqual(mapped_dset[0].keys(), {"col_1", "col_2", "col_3", "label"}) self.assertEqual( mapped_dset.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), "label": Value("int64"), } ), ) def test_map_remove_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True) as dset: self.assertTrue("id" in dset[0]) self.assertDictEqual( dset.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset) with dset.map(lambda x: x, remove_columns=["id"]) as mapped_dset: self.assertTrue("id" not in mapped_dset[0]) self.assertDictEqual( mapped_dset.features, Features({"filename": Value("string"), "name": Value("string")}) ) assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) with 
mapped_dset.with_format("numpy", columns=mapped_dset.column_names) as mapped_dset: with mapped_dset.map( lambda x: {"name": 1}, remove_columns=mapped_dset.column_names ) as mapped_dset: self.assertTrue("filename" not in mapped_dset[0]) self.assertTrue("name" in mapped_dset[0]) self.assertDictEqual(mapped_dset.features, Features({"name": Value(dtype="int64")})) assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) # empty dataset columns_names = dset.column_names with dset.select([]) as empty_dset: self.assertEqual(len(empty_dset), 0) with empty_dset.map(lambda x: {}, remove_columns=columns_names[0]) as mapped_dset: self.assertListEqual(columns_names[1:], mapped_dset.column_names) assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) def test_map_stateful_callable(self, in_memory): # be sure that the state of the map callable is unaffected # before processing the dataset examples class ExampleCounter: def __init__(self, batched=False): self.batched = batched # state self.cnt = 0 def __call__(self, example): if self.batched: self.cnt += len(example) else: self.cnt += 1 with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: ex_cnt = ExampleCounter() dset.map(ex_cnt) self.assertEqual(ex_cnt.cnt, len(dset)) ex_cnt = ExampleCounter(batched=True) dset.map(ex_cnt) self.assertEqual(ex_cnt.cnt, len(dset)) @require_not_windows def test_map_crash_subprocess(self, in_memory): # be sure that a crash in one of the subprocess will not # hang dataset.map() call forever def do_crash(row): import os os.kill(os.getpid(), 9) return row with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with pytest.raises(RuntimeError) as excinfo: dset.map(do_crash, num_proc=2) assert str(excinfo.value) == ( "One of the subprocesses has abruptly died during map operation." "To debug the error, disable multiprocessing." 
) def test_filter(self, in_memory): # keep only first five examples with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five: self.assertEqual(len(dset_filter_first_five), 5) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_filter_first_five.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint) # filter filenames with even id at the end + formatted with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: dset.set_format("numpy") fingerprint = dset._fingerprint with dset.filter(lambda x: (int(x["filename"][-1]) % 2 == 0)) as dset_filter_even_num: self.assertEqual(len(dset_filter_even_num), 15) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_filter_even_num.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint) self.assertEqual(dset_filter_even_num.format["type"], "numpy") def test_filter_with_indices_mapping(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: dset = Dataset.from_dict({"col": [0, 1, 2]}) with self._to(in_memory, tmp_dir, dset) as dset: with dset.filter(lambda x: x["col"] > 0) as dset: self.assertListEqual(dset["col"][:], [1, 2]) with dset.filter(lambda x: x["col"] < 2) as dset: self.assertListEqual(dset["col"][:], [1]) def test_filter_empty(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertIsNone(dset._indices, None) tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 0) self.assertIsNotNone(dset._indices, None) tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") with dset.filter(lambda _: False, cache_file_name=tmp_file_2) as dset2: self.assertEqual(len(dset2), 0) self.assertEqual(dset._indices, dset2._indices) def test_filter_batched(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: dset = Dataset.from_dict({"col": [0, 1, 2]}) with self._to(in_memory, tmp_dir, dset) as dset: with dset.filter(lambda x: [i > 0 for i in x["col"]], batched=True) as dset: self.assertListEqual(dset["col"][:], [1, 2]) with dset.filter(lambda x: [i < 2 for i in x["col"]], batched=True) as dset: self.assertListEqual(dset["col"][:], [1]) def test_filter_input_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: dset = Dataset.from_dict({"col_1": [0, 1, 2], "col_2": ["a", "b", "c"]}) with self._to(in_memory, tmp_dir, dset) as dset: with dset.filter(lambda x: x > 0, input_columns=["col_1"]) as filtered_dset: self.assertListEqual(filtered_dset.column_names, dset.column_names) self.assertListEqual(filtered_dset["col_1"][:], [1, 2]) self.assertListEqual(filtered_dset["col_2"][:], ["b", "c"]) def test_filter_fn_kwargs(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict({"id": range(10)}) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fn_kwargs = {"max_offset": 3} with dset.filter( lambda example, max_offset: example["id"] < max_offset, fn_kwargs=fn_kwargs ) as filtered_dset: assert len(filtered_dset) == 3 with dset.filter( lambda id, max_offset: id < max_offset, fn_kwargs=fn_kwargs, 
input_columns="id" ) as filtered_dset: assert len(filtered_dset) == 3 with dset.filter( lambda id, i, max_offset: i < max_offset, fn_kwargs=fn_kwargs, input_columns="id", with_indices=True, ) as filtered_dset: assert len(filtered_dset) == 3 def test_filter_multiprocessing(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.filter(picklable_filter_function, num_proc=2) as dset_filter_first_ten: self.assertEqual(len(dset_filter_first_ten), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_filter_first_ten.features, Features({"filename": Value("string")})) self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2) self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint) with tempfile.TemporaryDirectory() as tmp_dir: # with_rank with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.filter( picklable_filter_function_with_rank, num_proc=2, with_rank=True ) as dset_filter_first_rank: self.assertEqual(len(dset_filter_first_rank), min(len(dset) // 2, len(dset))) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_filter_first_rank.features, Features({"filename": Value("string")})) self.assertEqual(len(dset_filter_first_rank.cache_files), 0 if in_memory else 2) self.assertNotEqual(dset_filter_first_rank._fingerprint, fingerprint) def test_filter_caching(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: self._caplog.clear() with self._caplog.at_level(INFO, logger=get_logger().name): with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five1: dset_test1_data_files = list(dset_filter_first_five1.cache_files) with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five2: self.assertEqual(dset_test1_data_files, dset_filter_first_five2.cache_files) self.assertEqual(len(dset_filter_first_five2.cache_files), 0 if in_memory else 2) self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory) def test_keep_features_after_transform_specified(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]]} with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(invert_labels, features=features) as inverted_dset: self.assertEqual(inverted_dset.features.type, features.type) self.assertDictEqual(inverted_dset.features, features) assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) def test_keep_features_after_transform_unspecified(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]]} with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(invert_labels) as inverted_dset: self.assertEqual(inverted_dset.features.type, 
features.type) self.assertDictEqual(inverted_dset.features, features) assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) def test_keep_features_after_transform_to_file(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]]} with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: tmp_file = os.path.join(tmp_dir, "test.arrow") dset.map(invert_labels, cache_file_name=tmp_file) with Dataset.from_file(tmp_file) as inverted_dset: self.assertEqual(inverted_dset.features.type, features.type) self.assertDictEqual(inverted_dset.features, features) def test_keep_features_after_transform_to_memory(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]]} with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(invert_labels, keep_in_memory=True) as inverted_dset: self.assertEqual(inverted_dset.features.type, features.type) self.assertDictEqual(inverted_dset.features, features) def test_keep_features_after_loading_from_cache(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]]} with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: tmp_file1 = os.path.join(tmp_dir, "test1.arrow") tmp_file2 = os.path.join(tmp_dir, "test2.arrow") # TODO: Why mapped twice? 
inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1) inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2) self.assertGreater(len(inverted_dset.cache_files), 0) self.assertEqual(inverted_dset.features.type, features.type) self.assertDictEqual(inverted_dset.features, features) del inverted_dset def test_keep_features_with_new_features(self, in_memory): features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), } ) def invert_labels(x): return {"labels": [(1 - label) for label in x["labels"]], "labels2": x["labels"]} expected_features = Features( { "tokens": List(Value("string")), "labels": List(ClassLabel(names=["negative", "positive"])), "labels2": List(Value("int64")), } ) with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: with dset.map(invert_labels) as inverted_dset: self.assertEqual(inverted_dset.features.type, expected_features.type) self.assertDictEqual(inverted_dset.features, expected_features) assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) def test_select(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: # select every two example indices = list(range(0, len(dset), 2)) tmp_file = os.path.join(tmp_dir, "test.arrow") fingerprint = dset._fingerprint with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_even: self.assertIsNotNone(dset_select_even._indices) # an indices mapping is created self.assertTrue(os.path.exists(tmp_file)) self.assertEqual(len(dset_select_even), 15) for row in dset_select_even: self.assertEqual(int(row["filename"][-1]) % 2, 0) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_select_even.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_select_even._fingerprint, fingerprint) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: indices = list(range(0, len(dset))) with dset.select(indices) as dset_select_all: # no indices mapping, since the indices are contiguous # (in this case the arrow table is simply sliced, which is more efficient) self.assertIsNone(dset_select_all._indices) self.assertEqual(len(dset_select_all), len(dset)) self.assertListEqual(list(dset_select_all), list(dset)) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_select_all._fingerprint, fingerprint) indices = range(0, len(dset)) with dset.select(indices) as dset_select_all: # same but with range self.assertIsNone(dset_select_all._indices) self.assertEqual(len(dset_select_all), len(dset)) self.assertListEqual(list(dset_select_all), list(dset)) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_select_all._fingerprint, fingerprint) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: bad_indices = list(range(5)) bad_indices[-1] = len(dset) + 10 # out of bounds tmp_file = os.path.join(tmp_dir, "test.arrow") self.assertRaises( Exception, dset.select, indices=bad_indices, 
indices_cache_file_name=tmp_file, writer_batch_size=2, ) self.assertFalse(os.path.exists(tmp_file)) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: indices = iter(range(len(dset))) # iterator of contiguous indices with dset.select(indices) as dset_select_all: # no indices mapping, since the indices are contiguous self.assertIsNone(dset_select_all._indices) self.assertEqual(len(dset_select_all), len(dset)) indices = reversed(range(len(dset))) # iterator of not contiguous indices tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_all: # new indices mapping, since the indices are not contiguous self.assertIsNotNone(dset_select_all._indices) self.assertEqual(len(dset_select_all), len(dset)) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: bad_indices = list(range(5)) bad_indices[3] = "foo" # wrong type tmp_file = os.path.join(tmp_dir, "test.arrow") self.assertRaises( Exception, dset.select, indices=bad_indices, indices_cache_file_name=tmp_file, writer_batch_size=2, ) self.assertFalse(os.path.exists(tmp_file)) dset.set_format("numpy") with dset.select( range(5), indices_cache_file_name=tmp_file, writer_batch_size=2, ) as dset_select_five: self.assertIsNone(dset_select_five._indices) self.assertEqual(len(dset_select_five), 5) self.assertEqual(dset_select_five.format["type"], "numpy") for i, row in enumerate(dset_select_five): self.assertEqual(int(row["filename"][-1]), i) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_select_five.features, Features({"filename": Value("string")})) def test_select_then_map(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.select([0]) as d1: with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1: self.assertEqual(d1[0]["id"], 0) with dset.select([1]) as d2: with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2: self.assertEqual(d2[0]["id"], 1) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")) as d1: with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1: self.assertEqual(d1[0]["id"], 0) with dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")) as d2: with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2: self.assertEqual(d2[0]["id"], 1) def test_pickle_after_many_transforms_on_disk(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertEqual(len(dset.cache_files), 0 if in_memory else 1) with dset.rename_column("filename", "file") as dset: self.assertListEqual(dset.column_names, ["file"]) with dset.select(range(5)) as dset: self.assertEqual(len(dset), 5) with dset.map(lambda x: {"id": int(x["file"][-1])}) as dset: self.assertListEqual(sorted(dset.column_names), ["file", "id"]) with dset.rename_column("id", "number") as dset: self.assertListEqual(sorted(dset.column_names), ["file", "number"]) with dset.select([1, 0]) as dset: self.assertEqual(dset[0]["file"], "my_name-train_1") self.assertEqual(dset[0]["number"], 1) self.assertEqual(dset._indices["indices"].to_pylist(), [1, 0]) if not in_memory: self.assertIn( ("rename_columns", (["file", 
"number"],), {}), dset._data.replays, ) if not in_memory: dset._data.table = Unpicklable() # check that we don't pickle the entire table pickled = pickle.dumps(dset) with pickle.loads(pickled) as loaded: self.assertEqual(loaded[0]["file"], "my_name-train_1") self.assertEqual(loaded[0]["number"], 1) def test_shuffle(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: tmp_file = os.path.join(tmp_dir, "test.arrow") fingerprint = dset._fingerprint with dset.shuffle(seed=1234, keep_in_memory=True) as dset_shuffled: self.assertEqual(len(dset_shuffled), 30) self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled: self.assertEqual(len(dset_shuffled), 30) self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) # Reproducibility tmp_file = os.path.join(tmp_dir, "test_2.arrow") with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled_2: self.assertSequenceEqual(dset_shuffled["filename"], dset_shuffled_2["filename"]) # Compatible with temp_seed with temp_seed(42), dset.shuffle() as d1: with temp_seed(42), dset.shuffle() as d2, dset.shuffle() as d3: self.assertSequenceEqual(d1["filename"], d2["filename"]) self.assertEqual(d1._fingerprint, d2._fingerprint) self.assertNotEqual(d3["filename"], d2["filename"]) self.assertNotEqual(d3._fingerprint, d2._fingerprint) def test_sort(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # Sort on a single key with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir) as dset: # Keep only 10 examples tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.select(range(10), indices_cache_file_name=tmp_file) as dset: tmp_file = os.path.join(tmp_dir, "test_2.arrow") with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 10) self.assertEqual(dset[0]["filename"], "my_name-train_8") self.assertEqual(dset[1]["filename"], "my_name-train_9") # Sort tmp_file = os.path.join(tmp_dir, "test_3.arrow") fingerprint = dset._fingerprint with dset.sort("filename", indices_cache_file_name=tmp_file) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(int(row["filename"][-1]), i) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # Sort reversed tmp_file = os.path.join(tmp_dir, "test_4.arrow") fingerprint = dset._fingerprint with dset.sort("filename", indices_cache_file_name=tmp_file, reverse=True) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sorted.features, Features({"filename": 
Value("string")})) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # formatted dset.set_format("numpy") with dset.sort("filename") as dset_sorted_formatted: self.assertEqual(dset_sorted_formatted.format["type"], "numpy") # Sort on multiple keys with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True) as dset: tmp_file = os.path.join(tmp_dir, "test_5.arrow") fingerprint = dset._fingerprint # Throw error when reverse is a list of bools that does not match the length of column_names with pytest.raises(ValueError): dset.sort(["col_1", "col_2", "col_3"], reverse=[False]) with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: # Sort with dset.sort(["col_1", "col_2", "col_3"], reverse=[False, True, False]) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(row["col_1"], i) self.assertDictEqual( dset.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertDictEqual( dset_sorted.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # Sort reversed with dset.sort(["col_1", "col_2", "col_3"], reverse=[True, False, True]) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(row["col_1"], len(dset_sorted) - 1 - i) self.assertDictEqual( dset.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertDictEqual( dset_sorted.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # formatted dset.set_format("numpy") with dset.sort( ["col_1", "col_2", "col_3"], reverse=[False, True, False] ) as dset_sorted_formatted: self.assertEqual(dset_sorted_formatted.format["type"], "numpy") def test_to_csv(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # File path argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_path.csv") bytes_written = dset.to_csv(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # File buffer argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_buffer.csv") with open(file_path, "wb+") as buffer: bytes_written = dset.to_csv(path_or_buf=buffer) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # After a select/shuffle transform with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset = dset.select(range(0, len(dset), 2)).shuffle() file_path = os.path.join(tmp_dir, "test_path.csv") bytes_written = dset.to_csv(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # With array features with self._create_dummy_dataset(in_memory, 
tmp_dir, array_features=True) as dset: file_path = os.path.join(tmp_dir, "test_path.csv") bytes_written = dset.to_csv(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) def test_to_dict(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: # Full dset_to_dict = dset.to_dict() self.assertIsInstance(dset_to_dict, dict) self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names)) for col_name in dset.column_names: self.assertLessEqual(len(dset_to_dict[col_name]), len(dset)) # With index mapping with dset.select([1, 0, 3]) as dset: dset_to_dict = dset.to_dict() self.assertIsInstance(dset_to_dict, dict) self.assertEqual(len(dset_to_dict), 3) self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names)) for col_name in dset.column_names: self.assertIsInstance(dset_to_dict[col_name], list) self.assertEqual(len(dset_to_dict[col_name]), len(dset)) def test_to_list(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset_to_list = dset.to_list() self.assertIsInstance(dset_to_list, list) for row in dset_to_list: self.assertIsInstance(row, dict) self.assertListEqual(sorted(row.keys()), sorted(dset.column_names)) # With index mapping with dset.select([1, 0, 3]) as dset: dset_to_list = dset.to_list() self.assertIsInstance(dset_to_list, list) self.assertEqual(len(dset_to_list), 3) for row in dset_to_list: self.assertIsInstance(row, dict) self.assertListEqual(sorted(row.keys()), sorted(dset.column_names)) def test_to_pandas(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # Batched with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: batch_size = dset.num_rows - 1 to_pandas_generator = dset.to_pandas(batched=True, batch_size=batch_size) for batch in to_pandas_generator: self.assertIsInstance(batch, pd.DataFrame) self.assertListEqual(sorted(batch.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertLessEqual(len(batch[col_name]), batch_size) # Full dset_to_pandas = dset.to_pandas() self.assertIsInstance(dset_to_pandas, pd.DataFrame) self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertEqual(len(dset_to_pandas[col_name]), len(dset)) # With index mapping with dset.select([1, 0, 3]) as dset: dset_to_pandas = dset.to_pandas() self.assertIsInstance(dset_to_pandas, pd.DataFrame) self.assertEqual(len(dset_to_pandas), 3) self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertEqual(len(dset_to_pandas[col_name]), dset.num_rows) @require_polars def test_to_polars(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # Batched with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: batch_size = dset.num_rows - 1 to_polars_generator = dset.to_polars(batched=True, batch_size=batch_size) for batch in to_polars_generator: self.assertIsInstance(batch, sys.modules["polars"].DataFrame) self.assertListEqual(sorted(batch.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertLessEqual(len(batch[col_name]), 
batch_size) del batch # Full dset_to_polars = dset.to_polars() self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame) self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertEqual(len(dset_to_polars[col_name]), len(dset)) # With index mapping with dset.select([1, 0, 3]) as dset: dset_to_polars = dset.to_polars() self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame) self.assertEqual(len(dset_to_polars), 3) self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names)) for col_name in dset.column_names: self.assertEqual(len(dset_to_polars[col_name]), dset.num_rows) def test_to_parquet(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # File path argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_path.parquet") dset.to_parquet(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match parquet_dset = pd.read_parquet(file_path) self.assertEqual(parquet_dset.shape, dset.shape) self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) # File buffer argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_buffer.parquet") with open(file_path, "wb+") as buffer: dset.to_parquet(path_or_buf=buffer) self.assertTrue(os.path.isfile(file_path)) # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match parquet_dset = pd.read_parquet(file_path) self.assertEqual(parquet_dset.shape, dset.shape) self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) # After a select/shuffle transform with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset = dset.select(range(0, len(dset), 2)).shuffle() file_path = os.path.join(tmp_dir, "test_path.parquet") dset.to_parquet(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match parquet_dset = pd.read_parquet(file_path) self.assertEqual(parquet_dset.shape, dset.shape) self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) # With array features with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: file_path = os.path.join(tmp_dir, "test_path.parquet") dset.to_parquet(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match parquet_dset = pd.read_parquet(file_path) self.assertEqual(parquet_dset.shape, dset.shape) self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) @require_sqlalchemy def test_to_sql(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # Destionation specified as database URI string with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_path.sqlite") _ = dset.to_sql("data", "sqlite:///" + file_path) self.assertTrue(os.path.isfile(file_path)) sql_dset = pd.read_sql("data", "sqlite:///" + file_path) self.assertEqual(sql_dset.shape, dset.shape) self.assertListEqual(list(sql_dset.columns), list(dset.column_names)) # 
            # Destination specified as sqlite3 connection
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                import sqlite3

                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                with contextlib.closing(sqlite3.connect(file_path)) as con:
                    _ = dset.to_sql("data", con, if_exists="replace")
                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
            # Test writing to a database in chunks
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, batch_size=1, if_exists="replace")
                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
            # After a select/shuffle transform
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset = dset.select(range(0, len(dset), 2)).shuffle()
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
            # With array features
            with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

    def test_train_test_split(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                fingerprint = dset._fingerprint
                dset_dict = dset.train_test_split(test_size=10, shuffle=False)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]
                self.assertEqual(len(dset_train), 20)
                self.assertEqual(len(dset_test), 10)
                self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
                self.assertEqual(dset_train[-1]["filename"], "my_name-train_19")
                self.assertEqual(dset_test[0]["filename"], "my_name-train_20")
                self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
                self.assertNotEqual(dset_train._fingerprint, fingerprint)
                self.assertNotEqual(dset_test._fingerprint, fingerprint)
                self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)

                dset_dict = dset.train_test_split(test_size=0.5, shuffle=False)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]
                self.assertEqual(len(dset_train), 15)
                self.assertEqual(len(dset_test), 15)
                self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
                self.assertEqual(dset_train[-1]["filename"], "my_name-train_14")
                self.assertEqual(dset_test[0]["filename"], "my_name-train_15")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) dset_dict = dset.train_test_split(train_size=10, shuffle=False) self.assertListEqual(list(dset_dict.keys()), ["train", "test"]) dset_train = dset_dict["train"] dset_test = dset_dict["test"] self.assertEqual(len(dset_train), 10) self.assertEqual(len(dset_test), 20) self.assertEqual(dset_train[0]["filename"], "my_name-train_0") self.assertEqual(dset_train[-1]["filename"], "my_name-train_9") self.assertEqual(dset_test[0]["filename"], "my_name-train_10") self.assertEqual(dset_test[-1]["filename"], "my_name-train_29") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) dset.set_format("numpy") dset_dict = dset.train_test_split(train_size=10, seed=42) self.assertListEqual(list(dset_dict.keys()), ["train", "test"]) dset_train = dset_dict["train"] dset_test = dset_dict["test"] self.assertEqual(len(dset_train), 10) self.assertEqual(len(dset_test), 20) self.assertEqual(dset_train.format["type"], "numpy") self.assertEqual(dset_test.format["type"], "numpy") self.assertNotEqual(dset_train[0]["filename"].item(), "my_name-train_0") self.assertNotEqual(dset_train[-1]["filename"].item(), "my_name-train_9") self.assertNotEqual(dset_test[0]["filename"].item(), "my_name-train_10") self.assertNotEqual(dset_test[-1]["filename"].item(), "my_name-train_29") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) del dset_test, dset_train, dset_dict # DatasetDict def test_shard(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset: tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.select(range(10), indices_cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 10) # Shard non-contiguous tmp_file_1 = os.path.join(tmp_dir, "test_1.arrow") fingerprint = dset._fingerprint with dset.shard( num_shards=8, index=1, contiguous=False, indices_cache_file_name=tmp_file_1 ) as dset_sharded: self.assertEqual(2, len(dset_sharded)) self.assertEqual(["my_name-train_1", "my_name-train_9"], dset_sharded["filename"]) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sharded.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_sharded._fingerprint, fingerprint) # Shard contiguous tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") with dset.shard( num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2 ) as dset_sharded_contiguous: self.assertEqual([f"my_name-train_{i}" for i in (0, 1, 2, 3)], dset_sharded_contiguous["filename"]) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sharded_contiguous.features, Features({"filename": Value("string")})) # Test lengths of sharded contiguous self.assertEqual( [4, 3, 3], [ len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i))) for i in range(3) ], ) # formatted 
dset.set_format("numpy") with dset.shard(num_shards=3, index=0) as dset_sharded_formatted: self.assertEqual(dset_sharded_formatted.format["type"], "numpy") def test_flatten_indices(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertIsNone(dset._indices) tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.select(range(0, 10, 2), indices_cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 5) self.assertIsNotNone(dset._indices) tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") fingerprint = dset._fingerprint dset.set_format("numpy") with dset.flatten_indices(cache_file_name=tmp_file_2) as dset: self.assertEqual(len(dset), 5) self.assertEqual(len(dset.data), len(dset)) self.assertIsNone(dset._indices) self.assertNotEqual(dset._fingerprint, fingerprint) self.assertEqual(dset.format["type"], "numpy") # Test unique works dset.unique(dset.column_names[0]) assert_arrow_metadata_are_synced_with_dataset_features(dset) # Empty indices mapping with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertIsNone(dset._indices, None) tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 0) self.assertIsNotNone(dset._indices, None) tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") fingerprint = dset._fingerprint dset.set_format("numpy") with dset.flatten_indices(cache_file_name=tmp_file_2) as dset: self.assertEqual(len(dset), 0) self.assertEqual(len(dset.data), len(dset)) self.assertIsNone(dset._indices, None) self.assertNotEqual(dset._fingerprint, fingerprint) self.assertEqual(dset.format["type"], "numpy") # Test unique works dset.unique(dset.column_names[0]) assert_arrow_metadata_are_synced_with_dataset_features(dset) @require_tf @require_torch def test_format_vectors(self, in_memory): import numpy as np import tensorflow as tf import torch with ( tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset, dset.map(lambda ex, i: {"vec": np.ones(3) * i}, with_indices=True) as dset, ): columns = dset.column_names self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) for col in columns: self.assertIsInstance(dset[0][col], (str, list)) self.assertIsInstance(dset[:2][col], list) self.assertDictEqual(dset.features, Features({"filename": Value("string"), "vec": List(Value("float64"))})) dset.set_format("tensorflow") self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) for col in columns: self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor)) self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor)) self.assertIsInstance(dset[col][:2], (tf.Tensor, tf.RaggedTensor)) self.assertTupleEqual(tuple(dset[:2]["vec"].shape), (2, 3)) self.assertTupleEqual(tuple(dset["vec"][:2].shape), (2, 3)) dset.set_format("numpy") self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[0]["filename"], np.str_) self.assertIsInstance(dset[:2]["filename"], np.ndarray) self.assertIsInstance(dset["filename"][:], np.ndarray) self.assertIsInstance(dset[0]["vec"], np.ndarray) self.assertIsInstance(dset[:2]["vec"], np.ndarray) self.assertIsInstance(dset["vec"][:2], np.ndarray) self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3)) self.assertTupleEqual(dset["vec"][:2].shape, (2, 3)) dset.set_format("torch", columns=["vec"]) self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) # 
torch.Tensor is only for numerical columns self.assertIsInstance(dset[0]["vec"], torch.Tensor) self.assertIsInstance(dset[:2]["vec"], torch.Tensor) self.assertIsInstance(dset["vec"][:2], torch.Tensor) self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3)) self.assertTupleEqual(dset["vec"][:2].shape, (2, 3)) @require_tf @require_torch def test_format_ragged_vectors(self, in_memory): import numpy as np import tensorflow as tf import torch with ( tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset, dset.map(lambda ex, i: {"vec": np.ones(3 + i) * i}, with_indices=True) as dset, ): columns = dset.column_names self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) for col in columns: self.assertIsInstance(dset[0][col], (str, list)) self.assertIsInstance(dset[:2][col], list) self.assertDictEqual(dset.features, Features({"filename": Value("string"), "vec": List(Value("float64"))})) dset.set_format("tensorflow") self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) for col in columns: self.assertIsInstance(dset[0][col], tf.Tensor) self.assertIsInstance(dset[:2][col], tf.RaggedTensor if col == "vec" else tf.Tensor) self.assertIsInstance(dset[col][:2], tf.RaggedTensor if col == "vec" else tf.Tensor) # dim is None for ragged vectors in tensorflow self.assertListEqual(dset[:2]["vec"].shape.as_list(), [2, None]) self.assertListEqual(dset["vec"][:2].shape.as_list(), [2, None]) dset.set_format("numpy") self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[0]["filename"], np.str_) self.assertIsInstance(dset[:2]["filename"], np.ndarray) self.assertIsInstance(dset["filename"][:2], np.ndarray) self.assertIsInstance(dset[0]["vec"], np.ndarray) self.assertIsInstance(dset[:2]["vec"], np.ndarray) self.assertIsInstance(dset["vec"][:], np.ndarray) # array is flat for ragged vectors in numpy self.assertTupleEqual(dset[:2]["vec"].shape, (2,)) self.assertTupleEqual(dset["vec"][:2].shape, (2,)) dset.set_format("torch") self.assertIsNotNone(dset[0]) self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[0]["filename"], str) self.assertIsInstance(dset[:2]["filename"], list) self.assertIsInstance(dset["filename"][:2], list) self.assertIsInstance(dset[0]["vec"], torch.Tensor) self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor) self.assertIsInstance(dset["vec"][0], torch.Tensor) # pytorch doesn't support ragged tensors, so we should have lists self.assertIsInstance(dset[:2]["vec"], list) self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor) self.assertIsInstance(dset["vec"][:2], list) self.assertIsInstance(dset["vec"][0], torch.Tensor) @require_tf @require_torch def test_format_nested(self, in_memory): import numpy as np import tensorflow as tf import torch with ( tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset, dset.map(lambda ex: {"nested": [{"foo": np.ones(3)}] * len(ex["filename"])}, batched=True) as dset, ): self.assertDictEqual( dset.features, Features({"filename": Value("string"), "nested": {"foo": List(Value("float64"))}}) ) dset.set_format("tensorflow") self.assertIsNotNone(dset[0]) self.assertIsInstance(dset[0]["nested"]["foo"], (tf.Tensor, tf.RaggedTensor)) self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[:2]["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor)) self.assertIsInstance(dset["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor)) dset.set_format("numpy") self.assertIsNotNone(dset[0]) self.assertIsInstance(dset[0]["nested"]["foo"], np.ndarray) 
self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[:2]["nested"][0]["foo"], np.ndarray) self.assertIsInstance(dset["nested"][0]["foo"], np.ndarray) dset.set_format("torch", columns="nested") self.assertIsNotNone(dset[0]) self.assertIsInstance(dset[0]["nested"]["foo"], torch.Tensor) self.assertIsNotNone(dset[:2]) self.assertIsInstance(dset[:2]["nested"][0]["foo"], torch.Tensor) self.assertIsInstance(dset["nested"][0]["foo"], torch.Tensor) def test_format_pandas(self, in_memory): import pandas as pd with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format("pandas") self.assertIsInstance(dset[0], pd.DataFrame) self.assertIsInstance(dset[:2], pd.DataFrame) self.assertIsInstance(dset["col_1"], pd.Series) @require_polars def test_format_polars(self, in_memory): import polars as pl with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format("polars") self.assertIsInstance(dset[0], pl.DataFrame) self.assertIsInstance(dset[:2], pl.DataFrame) self.assertIsInstance(dset["col_1"], pl.Series) def test_transmit_format_single(self, in_memory): @transmit_format def my_single_transform(self, return_factory, *args, **kwargs): return return_factory() with tempfile.TemporaryDirectory() as tmp_dir: return_factory = partial( self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True ) with return_factory() as dset: dset.set_format("numpy", columns=["col_1"]) prev_format = dset.format with my_single_transform(dset, return_factory) as transformed_dset: self.assertDictEqual(transformed_dset.format, prev_format) def test_transmit_format_dict(self, in_memory): @transmit_format def my_split_transform(self, return_factory, *args, **kwargs): return DatasetDict({"train": return_factory()}) with tempfile.TemporaryDirectory() as tmp_dir: return_factory = partial( self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True ) with return_factory() as dset: dset.set_format("numpy", columns=["col_1"]) prev_format = dset.format transformed_dset = my_split_transform(dset, return_factory)["train"] self.assertDictEqual(transformed_dset.format, prev_format) del transformed_dset # DatasetDict def test_with_format(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.with_format("numpy", columns=["col_1"]) as dset2: dset.set_format("numpy", columns=["col_1"]) self.assertDictEqual(dset.format, dset2.format) self.assertEqual(dset._fingerprint, dset2._fingerprint) # dset.reset_format() # self.assertNotEqual(dset.format, dset2.format) # self.assertNotEqual(dset._fingerprint, dset2._fingerprint) def test_with_transform(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: transform = lambda x: {"foo": x["col_1"]} # noqa: E731 with dset.with_transform(transform, columns=["col_1"]) as dset2: dset.set_transform(transform, columns=["col_1"]) self.assertDictEqual(dset.format, dset2.format) self.assertEqual(dset._fingerprint, dset2._fingerprint) dset.reset_format() self.assertNotEqual(dset.format, dset2.format) self.assertNotEqual(dset._fingerprint, dset2._fingerprint) @require_tf def test_tf_dataset_conversion(self, in_memory): tmp_dir = tempfile.TemporaryDirectory() for num_workers in [0, 1, 2]: if 
num_workers > 0 and sys.platform == "win32" and not in_memory: continue # This test hangs on the Py3.10 test worker, but it runs fine locally on my Windows machine with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2, num_workers=num_workers) batch = next(iter(tf_dataset)) self.assertEqual(batch.shape.as_list(), [2, 4]) self.assertEqual(batch.dtype.name, "int64") with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=2, num_workers=num_workers) batch = next(iter(tf_dataset)) self.assertEqual(batch.shape.as_list(), [2]) self.assertEqual(batch.dtype.name, "int64") with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: # Check that it works with all default options (except batch_size because the dummy dataset only has 4) tf_dataset = dset.to_tf_dataset(batch_size=2, num_workers=num_workers) batch = next(iter(tf_dataset)) self.assertEqual(batch["col_1"].shape.as_list(), [2]) self.assertEqual(batch["col_2"].shape.as_list(), [2]) self.assertEqual(batch["col_1"].dtype.name, "int64") self.assertEqual(batch["col_2"].dtype.name, "string") # Assert that we're converting strings properly with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: # Check that when we use a transform that creates a new column from existing column values # but don't load the old columns that the new column depends on in the final dataset, # that they're still kept around long enough to be used in the transform transform_dset = dset.with_transform( lambda x: {"new_col": [val * 2 for val in x["col_1"]], "col_1": x["col_1"]} ) tf_dataset = transform_dset.to_tf_dataset(columns="new_col", batch_size=2, num_workers=num_workers) batch = next(iter(tf_dataset)) self.assertEqual(batch.shape.as_list(), [2]) self.assertEqual(batch.dtype.name, "int64") del transform_dset del tf_dataset # For correct cleanup @require_tf def test_tf_index_reshuffling(self, in_memory): # This test checks that when we do two epochs over a tf.data.Dataset from to_tf_dataset # that we get a different shuffle order each time # It also checks that when we aren't shuffling, that the dataset order is fully preserved # even when loading is split across multiple workers data = {"col_1": list(range(20))} for num_workers in [0, 1, 2, 3]: with Dataset.from_dict(data) as dset: tf_dataset = dset.to_tf_dataset(batch_size=10, shuffle=True, num_workers=num_workers) indices = [] for batch in tf_dataset: indices.append(batch["col_1"]) indices = np.concatenate([arr.numpy() for arr in indices]) second_indices = [] for batch in tf_dataset: second_indices.append(batch["col_1"]) second_indices = np.concatenate([arr.numpy() for arr in second_indices]) self.assertFalse(np.array_equal(indices, second_indices)) self.assertEqual(len(indices), len(np.unique(indices))) self.assertEqual(len(second_indices), len(np.unique(second_indices))) tf_dataset = dset.to_tf_dataset(batch_size=1, shuffle=False, num_workers=num_workers) for i, batch in enumerate(tf_dataset): # Assert that the unshuffled order is fully preserved even when multiprocessing self.assertEqual(i, batch["col_1"].numpy()) @require_tf def test_tf_label_renaming(self, in_memory): # Protect TF-specific imports in here import tensorflow as tf from datasets.utils.tf_utils import minimal_tf_collate_fn_with_renaming tmp_dir = tempfile.TemporaryDirectory() with 
self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: with dset.rename_columns({"col_1": "features", "col_2": "label"}) as new_dset: tf_dataset = new_dset.to_tf_dataset(collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4) batch = next(iter(tf_dataset)) self.assertTrue("labels" in batch and "features" in batch) tf_dataset = new_dset.to_tf_dataset( columns=["features", "labels"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4 ) batch = next(iter(tf_dataset)) self.assertTrue("labels" in batch and "features" in batch) tf_dataset = new_dset.to_tf_dataset( columns=["features", "label"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4 ) batch = next(iter(tf_dataset)) self.assertTrue("labels" in batch and "features" in batch) # Assert renaming was handled correctly tf_dataset = new_dset.to_tf_dataset( columns=["features"], label_cols=["labels"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4, ) batch = next(iter(tf_dataset)) self.assertEqual(len(batch), 2) # Assert that we don't have any empty entries here self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor)) tf_dataset = new_dset.to_tf_dataset( columns=["features"], label_cols=["label"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4, ) batch = next(iter(tf_dataset)) self.assertEqual(len(batch), 2) # Assert that we don't have any empty entries here self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor)) tf_dataset = new_dset.to_tf_dataset( columns=["features"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4, ) batch = next(iter(tf_dataset)) # Assert that labels didn't creep in when we don't ask for them # just because the collate_fn added them self.assertTrue(isinstance(batch, tf.Tensor)) del tf_dataset # For correct cleanup @require_tf def test_tf_dataset_options(self, in_memory): tmp_dir = tempfile.TemporaryDirectory() # Test that batch_size option works as expected with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2) batch = next(iter(tf_dataset)) self.assertEqual(batch.shape.as_list(), [2, 4]) self.assertEqual(batch.dtype.name, "int64") # Test that batch_size=None (optional) works as expected with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=None) single_example = next(iter(tf_dataset)) self.assertEqual(single_example.shape.as_list(), []) self.assertEqual(single_example.dtype.name, "int64") # Assert that we can batch it with `tf.data.Dataset.batch` method batched_dataset = tf_dataset.batch(batch_size=2) batch = next(iter(batched_dataset)) self.assertEqual(batch.shape.as_list(), [2]) self.assertEqual(batch.dtype.name, "int64") # Test that batching a batch_size=None dataset produces the same results as using batch_size arg with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: batch_size = 2 tf_dataset_no_batch = dset.to_tf_dataset(columns="col_3") tf_dataset_batch = dset.to_tf_dataset(columns="col_3", batch_size=batch_size) self.assertEqual(tf_dataset_no_batch.element_spec, tf_dataset_batch.unbatch().element_spec) self.assertEqual(tf_dataset_no_batch.cardinality(), tf_dataset_batch.cardinality() * batch_size) for batch_1, batch_2 in zip(tf_dataset_no_batch.batch(batch_size=batch_size), tf_dataset_batch): self.assertEqual(batch_1.shape, batch_2.shape) 
self.assertEqual(batch_1.dtype, batch_2.dtype) self.assertListEqual(batch_1.numpy().tolist(), batch_2.numpy().tolist()) # Test that requesting label_cols works as expected with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_1", label_cols=["col_2", "col_3"], batch_size=4) batch = next(iter(tf_dataset)) self.assertEqual(len(batch), 2) self.assertEqual(set(batch[1].keys()), {"col_2", "col_3"}) self.assertEqual(batch[0].dtype.name, "int64") # Assert data comes out as expected and isn't shuffled self.assertEqual(batch[0].numpy().tolist(), [3, 2, 1, 0]) self.assertEqual(batch[1]["col_2"].numpy().tolist(), [b"a", b"b", b"c", b"d"]) self.assertEqual(batch[1]["col_3"].numpy().tolist(), [0, 1, 0, 1]) # Check that incomplete batches are dropped if requested with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=3) tf_dataset_with_drop = dset.to_tf_dataset(columns="col_1", batch_size=3, drop_remainder=True) self.assertEqual(len(tf_dataset), 2) # One batch of 3 and one batch of 1 self.assertEqual(len(tf_dataset_with_drop), 1) # Incomplete batch of 1 is dropped # Test that `NotImplementedError` is raised `batch_size` is None and `num_workers` is > 0 with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: with self.assertRaisesRegex( NotImplementedError, "`batch_size` must be specified when using multiple workers" ): dset.to_tf_dataset(columns="col_1", batch_size=None, num_workers=2) del tf_dataset # For correct cleanup del tf_dataset_with_drop class MiscellaneousDatasetTest(TestCase): def test_from_pandas(self): data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} df = pd.DataFrame.from_dict(data) with Dataset.from_pandas(df) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) features = Features({"col_1": Value("int64"), "col_2": Value("string")}) with Dataset.from_pandas(df, features=features) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) features = Features({"col_1": Value("int64"), "col_2": Value("string")}) with Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features)) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) features = Features({"col_1": List(Value("string")), "col_2": Value("string")}) self.assertRaises(TypeError, Dataset.from_pandas, df, features=features) @require_polars def test_from_polars(self): import polars as pl data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} df = pl.from_dict(data) with Dataset.from_polars(df) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, 
Features({"col_1": Value("int64"), "col_2": Value("large_string")})) features = Features({"col_1": Value("int64"), "col_2": Value("large_string")}) with Dataset.from_polars(df, features=features) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")})) features = Features({"col_1": Value("int64"), "col_2": Value("large_string")}) with Dataset.from_polars(df, features=features, info=DatasetInfo(features=features)) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")})) features = Features({"col_1": List(Value("string")), "col_2": Value("large_string")}) self.assertRaises(TypeError, Dataset.from_polars, df, features=features) def test_from_dict(self): data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": pa.array([True, False, True, False])} with Dataset.from_dict(data) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertSequenceEqual(dset["col_3"], data["col_3"].to_pylist()) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) self.assertDictEqual( dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) ) features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) with Dataset.from_dict(data, features=features) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertSequenceEqual(dset["col_3"], data["col_3"].to_pylist()) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) self.assertDictEqual( dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) ) features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) with Dataset.from_dict(data, features=features, info=DatasetInfo(features=features)) as dset: self.assertSequenceEqual(dset["col_1"], data["col_1"]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertSequenceEqual(dset["col_3"], data["col_3"].to_pylist()) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) self.assertDictEqual( dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) ) features = Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")}) with Dataset.from_dict(data, features=features) as dset: # the integers are converted to strings self.assertSequenceEqual(dset["col_1"], [str(x) for x in data["col_1"]]) self.assertSequenceEqual(dset["col_2"], data["col_2"]) self.assertSequenceEqual(dset["col_3"], [int(x) for x in data["col_3"].to_pylist()]) self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) self.assertDictEqual( dset.features, Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")}) ) features = Features({"col_1": Value("int64"), "col_2": Value("int64"), "col_3": Value("bool")}) self.assertRaises(ValueError, Dataset.from_dict, data, features=features) def 
test_concatenate_mixed_memory_and_disk(self): data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: with ( Dataset.from_dict(data1, info=info1).map(cache_file_name=os.path.join(tmp_dir, "d1.arrow")) as dset1, Dataset.from_dict(data2, info=info2).map(cache_file_name=os.path.join(tmp_dir, "d2.arrow")) as dset2, Dataset.from_dict(data3) as dset3, ): with concatenate_datasets([dset1, dset2, dset3]) as concatenated_dset: self.assertEqual(len(concatenated_dset), len(dset1) + len(dset2) + len(dset3)) self.assertSequenceEqual(concatenated_dset["id"], dset1["id"][:] + dset2["id"][:] + dset3["id"][:]) @require_transformers @pytest.mark.integration def test_set_format_encode(self): from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") def encode(batch): return tokenizer(batch["text"], padding="longest", return_tensors="np") with Dataset.from_dict({"text": ["hello there", "foo"]}) as dset: dset.set_transform(transform=encode) self.assertEqual(str(dset[:2]), str(encode({"text": ["hello there", "foo"]}))) @require_tf def test_tf_string_encoding(self): data = {"col_1": ["á", "é", "í", "ó", "ú"], "col_2": ["à", "è", "ì", "ò", "ù"]} with Dataset.from_dict(data) as dset: tf_dset_wo_batch = dset.to_tf_dataset(columns=["col_1", "col_2"]) for tf_row, row in zip(tf_dset_wo_batch, dset): self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"]) self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"]) tf_dset_w_batch = dset.to_tf_dataset(columns=["col_1", "col_2"], batch_size=2) for tf_row, row in zip(tf_dset_w_batch.unbatch(), dset): self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"]) self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"]) self.assertEqual(tf_dset_w_batch.unbatch().element_spec, tf_dset_wo_batch.element_spec) self.assertEqual(tf_dset_w_batch.element_spec, tf_dset_wo_batch.batch(2).element_spec) def test_cast_with_sliced_list(): old_features = Features({"foo": List(Value("int64"))}) new_features = Features({"foo": List(Value("int32"))}) dataset = Dataset.from_dict({"foo": [[i] * (i % 3) for i in range(20)]}, features=old_features) casted_dataset = dataset.cast(new_features, batch_size=2) # small batch size to slice the ListArray assert dataset["foo"] == casted_dataset["foo"] assert casted_dataset.features == new_features @pytest.mark.parametrize("include_nulls", [False, True]) def test_class_encode_column_with_none(include_nulls): dataset = Dataset.from_dict({"col_1": ["a", "b", "c", None, "d", None]}) dataset = dataset.class_encode_column("col_1", include_nulls=include_nulls) class_names = ["a", "b", "c", "d"] if include_nulls: class_names += ["None"] assert isinstance(dataset.features["col_1"], ClassLabel) assert set(dataset.features["col_1"].names) == set(class_names) assert (None in dataset.unique("col_1")) == (not include_nulls) @pytest.mark.parametrize("null_placement", ["first", "last"]) def test_sort_with_none(null_placement): dataset = Dataset.from_dict({"col_1": ["item_2", "item_3", "item_1", None, "item_4", None]}) dataset = dataset.sort("col_1", null_placement=null_placement) if null_placement == "first": assert dataset["col_1"] == [None, None, "item_1", "item_2", "item_3", "item_4"] else: assert dataset["col_1"] == ["item_1", "item_2", "item_3", "item_4", None, None] def test_update_metadata_with_features(dataset_dict): 
table1 = pa.Table.from_pydict(dataset_dict) features1 = Features.from_arrow_schema(table1.schema) features2 = features1.copy() features2["col_2"] = ClassLabel(num_classes=len(table1)) assert features1 != features2 table2 = update_metadata_with_features(table1, features2) metadata = json.loads(table2.schema.metadata[b"huggingface"].decode()) assert features2 == Features.from_dict(metadata["info"]["features"]) with Dataset(table1) as dset1, Dataset(table2) as dset2: assert dset1.features == features1 assert dset2.features == features2 @pytest.mark.parametrize("dataset_type", ["in_memory", "memory_mapped", "mixed"]) @pytest.mark.parametrize("axis, expected_shape", [(0, (4, 3)), (1, (2, 6))]) def test_concatenate_datasets(dataset_type, axis, expected_shape, dataset_dict, arrow_path): table = { "in_memory": InMemoryTable.from_pydict(dataset_dict), "memory_mapped": MemoryMappedTable.from_file(arrow_path), } tables = [ table[dataset_type if dataset_type != "mixed" else "memory_mapped"].slice(0, 2), # shape = (2, 3) table[dataset_type if dataset_type != "mixed" else "in_memory"].slice(2, 4), # shape = (2, 3) ] if axis == 1: # don't duplicate columns tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names]) datasets = [Dataset(table) for table in tables] dataset = concatenate_datasets(datasets, axis=axis) assert dataset.shape == expected_shape assert_arrow_metadata_are_synced_with_dataset_features(dataset) def test_concatenate_datasets_new_columns(): dataset1 = Dataset.from_dict({"col_1": ["a", "b", "c"]}) dataset2 = Dataset.from_dict({"col_1": ["d", "e", "f"], "col_2": [True, False, True]}) dataset = concatenate_datasets([dataset1, dataset2]) assert dataset.data.shape == (6, 2) assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool")}) assert dataset[:] == {"col_1": ["a", "b", "c", "d", "e", "f"], "col_2": [None, None, None, True, False, True]} dataset3 = Dataset.from_dict({"col_3": ["a_1"]}) dataset = concatenate_datasets([dataset, dataset3]) assert dataset.data.shape == (7, 3) assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool"), "col_3": Value("string")}) assert dataset[:] == { "col_1": ["a", "b", "c", "d", "e", "f", None], "col_2": [None, None, None, True, False, True, None], "col_3": [None, None, None, None, None, None, "a_1"], } @pytest.mark.parametrize("axis", [0, 1]) def test_concatenate_datasets_complex_features(axis): n = 5 dataset1 = Dataset.from_dict( {"col_1": [0] * n, "col_2": list(range(n))}, features=Features({"col_1": Value("int32"), "col_2": ClassLabel(num_classes=n)}), ) if axis == 1: dataset2 = dataset1.rename_columns({col: col + "_" for col in dataset1.column_names}) expected_features = Features({**dataset1.features, **dataset2.features}) else: dataset2 = dataset1 expected_features = dataset1.features assert concatenate_datasets([dataset1, dataset2], axis=axis).features == expected_features @pytest.mark.parametrize("other_dataset_type", ["in_memory", "memory_mapped", "concatenation"]) @pytest.mark.parametrize("axis, expected_shape", [(0, (8, 3)), (1, (4, 6))]) def test_concatenate_datasets_with_concatenation_tables( axis, expected_shape, other_dataset_type, dataset_dict, arrow_path ): def _create_concatenation_table(axis): if axis == 0: # shape: (4, 3) = (4, 1) + (4, 2) concatenation_table = ConcatenationTable.from_blocks( [ [ InMemoryTable.from_pydict({"col_1": dataset_dict["col_1"]}), MemoryMappedTable.from_file(arrow_path).remove_column(0), ] ] ) elif axis == 1: # shape: (4, 3) = (1, 
3) + (3, 3) concatenation_table = ConcatenationTable.from_blocks( [ [InMemoryTable.from_pydict(dataset_dict).slice(0, 1)], [MemoryMappedTable.from_file(arrow_path).slice(1, 4)], ] ) return concatenation_table concatenation_table = _create_concatenation_table(axis) assert concatenation_table.shape == (4, 3) if other_dataset_type == "in_memory": other_table = InMemoryTable.from_pydict(dataset_dict) elif other_dataset_type == "memory_mapped": other_table = MemoryMappedTable.from_file(arrow_path) elif other_dataset_type == "concatenation": other_table = _create_concatenation_table(axis) assert other_table.shape == (4, 3) tables = [concatenation_table, other_table] if axis == 1: # don't duplicate columns tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names]) for tables in [tables, reversed(tables)]: datasets = [Dataset(table) for table in tables] dataset = concatenate_datasets(datasets, axis=axis) assert dataset.shape == expected_shape def test_concatenate_datasets_duplicate_columns(dataset): with pytest.raises(ValueError) as excinfo: concatenate_datasets([dataset, dataset], axis=1) assert "duplicated" in str(excinfo.value) def test_interleave_datasets(): d1 = Dataset.from_dict({"a": [0, 1, 2]}) d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) dataset = interleave_datasets([d1, d2, d3]) expected_length = 3 * min(len(d1), len(d2), len(d3)) expected_values = [x["a"] for x in itertools.chain(*zip(d1, d2, d3))] assert isinstance(dataset, Dataset) assert len(dataset) == expected_length assert dataset["a"] == expected_values assert dataset._fingerprint == interleave_datasets([d1, d2, d3])._fingerprint def test_interleave_datasets_probabilities(): seed = 42 probabilities = [0.3, 0.5, 0.2] d1 = Dataset.from_dict({"a": [0, 1, 2]}) d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed) expected_length = 7 # hardcoded expected_values = [10, 11, 20, 12, 0, 21, 13] # hardcoded assert isinstance(dataset, Dataset) assert len(dataset) == expected_length assert dataset["a"] == expected_values assert ( dataset._fingerprint == interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)._fingerprint ) def test_interleave_datasets_oversampling_strategy(): d1 = Dataset.from_dict({"a": [0, 1, 2]}) d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") expected_length = 3 * max(len(d1), len(d2), len(d3)) expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20] # hardcoded assert isinstance(dataset, Dataset) assert len(dataset) == expected_length assert dataset["a"] == expected_values assert dataset._fingerprint == interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")._fingerprint def test_interleave_datasets_probabilities_oversampling_strategy(): seed = 42 probabilities = [0.3, 0.5, 0.2] d1 = Dataset.from_dict({"a": [0, 1, 2]}) d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) dataset = interleave_datasets( [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed ) expected_length = 16 # hardcoded expected_values = [10, 11, 20, 12, 0, 21, 13, 10, 1, 11, 12, 22, 13, 20, 10, 2] # hardcoded assert isinstance(dataset, Dataset) assert 
len(dataset) == expected_length assert dataset["a"] == expected_values assert ( dataset._fingerprint == interleave_datasets( [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed )._fingerprint ) @pytest.mark.parametrize("batch_size", [4, 5]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_dataset_iter_batch(batch_size, drop_last_batch): n = 25 dset = Dataset.from_dict({"i": list(range(n))}) all_col_values = list(range(n)) batches = [] for i, batch in enumerate(dset.iter(batch_size, drop_last_batch=drop_last_batch)): assert batch == {"i": all_col_values[i * batch_size : (i + 1) * batch_size]} batches.append(batch) if drop_last_batch: assert all(len(batch["i"]) == batch_size for batch in batches) else: assert all(len(batch["i"]) == batch_size for batch in batches[:-1]) assert len(batches[-1]["i"]) <= batch_size @pytest.mark.parametrize( "column, expected_dtype", [(["a", "b", "c", "d"], "string"), ([1, 2, 3, 4], "int64"), ([1.0, 2.0, 3.0, 4.0], "float64")], ) @pytest.mark.parametrize("in_memory", [False, True]) @pytest.mark.parametrize( "transform", [ None, ("shuffle", (42,), {}), ("with_format", ("pandas",), {}), ("class_encode_column", ("col_2",), {}), ("select", (range(3),), {}), ], ) def test_dataset_add_column(column, expected_dtype, in_memory, transform, dataset_dict, arrow_path): column_name = "col_4" original_dataset = ( Dataset(InMemoryTable.from_pydict(dataset_dict)) if in_memory else Dataset(MemoryMappedTable.from_file(arrow_path)) ) if transform is not None: transform_name, args, kwargs = transform original_dataset: Dataset = getattr(original_dataset, transform_name)(*args, **kwargs) column = column[:3] if transform is not None and transform_name == "select" else column dataset = original_dataset.add_column(column_name, column) assert dataset.data.shape == (3, 4) if transform is not None and transform_name == "select" else (4, 4) expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} # Sort expected features as in the original dataset expected_features = {feature: expected_features[feature] for feature in original_dataset.features} # Add new column feature expected_features[column_name] = expected_dtype assert dataset.data.column_names == list(expected_features.keys()) for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype assert len(dataset.data.blocks) == 1 if in_memory else 2 # multiple InMemoryTables are consolidated as one assert dataset.format["type"] == original_dataset.format["type"] assert dataset._fingerprint != original_dataset._fingerprint dataset.reset_format() original_dataset.reset_format() assert all(dataset[col] == original_dataset[col] for col in original_dataset.column_names) assert set(dataset["col_4"]) == set(column) if dataset._indices is not None: dataset_indices = dataset._indices["indices"].to_pylist() expected_dataset_indices = original_dataset._indices["indices"].to_pylist() assert dataset_indices == expected_dataset_indices assert_arrow_metadata_are_synced_with_dataset_features(dataset) @pytest.mark.parametrize( "transform", [None, ("shuffle", (42,), {}), ("with_format", ("pandas",), {}), ("class_encode_column", ("col_2",), {})], ) @pytest.mark.parametrize("in_memory", [False, True]) @pytest.mark.parametrize( "item", [ {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "2", "col_2": "2", "col_3": "2"}, {"col_1": 2, "col_2": 2, "col_3": 2}, {"col_1": 2.0, "col_2": 2.0, "col_3": 2.0}, ], ) def 
test_dataset_add_item(item, in_memory, dataset_dict, arrow_path, transform): dataset_to_test = ( Dataset(InMemoryTable.from_pydict(dataset_dict)) if in_memory else Dataset(MemoryMappedTable.from_file(arrow_path)) ) if transform is not None: transform_name, args, kwargs = transform dataset_to_test: Dataset = getattr(dataset_to_test, transform_name)(*args, **kwargs) dataset = dataset_to_test.add_item(item) assert dataset.data.shape == (5, 3) expected_features = dataset_to_test.features assert sorted(dataset.data.column_names) == sorted(expected_features.keys()) for feature, expected_dtype in expected_features.items(): assert dataset.features[feature] == expected_dtype assert len(dataset.data.blocks) == 1 if in_memory else 2 # multiple InMemoryTables are consolidated as one assert dataset.format["type"] == dataset_to_test.format["type"] assert dataset._fingerprint != dataset_to_test._fingerprint dataset.reset_format() dataset_to_test.reset_format() assert dataset[:-1] == dataset_to_test[:] assert {k: int(v) for k, v in dataset[-1].items()} == {k: int(v) for k, v in item.items()} if dataset._indices is not None: dataset_indices = dataset._indices["indices"].to_pylist() dataset_to_test_indices = dataset_to_test._indices["indices"].to_pylist() assert dataset_indices == dataset_to_test_indices + [len(dataset_to_test._data)] def test_dataset_add_item_new_columns(): dataset = Dataset.from_dict({"col_1": [0, 1, 2]}, features=Features({"col_1": Value("uint8")})) dataset = dataset.add_item({"col_1": 3, "col_2": "a"}) assert dataset.data.shape == (4, 2) assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string")}) assert dataset[:] == {"col_1": [0, 1, 2, 3], "col_2": [None, None, None, "a"]} dataset = dataset.add_item({"col_3": True}) assert dataset.data.shape == (5, 3) assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string"), "col_3": Value("bool")}) assert dataset[:] == { "col_1": [0, 1, 2, 3, None], "col_2": [None, None, None, "a", None], "col_3": [None, None, None, None, True], } def test_dataset_add_item_introduce_feature_type(): dataset = Dataset.from_dict({"col_1": [None, None, None]}) dataset = dataset.add_item({"col_1": "a"}) assert dataset.data.shape == (4, 1) assert dataset.features == Features({"col_1": Value("string")}) assert dataset[:] == {"col_1": [None, None, None, "a"]} def test_dataset_filter_batched_indices(): ds = Dataset.from_dict({"num": [0, 1, 2, 3]}) ds = ds.filter(lambda num: num % 2 == 0, input_columns="num", batch_size=2) assert all(item["num"] % 2 == 0 for item in ds) @pytest.mark.parametrize("in_memory", [False, True]) def test_dataset_from_file(in_memory, dataset, arrow_file): filename = arrow_file with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): dataset_from_file = Dataset.from_file(filename, in_memory=in_memory) assert dataset_from_file.features.type == dataset.features.type assert dataset_from_file.features == dataset.features assert dataset_from_file.cache_files == ([{"filename": filename}] if not in_memory else []) def _check_csv_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): 
cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_csv_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_csv_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_csv(csv_path, features=features, cache_dir=cache_dir) _check_csv_dataset(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_csv_split(split, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, split=split) _check_csv_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path): if issubclass(path_type, str): path = csv_path elif issubclass(path_type, list): path = [csv_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_csv(path, cache_dir=cache_dir) _check_csv_dataset(dataset, expected_features) def _check_json_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else 
None ) dataset = Dataset.from_json(jsonl_path, features=features, cache_dir=cache_dir) _check_json_dataset(dataset, expected_features) def test_dataset_from_json_with_class_label_feature(jsonl_str_path, tmp_path): features = Features( { "col_1": ClassLabel(names=["s0", "s1", "s2", "s3"]), "col_2": Value("int64"), "col_3": Value("float64"), } ) cache_dir = tmp_path / "cache" dataset = Dataset.from_json(jsonl_str_path, features=features, cache_dir=cache_dir) assert dataset.features["col_1"].dtype == "int64" @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_json_split(split, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, split=split) _check_json_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): if issubclass(path_type, str): path = jsonl_path elif issubclass(path_type, list): path = [jsonl_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_json(path, cache_dir=cache_dir) _check_json_dataset(dataset, expected_features) def _check_parquet_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_parquet_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_parquet_features(features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_parquet(parquet_path, features=features, cache_dir=cache_dir) _check_parquet_dataset(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_parquet_split(split, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, split=split) _check_parquet_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def 
test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path): if issubclass(path_type, str): path = parquet_path elif issubclass(path_type, list): path = [parquet_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = Dataset.from_parquet(path, cache_dir=cache_dir) _check_parquet_dataset(dataset, expected_features) def _check_text_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_text(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_text_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ], ) def test_dataset_from_text_features(features, text_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"text": "string"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_text(text_path, features=features, cache_dir=cache_dir) _check_text_dataset(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_text_split(split, text_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"text": "string"} dataset = Dataset.from_text(text_path, cache_dir=cache_dir, split=split) _check_text_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_text_path_type(path_type, text_path, tmp_path): if issubclass(path_type, str): path = text_path elif issubclass(path_type, list): path = [text_path] cache_dir = tmp_path / "cache" expected_features = {"text": "string"} dataset = Dataset.from_text(path, cache_dir=cache_dir) _check_text_dataset(dataset, expected_features) @pytest.fixture def data_generator(): def _gen(): data = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] for item in data: yield item return _gen def _check_generator_dataset(dataset, expected_features, split): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.split == split assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_generator_keep_in_memory(keep_in_memory, data_generator, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_generator(data_generator, 
cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_generator_dataset(dataset, expected_features, NamedSplit("train")) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_generator_features(features, data_generator, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_generator(data_generator, features=features, cache_dir=cache_dir) _check_generator_dataset(dataset, expected_features, NamedSplit("train")) @pytest.mark.parametrize( "split", [None, NamedSplit("train"), "train", NamedSplit("foo"), "foo"], ) def test_dataset_from_generator_split(split, data_generator, tmp_path): cache_dir = tmp_path / "cache" default_expected_split = "train" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_split = split if split else default_expected_split if split: dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, split=split) else: dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir) _check_generator_dataset(dataset, expected_features, expected_split) @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark(): import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [ ("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0), ] df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float") dataset = Dataset.from_spark(df) assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_features(): import PIL.Image import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())] df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") features = Features({"idx": Value("int64"), "image": Image()}) dataset = Dataset.from_spark( df, features=features, ) assert isinstance(dataset, Dataset) assert dataset.num_rows == 1 assert dataset.num_columns == 2 assert dataset.column_names == ["idx", "image"] assert isinstance(dataset[0]["image"], PIL.Image.Image) assert dataset.features == features assert_arrow_metadata_are_synced_with_dataset_features(dataset) @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_different_cache(): import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() df = spark.createDataFrame([("0", 0)], "col_1: string, col_2: int") dataset = Dataset.from_spark(df) assert isinstance(dataset, Dataset) different_df = spark.createDataFrame([("1", 1)], "col_1: string, col_2: int") different_dataset = Dataset.from_spark(different_df) assert isinstance(different_dataset, Dataset) assert dataset[0]["col_1"] == "0" # Check to make sure that the second dataset wasn't read from the cache. 
assert different_dataset[0]["col_1"] == "1" def _check_sql_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("con_type", ["string", "engine"]) def test_dataset_from_sql_con_type(con_type, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning, caplog): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} if con_type == "string": con = "sqlite:///" + sqlite_path elif con_type == "engine": import sqlalchemy con = sqlalchemy.create_engine("sqlite:///" + sqlite_path) with caplog.at_level(INFO, logger=get_logger().name): dataset = Dataset.from_sql( "dataset", con, cache_dir=cache_dir, ) if con_type == "string": assert "couldn't be hashed properly" not in caplog.text elif con_type == "engine": assert "couldn't be hashed properly" in caplog.text dataset = Dataset.from_sql( "dataset", con, cache_dir=cache_dir, ) _check_sql_dataset(dataset, expected_features) @require_sqlalchemy @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir) _check_sql_dataset(dataset, expected_features) @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_sql( "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ) _check_sql_dataset(dataset, expected_features) def test_dataset_to_json(dataset, tmp_path): file_path = tmp_path / "test_path.jsonl" bytes_written = dataset.to_json(path_or_buf=file_path) assert file_path.is_file() assert bytes_written == file_path.stat().st_size df = pd.read_json(file_path, orient="records", lines=True) assert df.shape == dataset.shape assert list(df.columns) == list(dataset.column_names) @pytest.mark.parametrize("in_memory", [False, True]) @pytest.mark.parametrize( "method_and_params", [ ("rename_column", (), {"original_column_name": "labels", "new_column_name": "label"}), ("remove_columns", (), {"column_names": "labels"}), ( "cast", (), { "features": Features( { "tokens": List(Value("string")), "labels": List(Value("int16")), "answers": { "text": List(Value("string")), "answer_start": List(Value("int32")), }, "id": Value("int32"), } ) }, ), ("flatten", (), {}), ], ) def 
test_pickle_dataset_after_transforming_the_table(in_memory, method_and_params, arrow_file): method, args, kwargs = method_and_params with ( Dataset.from_file(arrow_file, in_memory=in_memory) as dataset, Dataset.from_file(arrow_file, in_memory=in_memory) as reference_dataset, ): out = getattr(dataset, method)(*args, **kwargs) dataset = out if out is not None else dataset pickled_dataset = pickle.dumps(dataset) reloaded_dataset = pickle.loads(pickled_dataset) assert dataset._data != reference_dataset._data assert dataset._data.table == reloaded_dataset._data.table def test_dummy_dataset_serialize_fs(dataset, mockfs): dataset_path = "mock://my_dataset" dataset.save_to_disk(dataset_path, storage_options=mockfs.storage_options) assert mockfs.isdir(dataset_path) assert mockfs.glob(dataset_path + "/*") reloaded = Dataset.load_from_disk(dataset_path, storage_options=mockfs.storage_options) assert len(reloaded) == len(dataset) assert reloaded.features == dataset.features assert reloaded.to_dict() == dataset.to_dict() @pytest.mark.parametrize( "uri_or_path", [ "relative/path", "/absolute/path", "hf://bucket/relative/path", "hdfs://relative/path", "hdfs:///absolute/path", ], ) def test_build_local_temp_path(uri_or_path): extracted_path = strip_protocol(uri_or_path) local_temp_path = Dataset._build_local_temp_path(extracted_path).as_posix() extracted_path_without_anchor = Path(extracted_path).relative_to(Path(extracted_path).anchor).as_posix() # Check that the local temp path is relative to the system temp dir path_relative_to_tmp_dir = Path(local_temp_path).relative_to(Path(tempfile.gettempdir())).as_posix() assert ( "hdfs://" not in path_relative_to_tmp_dir and "hf://" not in path_relative_to_tmp_dir and not local_temp_path.startswith(extracted_path_without_anchor) and local_temp_path.endswith(extracted_path_without_anchor) ), f"Local temp path: {local_temp_path}" class StratifiedTest(TestCase): def test_errors_train_test_split_stratify(self): ys = [ np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]), np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), ] for i in range(len(ys)): features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(ys[i])))}) data = {"text": np.ones(len(ys[i])), "label": ys[i]} d1 = Dataset.from_dict(data, features=features) # For checking stratify_by_column exist as key in self.features.keys() if i == 0: self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="labl") # For checking minimum class count error elif i == 1: self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label") # For check typeof label as ClassLabel type elif i == 2: d1 = Dataset.from_dict(data) self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label") # For checking test_size should be greater than or equal to number of classes elif i == 3: self.assertRaises(ValueError, d1.train_test_split, 0.30, stratify_by_column="label") # For checking train_size should be greater than or equal to number of classes elif i == 4: self.assertRaises(ValueError, d1.train_test_split, 0.60, stratify_by_column="label") def test_train_test_split_startify(self): ys = [ np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]), np.array([0] * 800 
+ [1] * 50), ] for y in ys: features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(y)))}) data = {"text": np.ones(len(y)), "label": y} d1 = Dataset.from_dict(data, features=features) d1 = d1.train_test_split(test_size=0.33, stratify_by_column="label") y = np.asanyarray(y) # To make it indexable for y[train] test_size = np.ceil(0.33 * len(y)) train_size = len(y) - test_size npt.assert_array_equal(np.unique(d1["train"]["label"]), np.unique(d1["test"]["label"])) # checking classes proportion p_train = np.bincount(np.unique(d1["train"]["label"], return_inverse=True)[1]) / float( len(d1["train"]["label"]) ) p_test = np.bincount(np.unique(d1["test"]["label"], return_inverse=True)[1]) / float( len(d1["test"]["label"]) ) npt.assert_array_almost_equal(p_train, p_test, 1) assert len(d1["train"]["text"]) + len(d1["test"]["text"]) == y.size assert len(d1["train"]["text"]) == train_size assert len(d1["test"]["text"]) == test_size def test_dataset_estimate_nbytes(): ds = Dataset.from_dict({"a": ["0" * 100] * 100}) assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than full dataset size" ds = Dataset.from_dict({"a": ["0" * 100] * 100}).select([0]) assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk" ds = Dataset.from_dict({"a": ["0" * 100] * 100}) ds = concatenate_datasets([ds] * 100) assert 0.9 * ds._estimate_nbytes() < 100 * 100 * 100, "must be smaller than full dataset size" assert 1.1 * ds._estimate_nbytes() > 100 * 100 * 100, "must be bigger than full dataset size" ds = Dataset.from_dict({"a": ["0" * 100] * 100}) ds = concatenate_datasets([ds] * 100).select([0]) assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk" def test_dataset_to_iterable_dataset(dataset: Dataset): iterable_dataset = dataset.to_iterable_dataset() assert isinstance(iterable_dataset, IterableDataset) assert list(iterable_dataset) == list(dataset) assert iterable_dataset.features == dataset.features iterable_dataset = dataset.to_iterable_dataset(num_shards=3) assert isinstance(iterable_dataset, IterableDataset) assert list(iterable_dataset) == list(dataset) assert iterable_dataset.features == dataset.features assert iterable_dataset.num_shards == 3 with pytest.raises(ValueError): dataset.to_iterable_dataset(num_shards=len(dataset) + 1) assert dataset.with_format("torch").to_iterable_dataset()._formatting.format_type == "torch" with pytest.raises(NotImplementedError): dataset.with_format("torch", columns=[dataset.column_names[0]]).to_iterable_dataset() @require_pil def test_dataset_format_with_unformatted_image(): import PIL ds = Dataset.from_dict( {"a": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 10, "b": [[0, 1]] * 10}, Features({"a": Image(), "b": List(Value("int64"))}), ) ds.set_format("np", columns=["b"], output_all_columns=True) assert isinstance(ds[0]["a"], PIL.Image.Image) assert isinstance(ds[0]["b"], np.ndarray) @pytest.mark.parametrize("batch_size", [1, 4]) @require_torch def test_dataset_with_torch_dataloader(dataset, batch_size): from torch.utils.data import DataLoader from datasets import config dataloader = DataLoader(dataset, batch_size=batch_size) with patch.object(dataset, "_getitem", wraps=dataset._getitem) as mock_getitem: out = list(dataloader) getitem_call_count = mock_getitem.call_count assert len(out) == len(dataset) // batch_size + int(len(dataset) % batch_size > 0) # calling dataset[list_of_indices] is much more efficient than [dataset[idx] for idx in list of indices] if config.TORCH_VERSION >= 
version.parse("1.13.0"): assert getitem_call_count == len(dataset) // batch_size + int(len(dataset) % batch_size > 0) @pytest.mark.parametrize("return_lazy_dict", [True, False, "mix"]) def test_map_cases(return_lazy_dict): def f(x): """May return a mix of LazyDict and regular Dict""" if x["a"] < 2: x["a"] = -1 return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [-1, -1, 2, 3]} def f(x): """May return a mix of LazyDict and regular Dict, but sometimes with None values""" if x["a"] < 2: x["a"] = None return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [None, None, 2, 3]} def f(x): """Return a LazyDict, but we remove a lazy column and add a new one""" if x["a"] < 2: x["b"] = -1 return x else: x["b"] = x["a"] return x ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f, remove_columns=["a"]) outputs = ds[:] assert outputs == {"b": [-1, -1, 2, 3]} # The formatted dataset version removes the lazy column from a different dictionary, hence it should be preserved in the output ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.with_format("numpy") ds = ds.map(f, remove_columns=["a"]) ds = ds.with_format(None) outputs = ds[:] assert outputs == {"a": [0, 1, 2, 3], "b": [-1, -1, 2, 3]} def f(x): """May return a mix of LazyDict and regular Dict, but we replace a lazy column""" if x["a"] < 2: x["a"] = -1 return dict(x) if return_lazy_dict is False else x else: x["a"] = x["a"] return x if return_lazy_dict is True else {"a": x["a"]} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f, remove_columns=["a"]) outputs = ds[:] assert outputs == ({"a": [-1, -1, 2, 3]} if return_lazy_dict is False else {}) def f(x): """May return a mix of LazyDict and regular Dict, but we modify a nested lazy column in-place""" if x["a"]["b"] < 2: x["a"]["c"] = -1 return dict(x) if return_lazy_dict is False else x else: x["a"]["c"] = x["a"]["b"] return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [{"b": 0}, {"b": 1}, {"b": 2}, {"b": 3}]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [{"b": 0, "c": -1}, {"b": 1, "c": -1}, {"b": 2, "c": 2}, {"b": 3, "c": 3}]} def f(x): """May return a mix of LazyDict and regular Dict, but using an extension type""" if x["a"][0][0] < 2: x["a"] = [[-1]] return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} features = Features({"a": Array2D(shape=(1, 1), dtype="int32")}) ds = Dataset.from_dict({"a": [[[i]] for i in [0, 1, 2, 3]]}, features=features) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [[[i]] for i in [-1, -1, 2, 3]]} def f(x): """May return a mix of LazyDict and regular Dict, but using a nested extension type""" if x["a"]["nested"][0][0] < 2: x["a"] = {"nested": [[-1]]} return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} features = Features({"a": {"nested": Array2D(shape=(1, 1), dtype="int64")}}) ds = Dataset.from_dict({"a": [{"nested": [[i]]} for i in [0, 1, 2, 3]]}, features=features) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [{"nested": [[i]]} for i in [-1, -1, 2, 3]]} def test_map_async(): dset = Dataset.from_dict({"x": range(100)}) async def f(example): await asyncio.sleep(0.1) return {"y": 1} _start = time.time() out = dset.map(f) 
assert time.time() - _start < 2.0 assert out[0]["y"] == 1 async def f(batch): await asyncio.sleep(0.1) return {"y": [1] * len(batch["x"])} _start = time.time() out = dset.map(f, batched=True) assert time.time() - _start < 2.0 assert out[0]["y"] == 1 def test_filter_async(): dset = Dataset.from_dict({"x": range(100)}) async def f(example): await asyncio.sleep(0.1) return example["x"] == 42 _start = time.time() out = dset.filter(f) assert time.time() - _start < 2.0 assert len(out) == 1 async def f(batch): await asyncio.sleep(0.1) return [x == 42 for x in batch["x"]] _start = time.time() out = dset.filter(f, batched=True) assert time.time() - _start < 2.0 assert len(out) == 1 def test_dataset_getitem_int_np_equivalence(): ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) assert ds[1] == ds[np.int64(1)] def test_dataset_getitem_raises(): ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) with pytest.raises(TypeError): ds[False] with pytest.raises(TypeError): ds._getitem(True) with pytest.raises(TypeError): ds[np.bool_(True)] with pytest.raises(TypeError): ds[1.0] def test_categorical_dataset(tmpdir): n_legs = pa.array([2, 4, 5, 100]) animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]).cast( pa.dictionary(pa.int32(), pa.string()) ) names = ["n_legs", "animals"] table = pa.Table.from_arrays([n_legs, animals], names=names) table_path = str(tmpdir / "data.parquet") pa.parquet.write_table(table, table_path) dataset = Dataset.from_parquet(table_path) entry = dataset[0] # Categorical types get transparently converted to string assert entry["animals"] == "Flamingo" def test_dataset_batch(): # Create a simple Dataset data = {"id": list(range(10)), "text": [f"Text {i}" for i in range(10)]} ds = Dataset.from_dict(data) # Test with batch_size=3, drop_last_batch=False batched_ds = ds.batch(batch_size=3, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 4 # 3 full batches and 1 partial batch for i, batch in enumerate(batches[:3]): # Check full batches assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"] # Check last partial batch assert len(batches[3]["id"]) == 1 assert len(batches[3]["text"]) == 1 assert batches[3]["id"] == [9] assert batches[3]["text"] == ["Text 9"] # Test with batch_size=3, drop_last_batch=True batched_ds = ds.batch(batch_size=3, drop_last_batch=True) batches = list(batched_ds) assert len(batches) == 3 # Only full batches for i, batch in enumerate(batches): assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"] # Test with batch_size=4 (doesn't evenly divide dataset size) batched_ds = ds.batch(batch_size=4, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 3 # 2 full batches and 1 partial batch for i, batch in enumerate(batches[:2]): # Check full batches assert len(batch["id"]) == 4 assert len(batch["text"]) == 4 assert batch["id"] == [4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3] assert batch["text"] == [f"Text {4 * i}", f"Text {4 * i + 1}", f"Text {4 * i + 2}", f"Text {4 * i + 3}"] # Check last partial batch assert len(batches[2]["id"]) == 2 assert len(batches[2]["text"]) == 2 assert batches[2]["id"] == [8, 9] assert batches[2]["text"] == ["Text 8", "Text 9"] def test_dataset_from_dict_with_large_list(): data = {"col_1": [[1, 2], [3, 4]]} features = 
Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) assert isinstance(ds, Dataset) assert pa.types.is_large_list(ds.data.schema.field("col_1").type) def test_dataset_save_to_disk_with_large_list(tmp_path): data = {"col_1": [[1, 2], [3, 4]]} features = Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() def test_dataset_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path): data = {"col_1": [[1, 2], [3, 4]]} features = Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() loaded_ds = load_from_disk(dataset_path) assert len(loaded_ds) == len(ds) assert loaded_ds.features == ds.features assert loaded_ds.to_dict() == ds.to_dict() @require_polars def test_from_polars_with_large_list(): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = Dataset.from_polars(df) assert isinstance(ds, Dataset) @require_polars def test_from_polars_save_to_disk_with_large_list(tmp_path): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = Dataset.from_polars(df) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() @require_polars def test_from_polars_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = Dataset.from_polars(df) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() loaded_ds = load_from_disk(dataset_path) assert len(loaded_ds) == len(ds) assert loaded_ds.features == ds.features assert loaded_ds.to_dict() == ds.to_dict() @require_polars def test_polars_round_trip(): ds = Dataset.from_dict({"x": [[1, 2], [3, 4, 5]], "y": ["a", "b"]}) assert isinstance(Dataset.from_polars(ds.to_polars()), Dataset)
datasets/tests/test_arrow_dataset.py/0
{ "file_path": "datasets/tests/test_arrow_dataset.py", "repo_id": "datasets", "token_count": 123217 }
114
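# The test module below covers the Arrow extractors (python, numpy, pandas, polars),
# the per-backend formatters (numpy, pandas, polars, torch, tensorflow, jax) including
# Image and Audio features, and query_table indexing behaviour.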
import datetime from pathlib import Path from unittest import TestCase import numpy as np import pandas as pd import pyarrow as pa import pytest from datasets import Audio, Features, Image, IterableDataset from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table from datasets.formatting.formatting import ( LazyBatch, LazyRow, NumpyArrowExtractor, PandasArrowExtractor, PythonArrowExtractor, ) from datasets.table import InMemoryTable from .utils import ( require_jax, require_numpy1_on_windows, require_pil, require_polars, require_sndfile, require_tf, require_torch, require_torchcodec, ) class AnyArray: def __init__(self, data) -> None: self.data = data def __array__(self) -> np.ndarray: return np.asarray(self.data) def _gen_any_arrays(): for _ in range(10): yield {"array": AnyArray(list(range(10)))} @pytest.fixture def any_arrays_dataset(): return IterableDataset.from_generator(_gen_any_arrays) _COL_A = [0, 1, 2] _COL_B = ["foo", "bar", "foobar"] _COL_C = [[[1.0, 0.0, 0.0]] * 2, [[0.0, 1.0, 0.0]] * 2, [[0.0, 0.0, 1.0]] * 2] _COL_D = [datetime.datetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)] * 3 _INDICES = [1, 0] IMAGE_PATH_1 = Path(__file__).parent / "features" / "data" / "test_image_rgb.jpg" IMAGE_PATH_2 = Path(__file__).parent / "features" / "data" / "test_image_rgba.png" AUDIO_PATH_1 = Path(__file__).parent / "features" / "data" / "test_audio_44100.wav" class ArrowExtractorTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) def test_python_extractor(self): pa_table = self._create_dummy_table() extractor = PythonArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0], "d": _COL_D[0]}) col = extractor.extract_column(pa_table) self.assertEqual(col, _COL_A) batch = extractor.extract_batch(pa_table) self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) def test_numpy_extractor(self): pa_table = self._create_dummy_table().drop(["c", "d"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0]}) col = extractor.extract_column(pa_table) np.testing.assert_equal(col, np.array(_COL_A)) batch = extractor.extract_batch(pa_table) np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)}) def test_numpy_extractor_nested(self): pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0].dtype, np.float64) self.assertEqual(row["c"].dtype, object) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, np.float64) self.assertEqual(col[0].dtype, object) self.assertEqual(col.dtype, object) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, np.float64) self.assertEqual(batch["c"][0].dtype, object) self.assertEqual(batch["c"].dtype, object) def test_numpy_extractor_temporal(self): pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(np.issubdtype(row["d"].dtype, np.datetime64)) col = extractor.extract_column(pa_table) self.assertTrue(np.issubdtype(col[0].dtype, np.datetime64)) self.assertTrue(np.issubdtype(col.dtype, np.datetime64)) batch = extractor.extract_batch(pa_table) self.assertTrue(np.issubdtype(batch["d"][0].dtype, np.datetime64)) 
self.assertTrue(np.issubdtype(batch["d"].dtype, np.datetime64)) def test_pandas_extractor(self): pa_table = self._create_dummy_table() extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertIsInstance(row, pd.DataFrame) pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) col = extractor.extract_column(pa_table) pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) batch = extractor.extract_batch(pa_table) self.assertIsInstance(batch, pd.DataFrame) pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) def test_pandas_extractor_nested(self): pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0][0].dtype, np.float64) self.assertEqual(row["c"].dtype, object) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, np.float64) self.assertEqual(col[0].dtype, object) self.assertEqual(col.dtype, object) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, np.float64) self.assertEqual(batch["c"][0].dtype, object) self.assertEqual(batch["c"].dtype, object) def test_pandas_extractor_temporal(self): pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype)) col = extractor.extract_column(pa_table) self.assertTrue(isinstance(col[0], datetime.datetime)) self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype)) batch = extractor.extract_batch(pa_table) self.assertTrue(isinstance(batch["d"][0], datetime.datetime)) self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype)) @require_polars def test_polars_extractor(self): import polars as pl from datasets.formatting.polars_formatter import PolarsArrowExtractor pa_table = self._create_dummy_table() extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertIsInstance(row, pl.DataFrame) assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all() assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all() col = extractor.extract_column(pa_table) assert pl.Series.eq(col, pl.Series("a", _COL_A)).all() batch = extractor.extract_batch(pa_table) self.assertIsInstance(batch, pl.DataFrame) assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all() assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all() @require_polars def test_polars_nested(self): import polars as pl from datasets.formatting.polars_formatter import PolarsArrowExtractor pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0][0].dtype, pl.Float64) self.assertEqual(row["c"].dtype, pl.List(pl.List(pl.Float64))) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, pl.Float64) self.assertEqual(col[0].dtype, pl.List(pl.Float64)) self.assertEqual(col.dtype, pl.List(pl.List(pl.Float64))) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, pl.Float64) self.assertEqual(batch["c"][0].dtype, pl.List(pl.Float64)) self.assertEqual(batch["c"].dtype, pl.List(pl.List(pl.Float64))) @require_polars def test_polars_temporal(self): from datasets.formatting.polars_formatter import 
PolarsArrowExtractor pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(row["d"].dtype.is_temporal()) col = extractor.extract_column(pa_table) self.assertTrue(isinstance(col[0], datetime.datetime)) self.assertTrue(col.dtype.is_temporal()) batch = extractor.extract_batch(pa_table) self.assertTrue(isinstance(batch["d"][0], datetime.datetime)) self.assertTrue(batch["d"].dtype.is_temporal()) class LazyDictTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def _create_dummy_formatter(self): return PythonFormatter(lazy=True) def test_lazy_dict_copy(self): pa_table = self._create_dummy_table() formatter = self._create_dummy_formatter() lazy_batch = formatter.format_batch(pa_table) lazy_batch_copy = lazy_batch.copy() self.assertEqual(type(lazy_batch), type(lazy_batch_copy)) self.assertEqual(lazy_batch.items(), lazy_batch_copy.items()) lazy_batch["d"] = [1, 2, 3] self.assertNotEqual(lazy_batch.items(), lazy_batch_copy.items()) class FormatterTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def test_python_formatter(self): pa_table = self._create_dummy_table() formatter = PythonFormatter() row = formatter.format_row(pa_table) self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]}) col = formatter.format_column(pa_table) self.assertEqual(col, _COL_A) batch = formatter.format_batch(pa_table) self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C}) def test_python_formatter_lazy(self): pa_table = self._create_dummy_table() formatter = PythonFormatter(lazy=True) row = formatter.format_row(pa_table) self.assertIsInstance(row, LazyRow) self.assertEqual(row["a"], _COL_A[0]) self.assertEqual(row["b"], _COL_B[0]) self.assertEqual(row["c"], _COL_C[0]) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, LazyBatch) self.assertEqual(batch["a"], _COL_A) self.assertEqual(batch["b"], _COL_B) self.assertEqual(batch["c"], _COL_C) def test_numpy_formatter(self): pa_table = self._create_dummy_table() formatter = NumpyFormatter() row = formatter.format_row(pa_table) np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])}) col = formatter.format_column(pa_table) np.testing.assert_equal(col, np.array(_COL_A)) batch = formatter.format_batch(pa_table) np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)}) assert batch["c"].shape == np.array(_COL_C).shape def test_numpy_formatter_np_array_kwargs(self): pa_table = self._create_dummy_table().drop(["b"]) formatter = NumpyFormatter(dtype=np.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, np.dtype(np.float16)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, np.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, np.dtype(np.float16)) self.assertEqual(batch["c"].dtype, np.dtype(np.float16)) @require_pil def test_numpy_formatter_image(self): # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = NumpyFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, np.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, np.uint8) self.assertEqual(col.shape, (2, 480, 640, 
3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, np.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = NumpyFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, np.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, np.ndarray) self.assertEqual(col.dtype, object) self.assertEqual(col[0].dtype, np.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], np.ndarray) self.assertEqual(batch["image"].dtype, object) self.assertEqual(batch["image"][0].dtype, np.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_torchcodec @require_sndfile def test_numpy_formatter_audio(self): pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = NumpyFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"].get_all_samples().data.cpu().numpy().dtype, np.dtype(np.float32)) col = formatter.format_column(pa_table) self.assertEqual(col[0].get_all_samples().data.cpu().numpy().dtype, np.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0].get_all_samples().data.cpu().numpy().dtype, np.dtype(np.float32)) def test_pandas_formatter(self): pa_table = self._create_dummy_table() formatter = PandasFormatter() row = formatter.format_row(pa_table) self.assertIsInstance(row, pd.DataFrame) pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) col = formatter.format_column(pa_table) pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, pd.DataFrame) pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) @require_polars def test_polars_formatter(self): import polars as pl from datasets.formatting import PolarsFormatter pa_table = self._create_dummy_table() formatter = PolarsFormatter() row = formatter.format_row(pa_table) self.assertIsInstance(row, pl.DataFrame) assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all() assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all() col = formatter.format_column(pa_table) assert pl.Series.eq(col, pl.Series("a", _COL_A)).all() batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, pl.DataFrame) assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all() assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all() @require_numpy1_on_windows @require_torch def test_torch_formatter(self): import torch from datasets.formatting import TorchFormatter pa_table = self._create_dummy_table() formatter = TorchFormatter() row = formatter.format_row(pa_table) torch.testing.assert_close(row["a"], torch.tensor(_COL_A, dtype=torch.int64)[0]) assert row["b"] == _COL_B[0] torch.testing.assert_close(row["c"], torch.tensor(_COL_C, dtype=torch.float32)[0]) col = formatter.format_column(pa_table) torch.testing.assert_close(col, torch.tensor(_COL_A, dtype=torch.int64)) batch = formatter.format_batch(pa_table) torch.testing.assert_close(batch["a"], 
torch.tensor(_COL_A, dtype=torch.int64)) assert batch["b"] == _COL_B torch.testing.assert_close(batch["c"], torch.tensor(_COL_C, dtype=torch.float32)) assert batch["c"].shape == np.array(_COL_C).shape @require_numpy1_on_windows @require_torch def test_torch_formatter_torch_tensor_kwargs(self): import torch from datasets.formatting import TorchFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = TorchFormatter(dtype=torch.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, torch.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, torch.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, torch.float16) self.assertEqual(batch["c"].dtype, torch.float16) @require_numpy1_on_windows @require_torch @require_pil def test_torch_formatter_image(self): import torch from datasets.formatting import TorchFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = TorchFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, torch.uint8) # torch uses CHW format contrary to numpy which uses HWC self.assertEqual(row["image"].shape, (3, 480, 640)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, torch.uint8) self.assertEqual(col.shape, (2, 3, 480, 640)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, torch.uint8) self.assertEqual(batch["image"].shape, (2, 3, 480, 640)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = TorchFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, torch.uint8) self.assertEqual(row["image"].shape, (3, 480, 640)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, torch.uint8) self.assertEqual(col[0].shape, (3, 480, 640)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, torch.uint8) self.assertEqual(batch["image"][0].shape, (3, 480, 640)) @require_torch @require_torchcodec @require_sndfile def test_torch_formatter_audio(self): import torch from datasets.formatting import TorchFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = TorchFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"].get_all_samples().data.dtype, torch.float32) col = formatter.format_column(pa_table) self.assertEqual(col[0].get_all_samples().data.dtype, torch.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0].get_all_samples().data.dtype, torch.float32) @require_tf def test_tf_formatter(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = self._create_dummy_table() formatter = TFFormatter() row = formatter.format_row(pa_table) tf.debugging.assert_equal(row["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)[0]) tf.debugging.assert_equal(row["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)[0]) tf.debugging.assert_equal(row["c"], tf.convert_to_tensor(_COL_C, dtype=tf.float32)[0]) col = formatter.format_column(pa_table) tf.debugging.assert_equal(col, tf.ragged.constant(_COL_A, dtype=tf.int64)) batch = formatter.format_batch(pa_table) 
tf.debugging.assert_equal(batch["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)) tf.debugging.assert_equal(batch["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)) self.assertIsInstance(batch["c"], tf.Tensor) self.assertEqual(batch["c"].dtype, tf.float32) tf.debugging.assert_equal( batch["c"].shape.as_list(), tf.convert_to_tensor(_COL_C, dtype=tf.float32).shape.as_list() ) tf.debugging.assert_equal(tf.convert_to_tensor(batch["c"]), tf.convert_to_tensor(_COL_C, dtype=tf.float32)) @require_tf def test_tf_formatter_tf_tensor_kwargs(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = TFFormatter(dtype=tf.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, tf.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, tf.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, tf.float16) self.assertEqual(batch["c"].dtype, tf.float16) @require_tf @require_pil def test_tf_formatter_image(self): import tensorflow as tf from datasets.formatting import TFFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = TFFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, tf.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, tf.uint8) self.assertEqual(col.shape, (2, 480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"][0].dtype, tf.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = TFFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, tf.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, tf.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, tf.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_tf @require_sndfile def test_tf_formatter_audio(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = TFFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) tf_row = tf.convert_to_tensor(row["audio"].get_all_samples().data.cpu().numpy()) self.assertEqual(tf_row.dtype, tf.float32) col = formatter.format_column(pa_table) tf_col_0 = tf.convert_to_tensor(col[0].get_all_samples().data.cpu().numpy()) self.assertEqual(tf_col_0.dtype, tf.float32) batch = formatter.format_batch(pa_table) tf_batch_0 = tf.convert_to_tensor(batch["audio"][0].get_all_samples().data.cpu().numpy()) self.assertEqual(tf_batch_0.dtype, tf.float32) @require_jax def test_jax_formatter(self): import jax import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table() formatter = JaxFormatter() row = formatter.format_row(pa_table) jnp.allclose(row["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)[0]) assert row["b"] == _COL_B[0] 
jnp.allclose(row["c"], jnp.array(_COL_C, dtype=jnp.float32)[0]) col = formatter.format_column(pa_table) jnp.allclose(col, jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) batch = formatter.format_batch(pa_table) jnp.allclose(batch["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) assert batch["b"] == _COL_B jnp.allclose(batch["c"], jnp.array(_COL_C, dtype=jnp.float32)) assert batch["c"].shape == np.array(_COL_C).shape @require_jax def test_jax_formatter_jnp_array_kwargs(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = JaxFormatter(dtype=jnp.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, jnp.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, jnp.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, jnp.float16) self.assertEqual(batch["c"].dtype, jnp.float16) @require_jax @require_pil def test_jax_formatter_image(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = JaxFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, jnp.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, jnp.uint8) self.assertEqual(col.shape, (2, 480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, jnp.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = JaxFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, jnp.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, jnp.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, jnp.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_jax @require_torchcodec @require_sndfile def test_jax_formatter_audio(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = JaxFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"]["array"].dtype, jnp.float32) col = formatter.format_column(pa_table) self.assertEqual(col[0]["array"].dtype, jnp.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0]["array"].dtype, jnp.float32) @require_jax def test_jax_formatter_device(self): import jax from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table() device = jax.devices()[0] formatter = JaxFormatter(device=str(device)) row = formatter.format_row(pa_table) assert row["a"].devices().pop() == device assert row["c"].devices().pop() == device col = formatter.format_column(pa_table) assert col.devices().pop() == device batch = formatter.format_batch(pa_table) assert batch["a"].devices().pop() == device assert batch["c"].devices().pop() == device 
class QueryTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def _create_dummy_arrow_indices(self): return pa.Table.from_arrays([pa.array(_INDICES, type=pa.uint64())], names=["indices"]) def assertTableEqual(self, first: pa.Table, second: pa.Table): self.assertEqual(first.schema, second.schema) for first_array, second_array in zip(first, second): self.assertEqual(first_array, second_array) self.assertEqual(first, second) def test_query_table_int(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) n = pa_table.num_rows # classical usage subtable = query_table(table, 0) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]})) subtable = query_table(table, 1) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]})) subtable = query_table(table, -1) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]})) # raise an IndexError with self.assertRaises(IndexError): query_table(table, n) with self.assertRaises(IndexError): query_table(table, -(n + 1)) # with indices indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, 0, indices=indices) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), ) with self.assertRaises(IndexError): assert len(indices) < n query_table(table, len(indices), indices=indices) def test_query_table_slice(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) n = pa_table.num_rows # classical usage subtable = query_table(table, slice(0, 1)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]})) subtable = query_table(table, slice(1, 2)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]})) subtable = query_table(table, slice(-2, -1)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": _COL_A[-2:-1], "b": _COL_B[-2:-1], "c": _COL_C[-2:-1]}) ) # usage with None subtable = query_table(table, slice(-1, None)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]})) subtable = query_table(table, slice(None, n + 1)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": _COL_A[: n + 1], "b": _COL_B[: n + 1], "c": _COL_C[: n + 1]}) ) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})) subtable = query_table(table, slice(-(n + 1), None)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": _COL_A[-(n + 1) :], "b": _COL_B[-(n + 1) :], "c": _COL_C[-(n + 1) :]}) ) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})) # usage with step subtable = query_table(table, slice(None, None, 2)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[::2], "b": _COL_B[::2], "c": _COL_C[::2]})) # empty ouput but no errors subtable = query_table(table, slice(-1, 0)) # usage with both negative and positive idx assert len(_COL_A[-1:0]) == 0 self.assertTableEqual(subtable, pa_table.slice(0, 0)) subtable = query_table(table, slice(2, 1)) assert len(_COL_A[2:1]) == 0 self.assertTableEqual(subtable, pa_table.slice(0, 0)) subtable = query_table(table, slice(n, n)) assert len(_COL_A[n:n]) == 0 self.assertTableEqual(subtable, pa_table.slice(0, 0)) 
subtable = query_table(table, slice(n, n + 1)) assert len(_COL_A[n : n + 1]) == 0 self.assertTableEqual(subtable, pa_table.slice(0, 0)) # it's not possible to get an error with a slice # with indices indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, slice(0, 1), indices=indices) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), ) subtable = query_table(table, slice(n - 1, n), indices=indices) assert len(indices.column(0).to_pylist()[n - 1 : n]) == 0 self.assertTableEqual(subtable, pa_table.slice(0, 0)) def test_query_table_range(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) n = pa_table.num_rows np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C) # classical usage subtable = query_table(table, range(0, 1)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[range(0, 1)], "b": np_B[range(0, 1)], "c": np_C[range(0, 1)].tolist()}), ) subtable = query_table(table, range(1, 2)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[range(1, 2)], "b": np_B[range(1, 2)], "c": np_C[range(1, 2)].tolist()}), ) subtable = query_table(table, range(-2, -1)) self.assertTableEqual( subtable, pa.Table.from_pydict( {"a": np_A[range(-2, -1)], "b": np_B[range(-2, -1)], "c": np_C[range(-2, -1)].tolist()} ), ) # usage with both negative and positive idx subtable = query_table(table, range(-1, 0)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[range(-1, 0)], "b": np_B[range(-1, 0)], "c": np_C[range(-1, 0)].tolist()}), ) subtable = query_table(table, range(-1, n)) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[range(-1, n)], "b": np_B[range(-1, n)], "c": np_C[range(-1, n)].tolist()}), ) # usage with step subtable = query_table(table, range(0, n, 2)) self.assertTableEqual( subtable, pa.Table.from_pydict( {"a": np_A[range(0, n, 2)], "b": np_B[range(0, n, 2)], "c": np_C[range(0, n, 2)].tolist()} ), ) subtable = query_table(table, range(0, n + 1, 2 * n)) self.assertTableEqual( subtable, pa.Table.from_pydict( { "a": np_A[range(0, n + 1, 2 * n)], "b": np_B[range(0, n + 1, 2 * n)], "c": np_C[range(0, n + 1, 2 * n)].tolist(), } ), ) # empty ouput but no errors subtable = query_table(table, range(2, 1)) assert len(np_A[range(2, 1)]) == 0 self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) subtable = query_table(table, range(n, n)) assert len(np_A[range(n, n)]) == 0 self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) # raise an IndexError with self.assertRaises(IndexError): with self.assertRaises(IndexError): np_A[range(0, n + 1)] query_table(table, range(0, n + 1)) with self.assertRaises(IndexError): with self.assertRaises(IndexError): np_A[range(-(n + 1), -1)] query_table(table, range(-(n + 1), -1)) with self.assertRaises(IndexError): with self.assertRaises(IndexError): np_A[range(n, n + 1)] query_table(table, range(n, n + 1)) # with indices indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, range(0, 1), indices=indices) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), ) with self.assertRaises(IndexError): assert len(indices) < n query_table(table, range(len(indices), len(indices) + 1), indices=indices) def test_query_table_str(self): pa_table = self._create_dummy_table() table 
= InMemoryTable(pa_table) subtable = query_table(table, "a") self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A})) with self.assertRaises(KeyError): query_table(table, "z") indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, "a", indices=indices) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": [_COL_A[i] for i in _INDICES]})) def test_query_table_iterable(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) n = pa_table.num_rows np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C) # classical usage subtable = query_table(table, [0]) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[[0]], "b": np_B[[0]], "c": np_C[[0]].tolist()}) ) subtable = query_table(table, [1]) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[[1]], "b": np_B[[1]], "c": np_C[[1]].tolist()}) ) subtable = query_table(table, [-1]) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[[-1]], "b": np_B[[-1]], "c": np_C[[-1]].tolist()}) ) subtable = query_table(table, [0, -1, 1]) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}), ) # numpy iterable subtable = query_table(table, np.array([0, -1, 1])) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}), ) # empty ouput but no errors subtable = query_table(table, []) assert len(np_A[[]]) == 0 self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) # raise an IndexError with self.assertRaises(IndexError): with self.assertRaises(IndexError): np_A[[n]] query_table(table, [n]) with self.assertRaises(IndexError): with self.assertRaises(IndexError): np_A[[-(n + 1)]] query_table(table, [-(n + 1)]) # with indices indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, [0], indices=indices) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), ) with self.assertRaises(IndexError): assert len(indices) < n query_table(table, [len(indices)], indices=indices) def test_query_table_indexable_type(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) n = pa_table.num_rows # classical usage subtable = query_table(table, np.int64(0)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]})) subtable = query_table(table, np.int64(1)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]})) subtable = query_table(table, np.int64(-1)) self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]})) # raise an IndexError with self.assertRaises(IndexError): query_table(table, np.int64(n)) with self.assertRaises(IndexError): query_table(table, np.int64(-(n + 1))) # with indices indices = InMemoryTable(self._create_dummy_arrow_indices()) subtable = query_table(table, np.int64(0), indices=indices) self.assertTableEqual( subtable, pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), ) with self.assertRaises(IndexError): assert len(indices) < n query_table(table, np.int64(len(indices)), indices=indices) def test_query_table_invalid_key_type(self): pa_table = self._create_dummy_table() table = InMemoryTable(pa_table) 
with self.assertRaises(TypeError): query_table(table, 0.0) with self.assertRaises(TypeError): query_table(table, [0, "a"]) with self.assertRaises(TypeError): query_table(table, int) with self.assertRaises(TypeError): def iter_to_inf(start=0): while True: yield start start += 1 query_table(table, iter_to_inf()) @pytest.fixture(scope="session") def arrow_table(): return pa.Table.from_pydict({"col_int": [0, 1, 2], "col_float": [0.0, 1.0, 2.0]}) @require_tf @pytest.mark.parametrize( "cast_schema", [ None, [("col_int", pa.int64()), ("col_float", pa.float64())], [("col_int", pa.int32()), ("col_float", pa.float64())], [("col_int", pa.int64()), ("col_float", pa.float32())], ], ) def test_tf_formatter_sets_default_dtypes(cast_schema, arrow_table): import tensorflow as tf from datasets.formatting import TFFormatter if cast_schema: arrow_table = arrow_table.cast(pa.schema(cast_schema)) arrow_table_dict = arrow_table.to_pydict() list_int = arrow_table_dict["col_int"] list_float = arrow_table_dict["col_float"] formatter = TFFormatter() row = formatter.format_row(arrow_table) tf.debugging.assert_equal(row["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)[0]) tf.debugging.assert_equal(row["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)[0]) col = formatter.format_column(arrow_table) tf.debugging.assert_equal(col, tf.ragged.constant(list_int, dtype=tf.int64)) batch = formatter.format_batch(arrow_table) tf.debugging.assert_equal(batch["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)) tf.debugging.assert_equal(batch["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)) @require_numpy1_on_windows @require_torch @pytest.mark.parametrize( "cast_schema", [ None, [("col_int", pa.int64()), ("col_float", pa.float64())], [("col_int", pa.int32()), ("col_float", pa.float64())], [("col_int", pa.int64()), ("col_float", pa.float32())], ], ) def test_torch_formatter_sets_default_dtypes(cast_schema, arrow_table): import torch from datasets.formatting import TorchFormatter if cast_schema: arrow_table = arrow_table.cast(pa.schema(cast_schema)) arrow_table_dict = arrow_table.to_pydict() list_int = arrow_table_dict["col_int"] list_float = arrow_table_dict["col_float"] formatter = TorchFormatter() row = formatter.format_row(arrow_table) torch.testing.assert_close(row["col_int"], torch.tensor(list_int, dtype=torch.int64)[0]) torch.testing.assert_close(row["col_float"], torch.tensor(list_float, dtype=torch.float32)[0]) col = formatter.format_column(arrow_table) torch.testing.assert_close(col, torch.tensor(list_int, dtype=torch.int64)) batch = formatter.format_batch(arrow_table) torch.testing.assert_close(batch["col_int"], torch.tensor(list_int, dtype=torch.int64)) torch.testing.assert_close(batch["col_float"], torch.tensor(list_float, dtype=torch.float32)) def test_iterable_dataset_of_arrays_format_to_arrow(any_arrays_dataset: IterableDataset): formatted = any_arrays_dataset.with_format("arrow") assert all(isinstance(example, pa.Table) for example in formatted) def test_iterable_dataset_of_arrays_format_to_numpy(any_arrays_dataset: IterableDataset): formatted = any_arrays_dataset.with_format("np") assert all(isinstance(example["array"], np.ndarray) for example in formatted) @require_torch def test_iterable_dataset_of_arrays_format_to_torch(any_arrays_dataset: IterableDataset): import torch formatted = any_arrays_dataset.with_format("torch") assert all(isinstance(example["array"], torch.Tensor) for example in formatted) @require_tf def 
test_iterable_dataset_of_arrays_format_to_tf(any_arrays_dataset: IterableDataset): import tensorflow as tf formatted = any_arrays_dataset.with_format("tf") assert all(isinstance(example["array"], tf.Tensor) for example in formatted) @require_jax def test_iterable_dataset_of_arrays_format_to_jax(any_arrays_dataset: IterableDataset): import jax.numpy as jnp formatted = any_arrays_dataset.with_format("jax") assert all(isinstance(example["array"], jnp.ndarray) for example in formatted)
datasets/tests/test_formatting.py/0
{ "file_path": "datasets/tests/test_formatting.py", "repo_id": "datasets", "token_count": 21615 }
115
import copy import pickle from decimal import Decimal from functools import partial from typing import Union from unittest.mock import MagicMock import numpy as np import pyarrow as pa import pytest from datasets.features import Array2D, ClassLabel, Features, Image, LargeList, List, Value from datasets.features.features import Array2DExtensionType, get_nested_type from datasets.table import ( ConcatenationTable, InMemoryTable, MemoryMappedTable, Table, TableBlock, _in_memory_arrow_table_from_buffer, _in_memory_arrow_table_from_file, _interpolation_search, _memory_mapped_arrow_table_from_file, array_cast, cast_array_to_feature, cast_table_to_schema, concat_tables, embed_array_storage, embed_table_storage, inject_arrow_table_documentation, table_cast, table_iter, ) from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow @pytest.fixture(scope="session") def in_memory_pa_table(arrow_file) -> pa.Table: return pa.ipc.open_stream(arrow_file).read_all() def _to_testing_blocks(table: TableBlock) -> list[list[TableBlock]]: assert len(table) > 2 blocks = [ [table.slice(0, 2)], [table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])], ] return blocks @pytest.fixture(scope="session") def in_memory_blocks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) return _to_testing_blocks(table) @pytest.fixture(scope="session") def memory_mapped_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) return _to_testing_blocks(table) @pytest.fixture(scope="session") def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks): return in_memory_blocks[:1] + memory_mapped_blocks[1:] def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_index_attributes_equal(table: Table, other: Table): assert table._batches == other._batches np.testing.assert_array_equal(table._offsets, other._offsets) assert table._schema == other._schema def add_suffix_to_column_names(table, suffix): return table.rename_columns([f"{name}{suffix}" for name in table.column_names]) def test_inject_arrow_table_documentation(in_memory_pa_table): method = pa.Table.slice def function_to_wrap(*args): return method(*args) args = (0, 1) wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap) assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args) assert "pyarrow.Table" not in wrapped_method.__doc__ assert "Table" in wrapped_method.__doc__ def 
test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_increases(): pa_table = _in_memory_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_in_memory_arrow_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue()) assert in_memory_pa_table == pa_table def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): pa_table = _memory_mapped_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_table_init(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.table == in_memory_pa_table def test_table_validate(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.validate() == in_memory_pa_table.validate() def test_table_equals(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.equals(in_memory_pa_table) def test_table_to_batches(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_batches() == in_memory_pa_table.to_batches() def test_table_to_pydict(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_pydict() == in_memory_pa_table.to_pydict() def test_table_to_string(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_string() == in_memory_pa_table.to_string() def test_table_field(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.field("tokens") == in_memory_pa_table.field("tokens") def test_table_column(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.column("tokens") == in_memory_pa_table.column("tokens") def test_table_itercolumns(in_memory_pa_table): table = Table(in_memory_pa_table) assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns())) assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns()) def test_table_getitem(in_memory_pa_table): table = Table(in_memory_pa_table) assert table[0] == in_memory_pa_table[0] def test_table_len(in_memory_pa_table): table = Table(in_memory_pa_table) assert len(table) == len(in_memory_pa_table) def test_table_str(in_memory_pa_table): table = Table(in_memory_pa_table) assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table") assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table") @pytest.mark.parametrize( "attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"] ) def test_table_attributes(in_memory_pa_table, attribute): table = Table(in_memory_pa_table) assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute) def test_in_memory_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_increases(): table = InMemoryTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() table = 
InMemoryTable.from_buffer(buf_writer.getvalue()) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_pandas(in_memory_pa_table): df = in_memory_pa_table.to_pandas() with assert_arrow_memory_increases(): # with no schema it might infer another order of the fields in the schema table = InMemoryTable.from_pandas(df) assert isinstance(table, InMemoryTable) # by specifying schema we get the same order of features, and so the exact same table table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_arrays(in_memory_pa_table): arrays = list(in_memory_pa_table.columns) names = list(in_memory_pa_table.column_names) table = InMemoryTable.from_arrays(arrays, names=names) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_pydict(in_memory_pa_table): pydict = in_memory_pa_table.to_pydict() with assert_arrow_memory_increases(): table = InMemoryTable.from_pydict(pydict) assert isinstance(table, InMemoryTable) assert table.table == pa.Table.from_pydict(pydict) def test_in_memory_table_from_pylist(in_memory_pa_table): pylist = InMemoryTable(in_memory_pa_table).to_pylist() table = InMemoryTable.from_pylist(pylist) assert isinstance(table, InMemoryTable) assert pylist == table.to_pylist() def test_in_memory_table_from_batches(in_memory_pa_table): batches = list(in_memory_pa_table.to_batches()) table = InMemoryTable.from_batches(batches) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_deepcopy(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_in_memory_table_pickle(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert_index_attributes_equal(table, unpickled_table) @slow def test_in_memory_table_pickle_big_table(): big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)}) length = len(big_table_4GB) big_table_4GB = pickle.dumps(big_table_4GB) big_table_4GB = pickle.loads(big_table_4GB) assert len(big_table_4GB) == length def test_in_memory_table_slice(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, InMemoryTable) def test_in_memory_table_filter(in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = InMemoryTable(in_memory_pa_table).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, InMemoryTable) def test_in_memory_table_flatten(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, InMemoryTable) def test_in_memory_table_combine_chunks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, InMemoryTable) def 
test_in_memory_table_cast(in_memory_pa_table): assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = InMemoryTable(in_memory_pa_table).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, InMemoryTable) def test_in_memory_table_cast_reorder_struct(): table = InMemoryTable( pa.Table.from_pydict( { "top": [ { "foo": "a", "bar": "b", } ] } ) ) schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})}) assert table.cast(schema).schema == schema def test_in_memory_table_cast_with_hf_features(): table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]})) features = Features({"labels": ClassLabel(names=["neg", "pos"])}) schema = features.arrow_schema assert table.cast(schema).schema == schema assert Features.from_arrow_schema(table.cast(schema).schema) == features def test_in_memory_table_replace_schema_metadata(in_memory_pa_table): metadata = {"huggingface": "{}"} table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, InMemoryTable) def test_in_memory_table_add_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_append_column(in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_remove_column(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, InMemoryTable) def test_in_memory_table_set_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_rename_columns(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = InMemoryTable(in_memory_pa_table).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, InMemoryTable) def test_in_memory_table_drop(in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = InMemoryTable(in_memory_pa_table).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, InMemoryTable) def test_memory_mapped_table_init(arrow_file, in_memory_pa_table): table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) 
assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert len(table) == 1 for method, args, kwargs in replays: in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs) assert table.table == in_memory_pa_table assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_deepcopy(arrow_file): table = MemoryMappedTable.from_file(arrow_file) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.path == copied_table.path assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_memory_mapped_table_pickle(arrow_file): table = MemoryMappedTable.from_file(arrow_file) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.path == table.path assert_index_attributes_equal(table, unpickled_table) def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_pickle_applies_replay(arrow_file): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert isinstance(table, MemoryMappedTable) assert table.replays == replays assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, MemoryMappedTable) assert table.replays == [("slice", (1, 2), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = MemoryMappedTable.from_file(arrow_file).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, MemoryMappedTable) assert table.replays == [("filter", (mask,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # filter DOES increase memory # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, 
MemoryMappedTable) assert table.replays == [("flatten", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, MemoryMappedTable) assert table.replays == [("combine_chunks", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table): assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = MemoryMappedTable.from_file(arrow_file).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, MemoryMappedTable) assert table.replays == [("cast", (schema,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # cast DOES increase memory when converting integers precision for example # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table): metadata = {"huggingface": "{}"} table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, MemoryMappedTable) assert table.replays == [("replace_schema_metadata", (metadata,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("add_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("append_column", (field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, MemoryMappedTable) assert table.replays == [("remove_column", (0,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = 
MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("set_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = MemoryMappedTable.from_file(arrow_file).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("rename_columns", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = MemoryMappedTable.from_file(arrow_file).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("drop", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_init( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = ( in_memory_blocks if blocks_type == "in_memory" else memory_mapped_blocks if blocks_type == "memory_mapped" else mixed_in_memory_and_memory_mapped_blocks ) table = ConcatenationTable(in_memory_pa_table, blocks) assert table.table == in_memory_pa_table assert table.blocks == blocks def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks): assert len(in_memory_pa_table) > 2 in_memory_table = InMemoryTable(in_memory_pa_table) t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2) table = ConcatenationTable.from_blocks(in_memory_table) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([t1, t2]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([[t1], [t2]]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks(in_memory_blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_from_blocks_doesnt_increase_memory( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_blocks(blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table if blocks_type == "in_memory": assert table.blocks == [[InMemoryTable(in_memory_pa_table)]] else: assert table.blocks == blocks 
@pytest.mark.parametrize("axis", [0, 1]) def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file): in_memory_table = InMemoryTable(in_memory_pa_table) concatenation_table = ConcatenationTable.from_blocks(in_memory_table) memory_mapped_table = MemoryMappedTable.from_file(arrow_file) tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table] if axis == 0: expected_table = pa.concat_tables([in_memory_pa_table] * len(tables)) else: # avoids error due to duplicate column names tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)] expected_table = in_memory_pa_table for table in tables[1:]: for name, col in zip(table.column_names, table.columns): expected_table = expected_table.append_column(name, col) with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_tables(tables, axis=axis) assert isinstance(table, ConcatenationTable) assert table.table == expected_table # because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable assert len(table.blocks) == 1 if axis == 1 else 2 assert len(table.blocks[0]) == 1 if axis == 0 else 2 assert axis == 1 or len(table.blocks[1]) == 1 assert isinstance(table.blocks[0][0], InMemoryTable) assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable) def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) t1 = table.slice(0, 2) t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names]) concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ConcatenationTable.from_blocks([[t2], [t2]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t2], [t2]]), ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_deepcopy( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.blocks == copied_table.blocks assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_pickle( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.blocks == table.blocks assert_index_attributes_equal(table, 
unpickled_table) def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table): input_features = Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema t0 = in_memory_pa_table.replace_schema_metadata(intput_schema.metadata) t1 = MemoryMappedTable.from_file(arrow_file) tables = [t0, t1] concatenated_table = concat_tables(tables, axis=0) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_slice( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, ConcatenationTable) def test_concatenation_table_slice_mixed_schemas_vertically(arrow_file): t1 = MemoryMappedTable.from_file(arrow_file) t2 = InMemoryTable.from_pydict({"additional_column": ["foo"]}) expected = pa.table( { **{column: values + [None] for column, values in t1.to_pydict().items()}, "additional_column": [None] * len(t1) + ["foo"], } ) blocks = [[t1], [t2]] table = ConcatenationTable.from_blocks(blocks) assert table.to_pydict() == expected.to_pydict() assert isinstance(table, ConcatenationTable) reloaded = pickle.loads(pickle.dumps(table)) assert reloaded.to_pydict() == expected.to_pydict() assert isinstance(reloaded, ConcatenationTable) reloaded = pickle.loads(pickle.dumps(table.slice(1, 2))) assert reloaded.to_pydict() == expected.slice(1, 2).to_pydict() assert isinstance(reloaded, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_filter( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = ConcatenationTable.from_blocks(blocks).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_flatten( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_combine_chunks( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": 
mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_cast( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types assert pa.int64() in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) schema = pa.schema( { k: v if v != pa.int64() else pa.int32() for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concat_tables_cast_with_features_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] input_features = Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema concatenated_table = ConcatenationTable.from_blocks(blocks).cast(intput_schema) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_replace_schema_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] metadata = {"huggingface": "{}"} table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_add_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).add_column(i, field_, column) 
# assert table.table == in_memory_pa_table.add_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_append_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).append_column(field_, column) # assert table.table == in_memory_pa_table.append_column(field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.append_column(field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_remove_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_set_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).set_column(i, field_, column) # assert table.table == in_memory_pa_table.set_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_rename_columns( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = ConcatenationTable.from_blocks(blocks).rename_columns(names) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table.rename_columns(names) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_drop( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] names = [in_memory_pa_table.column_names[0]] table = 
ConcatenationTable.from_blocks(blocks).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, ConcatenationTable) def test_concat_tables(arrow_file, in_memory_pa_table): t0 = in_memory_pa_table t1 = InMemoryTable(t0) t2 = MemoryMappedTable.from_file(arrow_file) t3 = ConcatenationTable.from_blocks(t1) tables = [t0, t1, t2, t3] concatenated_table = concat_tables(tables, axis=0) assert concatenated_table.table == pa.concat_tables([t0] * 4) assert concatenated_table.table.shape == (40, 4) assert isinstance(concatenated_table, ConcatenationTable) assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable) assert isinstance(concatenated_table.blocks[2][0], InMemoryTable) # add suffix to avoid error due to duplicate column names concatenated_table = concat_tables( [add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1 ) assert concatenated_table.table.shape == (10, 16) assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable) assert isinstance(concatenated_table.blocks[0][2], InMemoryTable) def _interpolation_search_ground_truth(arr: list[int], x: int) -> Union[int, IndexError]: for i in range(len(arr) - 1): if arr[i] <= x < arr[i + 1]: return i return IndexError class _ListWithGetitemCounter(list): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.unique_getitem_calls = set() def __getitem__(self, i): out = super().__getitem__(i) self.unique_getitem_calls.add(i) return out @property def getitem_unique_count(self): return len(self.unique_getitem_calls) @pytest.mark.parametrize( "arr, x", [(np.arange(0, 14, 3), x) for x in range(-1, 22)] + [(list(np.arange(-5, 5)), x) for x in range(-6, 6)] + [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]] + [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 1_0001]], ) def test_interpolation_search(arr, x): ground_truth = _interpolation_search_ground_truth(arr, x) if isinstance(ground_truth, int): arr = _ListWithGetitemCounter(arr) output = _interpolation_search(arr, x) assert ground_truth == output # 4 maximum unique getitem calls is expected for the cases of this test # but it can be bigger for large and messy arrays. 
assert arr.getitem_unique_count <= 4 else: with pytest.raises(ground_truth): _interpolation_search(arr, x) def test_indexed_table_mixin(): n_rows_per_chunk = 10 n_chunks = 4 pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk}) pa_table = pa.concat_tables([pa_table] * n_chunks) table = Table(pa_table) assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks)) assert table.fast_slice(5) == pa_table.slice(5) assert table.fast_slice(2, 13) == pa_table.slice(2, 13) def test_cast_integer_array_to_features(): arr = pa.array([[0, 1]]) assert cast_array_to_feature(arr, List(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, List(Value("string")), allow_decimal_to_str=False).type == pa.list_(pa.string()) with pytest.raises(TypeError): cast_array_to_feature(arr, List(Value("string")), allow_primitive_to_str=False) def test_cast_float_array_to_features(): arr = pa.array([[0.0, 1.0]]) assert cast_array_to_feature(arr, List(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, List(Value("string")), allow_decimal_to_str=False).type == pa.list_(pa.string()) with pytest.raises(TypeError): cast_array_to_feature(arr, List(Value("string")), allow_primitive_to_str=False) def test_cast_boolean_array_to_features(): arr = pa.array([[False, True]]) assert cast_array_to_feature(arr, List(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, List(Value("string")), allow_decimal_to_str=False).type == pa.list_(pa.string()) with pytest.raises(TypeError): cast_array_to_feature(arr, List(Value("string")), allow_primitive_to_str=False) def test_cast_decimal_array_to_features(): arr = pa.array([[Decimal(0), Decimal(1)]]) assert cast_array_to_feature(arr, List(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, List(Value("string")), allow_primitive_to_str=False).type == pa.list_( pa.string() ) with pytest.raises(TypeError): cast_array_to_feature(arr, List(Value("string")), allow_decimal_to_str=False) @pytest.mark.parametrize( "array_list, expected_list", [ ([{"age": 25}, {"age": 63}], [{"age": 25, "name": None}, {"age": 63, "name": None}]), ([{}, {}], [{"age": None, "name": None}, {"age": None, "name": None}]), # completely empty struct ], ) def test_cast_array_to_feature_with_struct_with_missing_fields(array_list, expected_list): arr = pa.array(array_list) feature = {"age": Value("int32"), "name": Value("string")} cast_array = cast_array_to_feature(arr, feature) assert cast_array.type == pa.struct({"age": pa.int32(), "name": pa.string()}) assert cast_array.to_pylist() == expected_list def test_cast_array_to_features_nested(): arr = pa.array([[{"foo": [0]}]]) assert cast_array_to_feature(arr, List({"foo": List(Value("string"))})).type == pa.list_( pa.struct({"foo": pa.list_(pa.string())}) ) def test_cast_array_to_features_to_nested_with_no_fields(): arr = pa.array([{}]) assert cast_array_to_feature(arr, {}).type == pa.struct({}) assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist() def test_cast_array_to_features_nested_with_nulls(): # same type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) casted_array = cast_array_to_feature(arr, {"foo": List(List(Value("int64")))}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}) assert casted_array.to_pylist() == arr.to_pylist() # different type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) 
casted_array = cast_array_to_feature(arr, {"foo": List(List(Value("int32")))}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))}) assert casted_array.to_pylist() == [{"foo": [None, [0]]}] def test_cast_array_to_features_to_null_type(): # same type arr = pa.array([[None, None]]) assert cast_array_to_feature(arr, List(Value("null"))).type == pa.list_(pa.null()) # different type arr = pa.array([[None, 1]]) with pytest.raises(TypeError): cast_array_to_feature(arr, List(Value("null"))) def test_cast_array_to_features_array_xd(): # same storage type arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2)) casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32") # different storage type casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32") def test_cast_array_to_features_sequence_classlabel(): arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64())) assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string())) assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test empty arrays arr = pa.array([[], []], pa.list_(pa.int64())) assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], []], pa.list_(pa.string())) assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test invalid class labels arr = pa.array([[2]], pa.list_(pa.int64())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))) arr = pa.array([["baz"]], pa.list_(pa.string())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, List(ClassLabel(names=["foo", "bar"]))) @pytest.mark.parametrize( "arr", [ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32(), 3)), ], ) @pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)]) @pytest.mark.parametrize("target_value_feature", [Value("int64")]) def test_cast_fixed_size_list_array_to_features_sequence(arr, slice, target_value_feature): arr = arr if slice is None else arr[slice] # Fixed size list casted_array = cast_array_to_feature(arr, List(target_value_feature, length=arr.type.list_size)) assert casted_array.type == get_nested_type(List(target_value_feature, length=arr.type.list_size)) assert casted_array.to_pylist() == arr.to_pylist() with pytest.raises(TypeError): cast_array_to_feature(arr, List(target_value_feature, length=arr.type.list_size + 1)) # Variable size list casted_array = cast_array_to_feature(arr, List(target_value_feature)) assert casted_array.type == get_nested_type(List(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() casted_array = cast_array_to_feature(arr, List(target_value_feature)) assert casted_array.type == get_nested_type(List(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() @pytest.mark.parametrize( "arr", [ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32())), ], ) @pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)]) 
@pytest.mark.parametrize("target_value_feature", [Value("int64")]) def test_cast_list_array_to_features_sequence(arr, slice, target_value_feature): arr = arr if slice is None else arr[slice] # Variable size list casted_array = cast_array_to_feature(arr, List(target_value_feature)) assert casted_array.type == get_nested_type(List(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() casted_array = cast_array_to_feature(arr, List(target_value_feature)) assert casted_array.type == get_nested_type(List(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() # Fixed size list list_size = arr.value_lengths().drop_null()[0].as_py() if arr.value_lengths().drop_null() else 2 casted_array = cast_array_to_feature(arr, List(target_value_feature, length=list_size)) assert casted_array.type == get_nested_type(List(target_value_feature, length=list_size)) assert casted_array.to_pylist() == arr.to_pylist() @pytest.mark.parametrize("sequence_feature_dtype", ["string", "int64"]) @pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"]) @pytest.mark.parametrize("list_within_struct", [False, True]) def test_cast_array_to_feature_with_list_array_and_sequence_feature( list_within_struct, from_list_type, sequence_feature_dtype ): list_feature = { "list": List, "fixed_size_list": partial(List, length=2), "large_list": LargeList, } list_type = { "list": pa.list_, "fixed_size_list": partial(pa.list_, list_size=2), "large_list": pa.large_list, } primitive_type = { "string": pa.string(), "int64": pa.int64(), } to_type = "list" array_data = [0, 1] array_type = list_type[from_list_type](pa.int64()) sequence_feature = list_feature[from_list_type](Value(sequence_feature_dtype)) expected_array_type = list_type[from_list_type](primitive_type[sequence_feature_dtype]) if list_within_struct: array_data = {"col_1": array_data} array_type = pa.struct({"col_1": array_type}) sequence_feature = {"col_1": sequence_feature} expected_array_type = pa.struct({"col_1": expected_array_type}) array_data = [array_data] * 2 array_type = list_type[from_list_type](array_type) feature = list_feature[to_type](sequence_feature) expected_array_type = list_type[to_type](expected_array_type) array = pa.array([array_data], type=array_type) cast_array = cast_array_to_feature(array, feature) assert cast_array.type == expected_array_type @pytest.mark.parametrize("large_list_feature_value_type", ["string", "int64"]) @pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"]) def test_cast_array_to_feature_with_list_array_and_large_list_feature(from_list_type, large_list_feature_value_type): list_type = { "list": pa.list_, "fixed_size_list": partial(pa.list_, list_size=2), "large_list": pa.large_list, } primitive_type = { "string": pa.string(), "int64": pa.int64(), } to_type = "large_list" array_data = [0, 1] array_type = list_type[from_list_type](pa.int64()) large_list_feature_value = Value(large_list_feature_value_type) expected_array_type = list_type[to_type](primitive_type[large_list_feature_value_type]) feature = LargeList(large_list_feature_value) array = pa.array([array_data], type=array_type) cast_array = cast_array_to_feature(array, feature) assert cast_array.type == expected_array_type def test_cast_array_xd_to_features_sequence(): arr = np.random.randint(0, 10, size=(8, 2, 3)).tolist() arr = Array2DExtensionType(shape=(2, 3), dtype="int64").wrap_array(pa.array(arr, pa.list_(pa.list_(pa.int64())))) arr = pa.ListArray.from_arrays([0, None, 4, 8], arr) # 
Variable size list casted_array = cast_array_to_feature(arr, List(Array2D(shape=(2, 3), dtype="int32"))) assert casted_array.type == get_nested_type(List(Array2D(shape=(2, 3), dtype="int32"))) assert casted_array.to_pylist() == arr.to_pylist() # Fixed size list casted_array = cast_array_to_feature(arr, List(Array2D(shape=(2, 3), dtype="int32"), length=4)) assert casted_array.type == get_nested_type(List(Array2D(shape=(2, 3), dtype="int32"), length=4)) assert casted_array.to_pylist() == arr.to_pylist() def test_embed_array_storage(image_file): array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type) embedded_images_array = embed_array_storage(array, Image()) assert isinstance(embedded_images_array.to_pylist()[0]["path"], str) assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg" assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes) def test_embed_array_storage_nested(image_file): array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type)) embedded_images_array = embed_array_storage(array, List(Image())) assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes) array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type})) embedded_images_array = embed_array_storage(array, {"foo": Image()}) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes) @pytest.mark.parametrize( "array, feature, expected_embedded_array_type", [ ( pa.array([[{"path": "image_path"}]], type=pa.list_(Image.pa_type)), List(Image()), pa.types.is_list, ), ( pa.array([[{"path": "image_path"}]], type=pa.large_list(Image.pa_type)), LargeList(Image()), pa.types.is_large_list, ), ], ) def test_embed_array_storage_with_list_types(array, feature, expected_embedded_array_type, monkeypatch): mock_embed_storage = MagicMock( return_value=pa.StructArray.from_arrays( [pa.array([b"image_bytes"], type=pa.binary()), pa.array(["image_path"], type=pa.string())], ["bytes", "path"], ) ) monkeypatch.setattr(Image, "embed_storage", mock_embed_storage) embedded_images_array = embed_array_storage(array, feature) assert expected_embedded_array_type(embedded_images_array.type) assert embedded_images_array.to_pylist() == [[{"bytes": b"image_bytes", "path": "image_path"}]] def test_embed_table_storage(image_file): features = Features({"image": Image()}) table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema) embedded_images_table = embed_table_storage(table) assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str) assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes) @pytest.mark.parametrize( "table", [ InMemoryTable(pa.table({"foo": range(10)})), InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])), InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])), ], ) @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_table_iter(table, batch_size, drop_last_batch): num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size subtables = list(table_iter(table, batch_size=batch_size, 
drop_last_batch=drop_last_batch)) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for subtable in subtables) else: assert all(len(subtable) == batch_size for subtable in subtables[:-1]) assert len(subtables[-1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables(subtables) assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() @pytest.mark.parametrize("to_type", ["list", "fixed_size_list", "large_list"]) @pytest.mark.parametrize("from_type", ["list", "fixed_size_list", "large_list"]) def test_array_cast(from_type, to_type): array_type = { "list": pa.list_(pa.int64()), "fixed_size_list": pa.list_(pa.int64(), 2), "large_list": pa.large_list(pa.int64()), } arr = pa.array([[0, 1]], type=array_type[from_type]) cast_arr = array_cast(arr, array_type[to_type]) assert cast_arr.type == array_type[to_type] assert cast_arr.values == arr.values def test_cast_table_to_schema_with_missing_fields(): table = pa.table({"age": [25, 63]}) schema = pa.schema({"age": pa.int32(), "name": pa.string()}) cast_table = cast_table_to_schema(table, schema) assert cast_table.schema == pa.schema({"age": pa.int32(), "name": pa.string()}) assert cast_table.to_pydict() == {"age": [25, 63], "name": [None, None]}
datasets/tests/test_table.py/0
{ "file_path": "datasets/tests/test_table.py", "repo_id": "datasets", "token_count": 24680 }
116
# Files for typos # Instruction: https://github.com/marketplace/actions/typos-action#getting-started [default.extend-identifiers] [default.extend-words] NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py nd="np" # nd may be np (numpy) parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py [files] extend-exclude = ["_typos.toml"]
diffusers/_typos.toml/0
{ "file_path": "diffusers/_typos.toml", "repo_id": "diffusers", "token_count": 151 }
117
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # IP-Adapter [IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder. <Tip> Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and you can see how to use it in the [usage](../../using-diffusers/ip_adapter) guide. </Tip> ## IPAdapterMixin [[autodoc]] loaders.ip_adapter.IPAdapterMixin ## SD3IPAdapterMixin [[autodoc]] loaders.ip_adapter.SD3IPAdapterMixin - all - is_ip_adapter_active ## IPAdapterMaskProcessor [[autodoc]] image_processor.IPAdapterMaskProcessor
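For quick orientation alongside the API reference above, here is a minimal, hedged sketch of attaching an IP-Adapter to a pipeline with `load_ip_adapter` from [`IPAdapterMixin`]. The checkpoint names, image URL, scale, and prompt below are illustrative assumptions rather than required values.

```python
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image

# Load a base pipeline, then attach an IP-Adapter checkpoint to it.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.set_ip_adapter_scale(0.6)  # how strongly the image prompt influences generation

# The image prompt is passed alongside the text prompt.
ip_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png")
image = pipeline(
    prompt="a polar bear sitting in a chair drinking a milkshake",
    ip_adapter_image=ip_image,
    num_inference_steps=50,
).images[0]
image.save("ip_adapter_output.png")
```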
diffusers/docs/source/en/api/loaders/ip_adapter.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/ip_adapter.md", "repo_id": "diffusers", "token_count": 388 }
118
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Tiny AutoEncoder Tiny AutoEncoder for Stable Diffusion (TAESD) was introduced in [madebyollin/taesd](https://github.com/madebyollin/taesd) by Ollin Boer Bohan. It is a tiny distilled version of Stable Diffusion's VAE that can quickly decode the latents in a [`StableDiffusionPipeline`] or [`StableDiffusionXLPipeline`] almost instantly. To use with Stable Diffusion v-2.1: ```python import torch from diffusers import DiffusionPipeline, AutoencoderTiny pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16 ) pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "slice of delicious New York-style berry cheesecake" image = pipe(prompt, num_inference_steps=25).images[0] image ``` To use with Stable Diffusion XL 1.0 ```python import torch from diffusers import DiffusionPipeline, AutoencoderTiny pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "slice of delicious New York-style berry cheesecake" image = pipe(prompt, num_inference_steps=25).images[0] image ``` ## AutoencoderTiny [[autodoc]] AutoencoderTiny ## AutoencoderTinyOutput [[autodoc]] models.autoencoders.autoencoder_tiny.AutoencoderTinyOutput
diffusers/docs/source/en/api/models/autoencoder_tiny.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoder_tiny.md", "repo_id": "diffusers", "token_count": 670 }
119
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNetModel The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection. The abstract from the paper is: *We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.* ## Loading from the original format By default the [`ControlNetModel`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded from the original format using [`FromOriginalModelMixin.from_single_file`] as follows: ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path controlnet = ControlNetModel.from_single_file(url) url = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet) ``` ## ControlNetModel [[autodoc]] ControlNetModel ## ControlNetOutput [[autodoc]] models.controlnets.controlnet.ControlNetOutput ## FlaxControlNetModel [[autodoc]] FlaxControlNetModel ## FlaxControlNetOutput [[autodoc]] models.controlnets.controlnet_flax.FlaxControlNetOutput
diffusers/docs/source/en/api/models/controlnet.md/0
{ "file_path": "diffusers/docs/source/en/api/models/controlnet.md", "repo_id": "diffusers", "token_count": 782 }
120
<!--Copyright 2025 The HuggingFace Team, The InstantX Team, and the XLabs Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNet with Flux.1 <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> </div> FluxControlNetPipeline is an implementation of ControlNet for Flux.1. ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process. The abstract from the paper is: *We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.* This controlnet code is implemented by [The InstantX Team](https://huggingface.co/InstantX). You can find pre-trained checkpoints for Flux-ControlNet in the table below: | ControlNet type | Developer | Link | | -------- | ---------- | ---- | | Canny | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny) | | Depth | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Depth) | | Union | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union) | XLabs ControlNets are also supported, which was contributed by the [XLabs team](https://huggingface.co/XLabs-AI). 
| ControlNet type | Developer | Link | | -------- | ---------- | ---- | | Canny | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers) | | Depth | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers) | | HED | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-hed-diffusers) | <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## FluxControlNetPipeline [[autodoc]] FluxControlNetPipeline - all - __call__ ## FluxPipelineOutput [[autodoc]] pipelines.flux.pipeline_output.FluxPipelineOutput
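As a usage sketch to complement the reference above, the snippet below loads one of the InstantX checkpoints listed earlier and conditions generation on a canny edge map. The conditioning image URL, prompt, and generation parameters are illustrative assumptions, not requirements.

```python
import torch
from diffusers import FluxControlNetPipeline, FluxControlNetModel
from diffusers.utils import load_image

# Load the canny ControlNet and attach it to the Flux.1-dev base model.
controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

# A pre-computed canny edge map serves as the control image.
control_image = load_image("https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny/resolve/main/canny.jpg")

image = pipe(
    prompt="A girl in the city, 25 years old, cool, futuristic",
    control_image=control_image,
    controlnet_conditioning_scale=0.6,
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("flux_controlnet.png")
```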
diffusers/docs/source/en/api/pipelines/controlnet_flux.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/controlnet_flux.md", "repo_id": "diffusers", "token_count": 1159 }
121
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Flux <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white%22"> </div> Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) by the creators of Flux, Black Forest Labs. Original model checkpoints for Flux can be found [here](https://huggingface.co/black-forest-labs). Original inference code can be found [here](https://github.com/black-forest-labs/flux). <Tip> Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c). [Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs. </Tip> Flux comes in the following variants: | model type | model id | |:----------:|:--------:| | Timestep-distilled | [`black-forest-labs/FLUX.1-schnell`](https://huggingface.co/black-forest-labs/FLUX.1-schnell) | | Guidance-distilled | [`black-forest-labs/FLUX.1-dev`](https://huggingface.co/black-forest-labs/FLUX.1-dev) | | Fill Inpainting/Outpainting (Guidance-distilled) | [`black-forest-labs/FLUX.1-Fill-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev) | | Canny Control (Guidance-distilled) | [`black-forest-labs/FLUX.1-Canny-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev) | | Depth Control (Guidance-distilled) | [`black-forest-labs/FLUX.1-Depth-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev) | | Canny Control (LoRA) | [`black-forest-labs/FLUX.1-Canny-dev-lora`](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev-lora) | | Depth Control (LoRA) | [`black-forest-labs/FLUX.1-Depth-dev-lora`](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev-lora) | | Redux (Adapter) | [`black-forest-labs/FLUX.1-Redux-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev) | | Kontext | [`black-forest-labs/FLUX.1-kontext`](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev) | All checkpoints have different usage which we detail below. ### Timestep-distilled * `max_sequence_length` cannot be more than 256. * `guidance_scale` needs to be 0. * As this is a timestep-distilled model, it benefits from fewer sampling steps. 
```python import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) pipe.enable_model_cpu_offload() prompt = "A cat holding a sign that says hello world" out = pipe( prompt=prompt, guidance_scale=0., height=768, width=1360, num_inference_steps=4, max_sequence_length=256, ).images[0] out.save("image.png") ``` ### Guidance-distilled * The guidance-distilled variant takes about 50 sampling steps for good-quality generation. * It doesn't have any limitations around the `max_sequence_length`. ```python import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) pipe.enable_model_cpu_offload() prompt = "a tiny astronaut hatching from an egg on the moon" out = pipe( prompt=prompt, guidance_scale=3.5, height=768, width=1360, num_inference_steps=50, ).images[0] out.save("image.png") ``` ### Fill Inpainting/Outpainting * Flux Fill pipeline does not require `strength` as an input like regular inpainting pipelines. * It supports both inpainting and outpainting. ```python import torch from diffusers import FluxFillPipeline from diffusers.utils import load_image image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup_mask.png") repo_id = "black-forest-labs/FLUX.1-Fill-dev" pipe = FluxFillPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16).to("cuda") image = pipe( prompt="a white paper cup", image=image, mask_image=mask, height=1632, width=1232, max_sequence_length=512, generator=torch.Generator("cpu").manual_seed(0) ).images[0] image.save(f"output.png") ``` ### Canny Control **Note:** `black-forest-labs/Flux.1-Canny-dev` is _not_ a [`ControlNetModel`] model. ControlNet models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Canny Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible. ```python # !pip install -U controlnet-aux import torch from controlnet_aux import CannyDetector from diffusers import FluxControlPipeline from diffusers.utils import load_image pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-Canny-dev", torch_dtype=torch.bfloat16).to("cuda") prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png") processor = CannyDetector() control_image = processor(control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024) image = pipe( prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=50, guidance_scale=30.0, ).images[0] image.save("output.png") ``` Canny Control is also possible with a LoRA variant of this condition. 
The usage is as follows: ```python # !pip install -U controlnet-aux import torch from controlnet_aux import CannyDetector from diffusers import FluxControlPipeline from diffusers.utils import load_image pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda") pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png") processor = CannyDetector() control_image = processor(control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024) image = pipe( prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=50, guidance_scale=30.0, ).images[0] image.save("output.png") ``` ### Depth Control **Note:** `black-forest-labs/Flux.1-Depth-dev` is _not_ a ControlNet model. [`ControlNetModel`] models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Depth Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible. ```python # !pip install git+https://github.com/huggingface/image_gen_aux import torch from diffusers import FluxControlPipeline, FluxTransformer2DModel from diffusers.utils import load_image from image_gen_aux import DepthPreprocessor pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-Depth-dev", torch_dtype=torch.bfloat16).to("cuda") prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png") processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf") control_image = processor(control_image)[0].convert("RGB") image = pipe( prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=30, guidance_scale=10.0, generator=torch.Generator().manual_seed(42), ).images[0] image.save("output.png") ``` Depth Control is also possible with a LoRA variant of this condition. The usage is as follows: ```python # !pip install git+https://github.com/huggingface/image_gen_aux import torch from diffusers import FluxControlPipeline, FluxTransformer2DModel from diffusers.utils import load_image from image_gen_aux import DepthPreprocessor pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda") pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora") prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." 
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png") processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf") control_image = processor(control_image)[0].convert("RGB") image = pipe( prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=30, guidance_scale=10.0, generator=torch.Generator().manual_seed(42), ).images[0] image.save("output.png") ``` ### Redux * Flux Redux pipeline is an adapter for FLUX.1 base models. It can be used with both flux-dev and flux-schnell, for image-to-image generation. * You can first use the `FluxPriorReduxPipeline` to get the `prompt_embeds` and `pooled_prompt_embeds`, and then feed them into the `FluxPipeline` for image-to-image generation. * When use `FluxPriorReduxPipeline` with a base pipeline, you can set `text_encoder=None` and `text_encoder_2=None` in the base pipeline, in order to save VRAM. ```python import torch from diffusers import FluxPriorReduxPipeline, FluxPipeline from diffusers.utils import load_image device = "cuda" dtype = torch.bfloat16 repo_redux = "black-forest-labs/FLUX.1-Redux-dev" repo_base = "black-forest-labs/FLUX.1-dev" pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(repo_redux, torch_dtype=dtype).to(device) pipe = FluxPipeline.from_pretrained( repo_base, text_encoder=None, text_encoder_2=None, torch_dtype=torch.bfloat16 ).to(device) image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png") pipe_prior_output = pipe_prior_redux(image) images = pipe( guidance_scale=2.5, num_inference_steps=50, generator=torch.Generator("cpu").manual_seed(0), **pipe_prior_output, ).images images[0].save("flux-redux.png") ``` ### Kontext Flux Kontext is a model that allows in-context control of the image generation process, allowing for editing, refinement, relighting, style transfer, character customization, and more. ```python import torch from diffusers import FluxKontextPipeline from diffusers.utils import load_image pipe = FluxKontextPipeline.from_pretrained( "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 ) pipe.to("cuda") image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png").convert("RGB") prompt = "Make Pikachu hold a sign that says 'Black Forest Labs is awesome', yarn art style, detailed, vibrant colors" image = pipe( image=image, prompt=prompt, guidance_scale=2.5, generator=torch.Generator().manual_seed(42), ).images[0] image.save("flux-kontext.png") ``` Flux Kontext comes with an integrity safety checker, which should be run after the image generation step. To run the safety checker, install the official repository from [black-forest-labs/flux](https://github.com/black-forest-labs/flux) and add the following code: ```python from flux.content_filters import PixtralContentFilter # ... pipeline invocation to generate images integrity_checker = PixtralContentFilter(torch.device("cuda")) image_ = np.array(image) / 255.0 image_ = 2 * image_ - 1 image_ = torch.from_numpy(image_).to("cuda", dtype=torch.float32).unsqueeze(0).permute(0, 3, 1, 2) if integrity_checker.test_image(image_): raise ValueError("Your image has been flagged. Choose another prompt/image or try again.") ``` ### Kontext Inpainting `FluxKontextInpaintPipeline` enables image modification within a fixed mask region. It currently supports both text-based conditioning and image-reference conditioning. 
<hfoptions id="kontext-inpaint"> <hfoption id="text-only"> ```python import torch from diffusers import FluxKontextInpaintPipeline from diffusers.utils import load_image prompt = "Change the yellow dinosaur to green one" img_url = ( "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/dinosaur_input.jpeg?raw=true" ) mask_url = ( "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/dinosaur_mask.png?raw=true" ) source = load_image(img_url) mask = load_image(mask_url) pipe = FluxKontextInpaintPipeline.from_pretrained( "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 ) pipe.to("cuda") image = pipe(prompt=prompt, image=source, mask_image=mask, strength=1.0).images[0] image.save("kontext_inpainting_normal.png") ``` </hfoption> <hfoption id="image conditioning"> ```python import torch from diffusers import FluxKontextInpaintPipeline from diffusers.utils import load_image pipe = FluxKontextInpaintPipeline.from_pretrained( "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 ) pipe.to("cuda") prompt = "Replace this ball" img_url = "https://images.pexels.com/photos/39362/the-ball-stadion-football-the-pitch-39362.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500" mask_url = "https://github.com/ZenAI-Vietnam/Flux-Kontext-pipelines/blob/main/assets/ball_mask.png?raw=true" image_reference_url = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTah3x6OL_ECMBaZ5ZlJJhNsyC-OSMLWAI-xw&s" source = load_image(img_url) mask = load_image(mask_url) image_reference = load_image(image_reference_url) mask = pipe.mask_processor.blur(mask, blur_factor=12) image = pipe( prompt=prompt, image=source, mask_image=mask, image_reference=image_reference, strength=1.0 ).images[0] image.save("kontext_inpainting_ref.png") ``` </hfoption> </hfoptions> ## Combining Flux Turbo LoRAs with Flux Control, Fill, and Redux We can combine Flux Turbo LoRAs with Flux Control and other pipelines like Fill and Redux to enable few-steps' inference. The example below shows how to do that for Flux Control LoRA for depth and turbo LoRA from [`ByteDance/Hyper-SD`](https://hf.co/ByteDance/Hyper-SD). ```py from diffusers import FluxControlPipeline from image_gen_aux import DepthPreprocessor from diffusers.utils import load_image from huggingface_hub import hf_hub_download import torch control_pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) control_pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora", adapter_name="depth") control_pipe.load_lora_weights( hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd" ) control_pipe.set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125]) control_pipe.enable_model_cpu_offload() prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." 
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png") processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf") control_image = processor(control_image)[0].convert("RGB") image = control_pipe( prompt=prompt, control_image=control_image, height=1024, width=1024, num_inference_steps=8, guidance_scale=10.0, generator=torch.Generator().manual_seed(42), ).images[0] image.save("output.png") ``` ## Note about `unload_lora_weights()` when using Flux LoRAs When unloading the Control LoRA weights, call `pipe.unload_lora_weights(reset_to_overwritten_params=True)` to reset the `pipe.transformer` completely back to its original form. The resultant pipeline can then be used with methods like [`DiffusionPipeline.from_pipe`]. More details about this argument are available in [this PR](https://github.com/huggingface/diffusers/pull/10397). ## IP-Adapter <Tip> Check out [IP-Adapter](../../../using-diffusers/ip_adapter) to learn more about how IP-Adapters work. </Tip> An IP-Adapter lets you prompt Flux with images, in addition to the text prompt. This is especially useful when describing complex concepts that are difficult to articulate through text alone and you have reference images. ```python import torch from diffusers import FluxPipeline from diffusers.utils import load_image pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 ).to("cuda") image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_input.jpg").resize((1024, 1024)) pipe.load_ip_adapter( "XLabs-AI/flux-ip-adapter", weight_name="ip_adapter.safetensors", image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14" ) pipe.set_ip_adapter_scale(1.0) image = pipe( width=1024, height=1024, prompt="wearing sunglasses", negative_prompt="", true_cfg_scale=4.0, generator=torch.Generator().manual_seed(4444), ip_adapter_image=image, ).images[0] image.save('flux_ip_adapter_output.jpg') ``` <div class="justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_output.jpg"/> <figcaption class="mt-2 text-sm text-center text-gray-500">IP-Adapter examples with prompt "wearing sunglasses"</figcaption> </div> ## Optimize Flux is a very large model and requires ~50GB of RAM/VRAM to load all the modeling components. Enable some of the optimizations below to lower the memory requirements. ### Group offloading [Group offloading](../../optimization/memory#group-offloading) lowers VRAM usage by offloading groups of internal layers rather than the whole model or weights. You need to use [`~hooks.apply_group_offloading`] on all the model components of a pipeline. The `offload_type` parameter allows you to toggle between block and leaf-level offloading. Setting it to `leaf_level` offloads the lowest leaf-level parameters to the CPU instead of offloading at the module-level. On CUDA devices that support asynchronous data streaming, set `use_stream=True` to overlap data transfer and computation to accelerate inference. > [!TIP] > It is possible to mix block and leaf-level offloading for different components in a pipeline. 
```py import torch from diffusers import FluxPipeline from diffusers.hooks import apply_group_offloading model_id = "black-forest-labs/FLUX.1-dev" dtype = torch.bfloat16 pipe = FluxPipeline.from_pretrained( model_id, torch_dtype=dtype, ) apply_group_offloading( pipe.transformer, offload_type="leaf_level", offload_device=torch.device("cpu"), onload_device=torch.device("cuda"), use_stream=True, ) apply_group_offloading( pipe.text_encoder, offload_device=torch.device("cpu"), onload_device=torch.device("cuda"), offload_type="leaf_level", use_stream=True, ) apply_group_offloading( pipe.text_encoder_2, offload_device=torch.device("cpu"), onload_device=torch.device("cuda"), offload_type="leaf_level", use_stream=True, ) apply_group_offloading( pipe.vae, offload_device=torch.device("cpu"), onload_device=torch.device("cuda"), offload_type="leaf_level", use_stream=True, ) prompt="A cat wearing sunglasses and working as a lifeguard at pool." generator = torch.Generator().manual_seed(181201) image = pipe( prompt, width=576, height=1024, num_inference_steps=30, generator=generator ).images[0] image ``` ### Running FP16 inference Flux can generate high-quality images with FP16 (i.e. to accelerate inference on Turing/Volta GPUs) but produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing text encoders to run with FP32 inference thus removes this output difference. See [here](https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516) for details. FP16 inference code: ```python import torch from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) # can replace schnell with dev # to run on low vram GPUs (i.e. between 4 and 32 GB VRAM) pipe.enable_sequential_cpu_offload() pipe.vae.enable_slicing() pipe.vae.enable_tiling() pipe.to(torch.float16) # casting here instead of in the pipeline constructor because doing so in the constructor loads all models into CPU memory at once prompt = "A cat holding a sign that says hello world" out = pipe( prompt=prompt, guidance_scale=0., height=768, width=1360, num_inference_steps=4, max_sequence_length=256, ).images[0] out.save("image.png") ``` ### Quantization Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have a varying impact on image quality depending on the model. Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`FluxPipeline`] for inference with bitsandbytes.
```py import torch from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, FluxTransformer2DModel, FluxPipeline from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel quant_config = BitsAndBytesConfig(load_in_8bit=True) text_encoder_8bit = T5EncoderModel.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="text_encoder_2", quantization_config=quant_config, torch_dtype=torch.float16, ) quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True) transformer_8bit = FluxTransformer2DModel.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="transformer", quantization_config=quant_config, torch_dtype=torch.float16, ) pipeline = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", text_encoder_2=text_encoder_8bit, transformer=transformer_8bit, torch_dtype=torch.float16, device_map="balanced", ) prompt = "a tiny astronaut hatching from an egg on the moon" image = pipeline(prompt, guidance_scale=3.5, height=768, width=1360, num_inference_steps=50).images[0] image.save("flux.png") ``` ## Single File Loading for the `FluxTransformer2DModel` The `FluxTransformer2DModel` supports loading checkpoints in the original format shipped by Black Forest Labs. This is also useful when trying to load finetunes or quantized versions of the models that have been published by the community. <Tip> `FP8` inference can be brittle depending on the GPU type, CUDA version, and `torch` version that you are using. It is recommended that you use the `optimum-quanto` library in order to run FP8 inference on your machine. </Tip> The following example demonstrates how to run Flux with less than 16GB of VRAM. First install `optimum-quanto` ```shell pip install optimum-quanto ``` Then run the following example ```python import torch from diffusers import FluxTransformer2DModel, FluxPipeline from transformers import T5EncoderModel, CLIPTextModel from optimum.quanto import freeze, qfloat8, quantize bfl_repo = "black-forest-labs/FLUX.1-dev" dtype = torch.bfloat16 transformer = FluxTransformer2DModel.from_single_file("https://huggingface.co/Kijai/flux-fp8/blob/main/flux1-dev-fp8.safetensors", torch_dtype=dtype) quantize(transformer, weights=qfloat8) freeze(transformer) text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype) quantize(text_encoder_2, weights=qfloat8) freeze(text_encoder_2) pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype) pipe.transformer = transformer pipe.text_encoder_2 = text_encoder_2 pipe.enable_model_cpu_offload() prompt = "A cat holding a sign that says hello world" image = pipe( prompt, guidance_scale=3.5, output_type="pil", num_inference_steps=20, generator=torch.Generator("cpu").manual_seed(0) ).images[0] image.save("flux-fp8-dev.png") ``` ## FluxPipeline [[autodoc]] FluxPipeline - all - __call__ ## FluxImg2ImgPipeline [[autodoc]] FluxImg2ImgPipeline - all - __call__ ## FluxInpaintPipeline [[autodoc]] FluxInpaintPipeline - all - __call__ ## FluxControlNetInpaintPipeline [[autodoc]] FluxControlNetInpaintPipeline - all - __call__ ## FluxControlNetImg2ImgPipeline [[autodoc]] FluxControlNetImg2ImgPipeline - all - __call__ ## FluxControlPipeline [[autodoc]] FluxControlPipeline - all - __call__ ## FluxControlImg2ImgPipeline [[autodoc]] FluxControlImg2ImgPipeline - all - __call__ ## FluxPriorReduxPipeline [[autodoc]] FluxPriorReduxPipeline - all - __call__ ## FluxFillPipeline [[autodoc]] FluxFillPipeline - all - __call__ ## FluxKontextPipeline 
[[autodoc]] FluxKontextPipeline - all - __call__ ## FluxKontextInpaintPipeline [[autodoc]] FluxKontextInpaintPipeline - all - __call__
diffusers/docs/source/en/api/pipelines/flux.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/flux.md", "repo_id": "diffusers", "token_count": 9302 }
122
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # SDXL Turbo Stable Diffusion XL (SDXL) Turbo was proposed in [Adversarial Diffusion Distillation](https://stability.ai/research/adversarial-diffusion-distillation) by Axel Sauer, Dominik Lorenz, Andreas Blattmann, and Robin Rombach. The abstract from the paper is: *We introduce Adversarial Diffusion Distillation (ADD), a novel training approach that efficiently samples large-scale foundational image diffusion models in just 1–4 steps while maintaining high image quality. We use score distillation to leverage large-scale off-the-shelf image diffusion models as a teacher signal in combination with an adversarial loss to ensure high image fidelity even in the low-step regime of one or two sampling steps. Our analyses show that our model clearly outperforms existing few-step methods (GANs,Latent Consistency Models) in a single step and reaches the performance of state-of-the-art diffusion models (SDXL) in only four steps. ADD is the first method to unlock single-step, real-time image synthesis with foundation models.* ## Tips - SDXL Turbo uses the exact same architecture as [SDXL](./stable_diffusion_xl), which means it also has the same API. Please refer to the [SDXL](./stable_diffusion_xl) API reference for more details. - SDXL Turbo should disable guidance scale by setting `guidance_scale=0.0`. - SDXL Turbo should use `timestep_spacing='trailing'` for the scheduler and use between 1 and 4 steps. - SDXL Turbo has been trained to generate images of size 512x512. - SDXL Turbo is open-access, but not open-source meaning that one might have to buy a model license in order to use it for commercial applications. Make sure to read the [official model card](https://huggingface.co/stabilityai/sdxl-turbo) to learn more. <Tip> To learn how to use SDXL Turbo for various tasks, how to optimize performance, and other usage examples, take a look at the [SDXL Turbo](../../../using-diffusers/sdxl_turbo) guide. Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the official base and refiner model checkpoints! </Tip>
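Putting the tips above together, here is a minimal text-to-image sketch that disables guidance and uses a single sampling step; the prompt and output filename are arbitrary.

```python
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."

# Guidance is disabled and a single sampling step is used, per the tips above.
image = pipeline(prompt=prompt, guidance_scale=0.0, num_inference_steps=1).images[0]
image.save("sdxl_turbo.png")
```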
diffusers/docs/source/en/api/pipelines/stable_diffusion/sdxl_turbo.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/sdxl_turbo.md", "repo_id": "diffusers", "token_count": 677 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Accelerate inference Diffusion models are slow at inference because generation is an iterative process where noise is gradually refined into an image or video over a certain number of "steps". To speedup this process, you can try experimenting with different [schedulers](../api/schedulers/overview), reduce the precision of the model weights for faster computations, use more memory-efficient attention mechanisms, and more. Combine and use these techniques together to make inference faster than using any single technique on its own. This guide will go over how to accelerate inference. ## Model data type The precision and data type of the model weights affect inference speed because a higher precision requires more memory to load and more time to perform the computations. PyTorch loads model weights in float32 or full precision by default, so changing the data type is a simple way to quickly get faster inference. <hfoptions id="dtypes"> <hfoption id="bfloat16"> bfloat16 is similar to float16 but it is more robust to numerical errors. Hardware support for bfloat16 varies, but most modern GPUs are capable of supporting bfloat16. ```py import torch from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" pipeline(prompt, num_inference_steps=30).images[0] ``` </hfoption> <hfoption id="float16"> float16 is similar to bfloat16 but may be more prone to numerical errors. ```py import torch from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" pipeline(prompt, num_inference_steps=30).images[0] ``` </hfoption> <hfoption id="TensorFloat-32"> [TensorFloat-32 (tf32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode is supported on NVIDIA Ampere GPUs and it computes the convolution and matrix multiplication operations in tf32. Storage and other operations are kept in float32. This enables significantly faster computations when combined with bfloat16 or float16. PyTorch only enables tf32 mode for convolutions by default and you'll need to explicitly enable it for matrix multiplications. 
```py import torch from diffusers import StableDiffusionXLPipeline torch.backends.cuda.matmul.allow_tf32 = True pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" pipeline(prompt, num_inference_steps=30).images[0] ``` Refer to the [mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#mixed-precision) docs for more details. </hfoption> </hfoptions> ## Scaled dot product attention > [!TIP] > Memory-efficient attention optimizes for inference speed *and* [memory usage](./memory#memory-efficient-attention)! [Scaled dot product attention (SDPA)](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) implements several attention backends, [FlashAttention](https://github.com/Dao-AILab/flash-attention), [xFormers](https://github.com/facebookresearch/xformers), and a native C++ implementation. It automatically selects the most optimal backend for your hardware. SDPA is enabled by default if you're using PyTorch >= 2.0 and no additional changes are required to your code. You could try experimenting with other attention backends though if you'd like to choose your own. The example below uses the [torch.nn.attention.sdpa_kernel](https://pytorch.org/docs/stable/generated/torch.nn.attention.sdpa_kernel.html) context manager to enable efficient attention. ```py from torch.nn.attention import SDPBackend, sdpa_kernel import torch from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION): image = pipeline(prompt, num_inference_steps=30).images[0] ``` ## torch.compile [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) accelerates inference by compiling PyTorch code and operations into optimized kernels. Diffusers typically compiles the more compute-intensive models like the UNet, transformer, or VAE. Enable the following compiler settings for maximum speed (refer to the [full list](https://github.com/pytorch/pytorch/blob/main/torch/_inductor/config.py) for more options). ```py import torch from diffusers import StableDiffusionXLPipeline torch._inductor.config.conv_1x1_as_mm = True torch._inductor.config.coordinate_descent_tuning = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.coordinate_descent_check_all_directions = True ``` Load and compile the UNet and VAE. There are several different modes you can choose from, but `"max-autotune"` optimizes for the fastest speed by compiling to a CUDA graph. CUDA graphs effectively reduces the overhead by launching multiple GPU operations through a single CPU operation. > [!TIP] > With PyTorch 2.3.1, you can control the caching behavior of torch.compile. This is particularly beneficial for compilation modes like `"max-autotune"` which performs a grid-search over several compilation flags to find the optimal configuration. Learn more in the [Compile Time Caching in torch.compile](https://pytorch.org/tutorials/recipes/torch_compile_caching_tutorial.html) tutorial. Changing the memory layout to [channels_last](./memory#torchchannels_last) also optimizes memory and inference speed. 
```py pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.unet.to(memory_format=torch.channels_last) pipeline.vae.to(memory_format=torch.channels_last) pipeline.unet = torch.compile( pipeline.unet, mode="max-autotune", fullgraph=True ) pipeline.vae.decode = torch.compile( pipeline.vae.decode, mode="max-autotune", fullgraph=True ) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" pipeline(prompt, num_inference_steps=30).images[0] ``` Compilation is slow the first time, but once compiled, it is significantly faster. Try to only use the compiled pipeline on the same type of inference operations. Calling the compiled pipeline on a different image size retriggers compilation which is slow and inefficient. ### Dynamic shape compilation > [!TIP] > Make sure to always use the nightly version of PyTorch for better support. `torch.compile` keeps track of input shapes and conditions, and if these are different, it recompiles the model. For example, if a model is compiled on a 1024x1024 resolution image and used on an image with a different resolution, it triggers recompilation. To avoid recompilation, add `dynamic=True` to try and generate a more dynamic kernel to avoid recompilation when conditions change. ```diff + torch.fx.experimental._config.use_duck_shape = False + pipeline.unet = torch.compile( pipeline.unet, fullgraph=True, dynamic=True ) ``` Specifying `use_duck_shape=False` instructs the compiler if it should use the same symbolic variable to represent input sizes that are the same. For more details, check out this [comment](https://github.com/huggingface/diffusers/pull/11327#discussion_r2047659790). Not all models may benefit from dynamic compilation out of the box and may require changes. Refer to this [PR](https://github.com/huggingface/diffusers/pull/11297/) that improved the [`AuraFlowPipeline`] implementation to benefit from dynamic compilation. Feel free to open an issue if dynamic compilation doesn't work as expected for a Diffusers model. ### Regional compilation [Regional compilation](https://docs.pytorch.org/tutorials/recipes/regional_compilation.html) trims cold-start latency by only compiling the *small and frequently-repeated block(s)* of a model - typically a transformer layer - and enables reusing compiled artifacts for every subsequent occurrence. For many diffusion architectures, this delivers the same runtime speedups as full-graph compilation and reduces compile time by 8–10x. Use the [`~ModelMixin.compile_repeated_blocks`] method, a helper that wraps `torch.compile`, on any component such as the transformer model as shown below. ```py # pip install -U diffusers import torch from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, ).to("cuda") # compile only the repeated transformer layers inside the UNet pipeline.unet.compile_repeated_blocks(fullgraph=True) ``` To enable regional compilation for a new model, add a `_repeated_blocks` attribute to a model class containing the class names (as strings) of the blocks you want to compile. ```py class MyUNet(ModelMixin): _repeated_blocks = ("Transformer2DModel",) # ← compiled by default ``` > [!TIP] > For more regional compilation examples, see the reference [PR](https://github.com/huggingface/diffusers/pull/11705). 
There is also a [compile_regions](https://github.com/huggingface/accelerate/blob/273799c85d849a1954a4f2e65767216eb37fa089/src/accelerate/utils/other.py#L78) method in [Accelerate](https://huggingface.co/docs/accelerate/index) that automatically selects candidate blocks in a model to compile. The remaining graph is compiled separately. This is useful for quick experiments because there aren't as many options for you to set which blocks to compile or adjust compilation flags. ```py # pip install -U accelerate import torch from diffusers import StableDiffusionXLPipeline from accelerate.utils import compile_regions pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.unet = compile_regions(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` [`~ModelMixin.compile_repeated_blocks`] is intentionally explicit. List the blocks to repeat in `_repeated_blocks` and the helper only compiles those blocks. It offers predictable behavior and easy reasoning about cache reuse in one line of code. ### Graph breaks It is important to specify `fullgraph=True` in torch.compile to ensure there are no graph breaks in the underlying model. This allows you to take advantage of torch.compile without any performance degradation. For the UNet and VAE, this changes how you access the return variables. ```diff - latents = unet( - latents, timestep=timestep, encoder_hidden_states=prompt_embeds -).sample + latents = unet( + latents, timestep=timestep, encoder_hidden_states=prompt_embeds, return_dict=False +)[0] ``` ### GPU sync The `step()` function is [called](https://github.com/huggingface/diffusers/blob/1d686bac8146037e97f3fd8c56e4063230f71751/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L1228) on the scheduler each time after the denoiser makes a prediction, and the `sigmas` variable is [indexed](https://github.com/huggingface/diffusers/blob/1d686bac8146037e97f3fd8c56e4063230f71751/src/diffusers/schedulers/scheduling_euler_discrete.py#L476). When placed on the GPU, it introduces latency because of the communication sync between the CPU and GPU. It becomes more evident when the denoiser has already been compiled. In general, the `sigmas` should [stay on the CPU](https://github.com/huggingface/diffusers/blob/35a969d297cba69110d175ee79c59312b9f49e1e/src/diffusers/schedulers/scheduling_euler_discrete.py#L240) to avoid the communication sync and latency. <Tip> Refer to the [torch.compile and Diffusers: A Hands-On Guide to Peak Performance](https://pytorch.org/blog/torch-compile-and-diffusers-a-hands-on-guide-to-peak-performance/) blog post for maximizing performance with `torch.compile` for diffusion models. </Tip> ### Benchmarks Refer to the [diffusers/benchmarks](https://huggingface.co/datasets/diffusers/benchmarks) dataset to see inference latency and memory usage data for compiled pipelines. The [diffusers-torchao](https://github.com/sayakpaul/diffusers-torchao#benchmarking-results) repository also contains benchmarking results for compiled versions of Flux and CogVideoX. ## Dynamic quantization [Dynamic quantization](https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html) improves inference speed by reducing precision to enable faster math operations. This particular type of quantization determines how to scale the activations based on the data at runtime rather than using a fixed scaling factor. As a result, the scaling factor is more accurately aligned with the data.
The example below applies [dynamic int8 quantization](https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html) to the UNet and VAE with the [torchao](../quantization/torchao) library. > [!TIP] > Refer to our [torchao](../quantization/torchao) docs to learn more about how to use the Diffusers torchao integration. Configure the compiler tags for maximum speed. ```py import torch from torchao import apply_dynamic_quant from diffusers import StableDiffusionXLPipeline torch._inductor.config.conv_1x1_as_mm = True torch._inductor.config.coordinate_descent_tuning = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.coordinate_descent_check_all_directions = True torch._inductor.config.force_fuse_int_mm_with_mul = True torch._inductor.config.use_mixed_mm = True ``` Filter out some linear layers in the UNet and VAE which don't benefit from dynamic quantization with the [dynamic_quant_filter_fn](https://github.com/huggingface/diffusion-fast/blob/0f169640b1db106fe6a479f78c1ed3bfaeba3386/utils/pipeline_utils.py#L16). ```py pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") apply_dynamic_quant(pipeline.unet, dynamic_quant_filter_fn) apply_dynamic_quant(pipeline.vae, dynamic_quant_filter_fn) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" pipeline(prompt, num_inference_steps=30).images[0] ``` ## Fused projection matrices > [!WARNING] > The [fuse_qkv_projections](https://github.com/huggingface/diffusers/blob/58431f102cf39c3c8a569f32d71b2ea8caa461e1/src/diffusers/pipelines/pipeline_utils.py#L2034) method is experimental and support is limited to mostly Stable Diffusion pipelines. Take a look at this [PR](https://github.com/huggingface/diffusers/pull/6179) to learn more about how to enable it for other pipelines An input is projected into three subspaces, represented by the projection matrices Q, K, and V, in an attention block. These projections are typically calculated separately, but you can horizontally combine these into a single matrix and perform the projection in a single step. It increases the size of the matrix multiplications of the input projections and also improves the impact of quantization. ```py pipeline.fuse_qkv_projections() ``` ## Resources - Read the [Presenting Flux Fast: Making Flux go brrr on H100s](https://pytorch.org/blog/presenting-flux-fast-making-flux-go-brrr-on-h100s/) blog post to learn more about how you can combine all of these optimizations with [TorchInductor](https://docs.pytorch.org/docs/stable/torch.compiler.html) and [AOTInductor](https://docs.pytorch.org/docs/stable/torch.compiler_aot_inductor.html) for a ~2.5x speedup using recipes from [flux-fast](https://github.com/huggingface/flux-fast). These recipes support AMD hardware and [Flux.1 Kontext Dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev). - Read the [torch.compile and Diffusers: A Hands-On Guide to Peak Performance](https://pytorch.org/blog/torch-compile-and-diffusers-a-hands-on-guide-to-peak-performance/) blog post to maximize performance when using `torch.compile`.
diffusers/docs/source/en/optimization/fp16.md/0
{ "file_path": "diffusers/docs/source/en/optimization/fp16.md", "repo_id": "diffusers", "token_count": 5105 }
124
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Getting started Quantization focuses on representing data with fewer bits while also trying to preserve the precision of the original data. This often means converting a data type to represent the same information with fewer bits. For example, if your model weights are stored as 32-bit floating points and they're quantized to 16-bit floating points, this halves the model size, which makes it easier to store and reduces memory usage. Lower precision can also speed up inference because it takes less time to perform calculations with fewer bits. Diffusers supports multiple quantization backends to make large diffusion models like [Flux](../api/pipelines/flux) more accessible. This guide shows how to use the [`~quantizers.PipelineQuantizationConfig`] class to quantize a pipeline during its initialization from a pretrained or non-quantized checkpoint. ## Pipeline-level quantization There are two ways to use [`~quantizers.PipelineQuantizationConfig`] depending on how much customization you want to apply to the quantization configuration. - for basic use cases, define the `quant_backend`, `quant_kwargs`, and `components_to_quantize` arguments - for granular quantization control, define a `quant_mapping` that provides the quantization configuration for individual model components ### Basic quantization Initialize [`~quantizers.PipelineQuantizationConfig`] with the following parameters. - `quant_backend` specifies which quantization backend to use. Currently supported backends include: `bitsandbytes_4bit`, `bitsandbytes_8bit`, `gguf`, `quanto`, and `torchao`. - `quant_kwargs` specifies the quantization arguments to use. > [!TIP] > These `quant_kwargs` arguments are different for each backend. Refer to the [Quantization API](../api/quantization) docs to view the arguments for each backend. - `components_to_quantize` specifies which components of the pipeline to quantize. Typically, you should quantize the most compute intensive components like the transformer. The text encoder is another component to consider quantizing if a pipeline has more than one, such as [`FluxPipeline`]. The example below quantizes the T5 text encoder in [`FluxPipeline`] while keeping the CLIP model intact. It loads the bitsandbytes backend with the following arguments from [`~quantizers.quantization_config.BitsAndBytesConfig`]: `load_in_4bit`, `bnb_4bit_quant_type`, and `bnb_4bit_compute_dtype`. ```py import torch from diffusers import DiffusionPipeline from diffusers.quantizers import PipelineQuantizationConfig pipeline_quant_config = PipelineQuantizationConfig( quant_backend="bitsandbytes_4bit", quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, components_to_quantize=["transformer", "text_encoder_2"], ) ``` Pass the `pipeline_quant_config` to [`~DiffusionPipeline.from_pretrained`] to quantize the pipeline. 
```py pipe = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", quantization_config=pipeline_quant_config, torch_dtype=torch.bfloat16, ).to("cuda") image = pipe("photo of a cute dog").images[0] ``` ### Advanced quantization The `quant_mapping` argument provides more options for how to quantize each individual component in a pipeline, like combining different quantization backends. Initialize [`~quantizers.PipelineQuantizationConfig`] and pass a `quant_mapping` to it. The `quant_mapping` allows you to specify the quantization options for each component in the pipeline such as the transformer and text encoder. The example below uses two quantization backends, [`~quantizers.quantization_config.QuantoConfig`] and [`transformers.BitsAndBytesConfig`], for the transformer and text encoder. ```py import torch from diffusers import DiffusionPipeline from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig from diffusers.quantizers.quantization_config import QuantoConfig from diffusers.quantizers import PipelineQuantizationConfig from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig pipeline_quant_config = PipelineQuantizationConfig( quant_mapping={ "transformer": QuantoConfig(weights_dtype="int8"), "text_encoder_2": TransformersBitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16 ), } ) ``` There is a separate bitsandbytes backend in [Transformers](https://huggingface.co/docs/transformers/main_classes/quantization#transformers.BitsAndBytesConfig). You need to import and use [`transformers.BitsAndBytesConfig`] for components that come from Transformers. For example, `text_encoder_2` in [`FluxPipeline`] is a [`~transformers.T5EncoderModel`] from Transformers so you need to use [`transformers.BitsAndBytesConfig`] instead of [`diffusers.BitsAndBytesConfig`]. > [!TIP] > Use the [basic quantization](#basic-quantization) method above if you don't want to manage these distinct imports or aren't sure where each pipeline component comes from. ```py import torch from diffusers import DiffusionPipeline from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig from diffusers.quantizers import PipelineQuantizationConfig from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig pipeline_quant_config = PipelineQuantizationConfig( quant_mapping={ "transformer": DiffusersBitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16), "text_encoder_2": TransformersBitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16 ), } ) ``` Pass the `pipeline_quant_config` to [`~DiffusionPipeline.from_pretrained`] to quantize the pipeline. ```py pipe = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", quantization_config=pipeline_quant_config, torch_dtype=torch.bfloat16, ).to("cuda") image = pipe("photo of a cute dog").images[0] ``` ## Resources Check out the resources below to learn more about quantization. - If you are new to quantization, we recommend checking out the following beginner-friendly courses in collaboration with DeepLearning.AI. - [Quantization Fundamentals with Hugging Face](https://www.deeplearning.ai/short-courses/quantization-fundamentals-with-hugging-face/) - [Quantization in Depth](https://www.deeplearning.ai/short-courses/quantization-in-depth/) - Refer to the [Contribute new quantization method guide](https://huggingface.co/docs/transformers/main/en/quantization/contribute) if you're interested in adding a new quantization method. 
- The Transformers quantization [Overview](https://huggingface.co/docs/transformers/quantization/overview#when-to-use-what) provides an overview of the pros and cons of different quantization backends. - Read the [Exploring Quantization Backends in Diffusers](https://huggingface.co/blog/diffusers-quantization) blog post for a brief introduction to each quantization backend, how to choose a backend, and combining quantization with other memory optimizations.
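Once you have chosen a backend, a quick way to confirm that quantization actually paid off is to measure how much memory the quantized components occupy. The sketch below is illustrative only: `component_size_gb` is a hypothetical helper rather than a Diffusers API, and the number it reports for 4-bit bitsandbytes weights is an approximation based on the packed parameter storage.

```py
# Rough sketch: estimate the in-memory size of quantized pipeline components.
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

def component_size_gb(module: torch.nn.Module) -> float:
    # Sum the storage of all parameters and buffers, in gigabytes.
    total = sum(p.numel() * p.element_size() for p in module.parameters())
    total += sum(b.numel() * b.element_size() for b in module.buffers())
    return total / 1024**3

pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder_2"],
)
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
)
print(f"transformer: {component_size_gb(pipe.transformer):.2f} GB")
print(f"text_encoder_2: {component_size_gb(pipe.text_encoder_2):.2f} GB")
```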
diffusers/docs/source/en/quantization/overview.md/0
{ "file_path": "diffusers/docs/source/en/quantization/overview.md", "repo_id": "diffusers", "token_count": 2169 }
125
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LoRA <Tip warning={true}> This is experimental and the API may change in the future. </Tip> [LoRA (Low-Rank Adaptation of Large Language Models)](https://hf.co/papers/2106.09685) is a popular and lightweight training technique that significantly reduces the number of trainable parameters. It works by inserting a smaller number of new weights into the model and only these are trained. This makes training with LoRA much faster, memory-efficient, and produces smaller model weights (a few hundred MBs), which are easier to store and share. LoRA can also be combined with other training techniques like DreamBooth to speedup training. <Tip> LoRA is very versatile and supported for [DreamBooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py), [Kandinsky 2.2](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py), [Stable Diffusion XL](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora_sdxl.py), [text-to-image](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py), and [Wuerstchen](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py). </Tip> This guide will explore the [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Navigate to the example folder with the training script and install the required dependencies for the script you're using: <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash cd examples/text_to_image pip install -r requirements.txt ``` </hfoption> <hfoption id="Flax"> ```bash cd examples/text_to_image pip install -r requirements_flax.txt ``` </hfoption> </hfoptions> <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. 
</Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script has many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L85) function. Default values are provided for most parameters that work pretty well, but you can also set your own values in the training command if you'd like. For example, to increase the number of epochs to train: ```bash accelerate launch train_text_to_image_lora.py \ --num_train_epochs=150 \ ``` Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the LoRA relevant parameters: - `--rank`: the inner dimension of the low-rank matrices to train; a higher rank means more trainable parameters - `--learning_rate`: the default learning rate is 1e-4, but with LoRA, you can use a higher learning rate ## Training script The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371) function, and if you need to adapt the training script, this is where you'll make your changes. As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the LoRA relevant parts of the script. <hfoptions id="lora"> <hfoption id="UNet"> Diffusers uses [`~peft.LoraConfig`] from the [PEFT](https://hf.co/docs/peft) library to set up the parameters of the LoRA adapter such as the rank, alpha, and which modules to insert the LoRA weights into. The adapter is added to the UNet, and only the LoRA layers are filtered for optimization in `lora_layers`. ```py unet_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, init_lora_weights="gaussian", target_modules=["to_k", "to_q", "to_v", "to_out.0"], ) unet.add_adapter(unet_lora_config) lora_layers = filter(lambda p: p.requires_grad, unet.parameters()) ``` </hfoption> <hfoption id="text encoder"> Diffusers also supports finetuning the text encoder with LoRA from the [PEFT](https://hf.co/docs/peft) library when necessary such as finetuning Stable Diffusion XL (SDXL). 
The [`~peft.LoraConfig`] is used to configure the parameters of the LoRA adapter which are then added to the text encoder, and only the LoRA layers are filtered for training. ```py text_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, init_lora_weights="gaussian", target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], ) text_encoder_one.add_adapter(text_lora_config) text_encoder_two.add_adapter(text_lora_config) text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) ``` </hfoption> </hfoptions> The [optimizer](https://github.com/huggingface/diffusers/blob/e4b8f173b97731686e290b2eb98e7f5df2b1b322/examples/text_to_image/train_text_to_image_lora.py#L529) is initialized with the `lora_layers` because these are the only weights that'll be optimized: ```py optimizer = optimizer_cls( lora_layers, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Aside from setting up the LoRA layers, the training script is more or less the same as train_text_to_image.py! ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository: - saved model checkpoints - `pytorch_lora_weights.safetensors` (the trained LoRA weights) If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. <Tip warning={true}> A full training run takes ~5 hours on a 2080 Ti GPU with 11GB of VRAM. </Tip> ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="/sddata/finetune/lora/naruto" export HUB_MODEL_ID="naruto-lora" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --dataloader_num_workers=8 \ --resolution=512 \ --center_crop \ --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=15000 \ --learning_rate=1e-04 \ --max_grad_norm=1 \ --lr_scheduler="cosine" \ --lr_warmup_steps=0 \ --output_dir=${OUTPUT_DIR} \ --push_to_hub \ --hub_model_id=${HUB_MODEL_ID} \ --report_to=wandb \ --checkpointing_steps=500 \ --validation_prompt="A naruto with blue eyes." \ --seed=1337 ``` Once training has been completed, you can use your model for inference: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") pipeline.load_lora_weights("path/to/lora/model", weight_name="pytorch_lora_weights.safetensors") image = pipeline("A naruto with blue eyes").images[0] ``` ## Next steps Congratulations on training a new model with LoRA! 
To learn more about how to use your new model, the following guides may be helpful: - Learn how to [load different LoRA formats](../using-diffusers/loading_adapters#LoRA) trained using community trainers like Kohya and TheLastBen. - Learn how to use and [combine multiple LoRAs](../tutorials/using_peft_for_inference) with PEFT for inference, as shown in the sketch below.
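The sketch below shows one way to load and blend two LoRAs in the same pipeline. The repository paths, weight file names, and adapter names are illustrative placeholders; substitute your own trained weights.

```py
# Minimal sketch: blend two LoRA adapters with the PEFT-backed APIs in Diffusers.
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Give each LoRA its own adapter name so they can be mixed later.
pipeline.load_lora_weights("path/to/naruto-lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="naruto")
pipeline.load_lora_weights("path/to/style-lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="style")

# Weight each adapter; the weights don't have to sum to 1.
pipeline.set_adapters(["naruto", "style"], adapter_weights=[1.0, 0.6])

image = pipeline("A naruto with blue eyes, painterly style").images[0]
```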
diffusers/docs/source/en/training/lora.md/0
{ "file_path": "diffusers/docs/source/en/training/lora.md", "repo_id": "diffusers", "token_count": 3354 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNet [ControlNet](https://huggingface.co/papers/2302.05543) is an adapter that enables controllable generation such as generating an image of a cat in a *specific pose* or following the lines in a sketch of a *specific* cat. It works by adding a smaller network of "zero convolution" layers and progressively training these to avoid disrupting with the original model. The original model parameters are frozen to avoid retraining it. A ControlNet is conditioned on extra visual information or "structural controls" (canny edge, depth maps, human pose, etc.) that can be combined with text prompts to generate images that are guided by the visual input. > [!TIP] > ControlNets are available to many models such as [Flux](../api/pipelines/controlnet_flux), [Hunyuan-DiT](../api/pipelines/controlnet_hunyuandit), [Stable Diffusion 3](../api/pipelines/controlnet_sd3), and more. The examples in this guide use Flux and Stable Diffusion XL. Load a ControlNet conditioned on a specific control, such as canny edge, and pass it to the pipeline in [`~DiffusionPipeline.from_pretrained`]. <hfoptions id="usage"> <hfoption id="text-to-image"> Generate a canny image with [opencv-python](https://github.com/opencv/opencv-python). ```py import cv2 import numpy as np from PIL import Image from diffusers.utils import load_image original_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" ) image = np.array(original_image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) ``` Pass the canny image to the pipeline. Use the `controlnet_conditioning_scale` parameter to determine how much weight to assign to the control. ```py import torch from diffusers.utils import load_image from diffusers import FluxControlNetPipeline, FluxControlNetModel controlnet = FluxControlNetModel.from_pretrained( "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16 ) pipeline = FluxControlNetPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16 ).to("cuda") prompt = """ A photorealistic overhead image of a cat reclining sideways in a flamingo pool floatie holding a margarita. The cat is floating leisurely in the pool and completely relaxed and happy. 
""" pipeline( prompt, control_image=canny_image, controlnet_conditioning_scale=0.5, num_inference_steps=50, guidance_scale=3.5, ).images[0] ``` <div style="display: flex; gap: 10px; justify-content: space-around; align-items: flex-end;"> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" width="300" alt="Generated image (prompt only)"/> <figcaption style="text-align: center;">original image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/canny-cat.png" width="300" alt="Control image (Canny edges)"/> <figcaption style="text-align: center;">canny image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/canny-cat-generated.png" width="300" alt="Generated image (ControlNet + prompt)"/> <figcaption style="text-align: center;">generated image</figcaption> </figure> </div> </hfoption> <hfoption id="image-to-image"> Generate a depth map with a depth estimation pipeline from Transformers. ```py import torch import numpy as np from PIL import Image from transformers import DPTImageProcessor, DPTForDepthEstimation from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL from diffusers.utils import load_image depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") def get_depth_map(image): image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") with torch.no_grad(), torch.autocast("cuda"): depth_map = depth_estimator(image).predicted_depth depth_map = torch.nn.functional.interpolate( depth_map.unsqueeze(1), size=(1024, 1024), mode="bicubic", align_corners=False, ) depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) depth_map = (depth_map - depth_min) / (depth_max - depth_min) image = torch.cat([depth_map] * 3, dim=1) image = image.permute(0, 2, 3, 1).cpu().numpy()[0] image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) return image depth_image = get_depth_map(image) ``` Pass the depth map to the pipeline. Use the `controlnet_conditioning_scale` parameter to determine how much weight to assign to the control. ```py controlnet = ControlNetModel.from_pretrained( "diffusers/controlnet-depth-sdxl-1.0-small", torch_dtype=torch.float16, ) vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) pipeline = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16, ).to("cuda") prompt = """ A photorealistic overhead image of a cat reclining sideways in a flamingo pool floatie holding a margarita. The cat is floating leisurely in the pool and completely relaxed and happy. 
""" image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" ).resize((1024, 1024)) controlnet_conditioning_scale = 0.5 pipeline( prompt, image=image, control_image=depth_image, controlnet_conditioning_scale=controlnet_conditioning_scale, strength=0.99, num_inference_steps=100, ).images[0] ``` <div style="display: flex; gap: 10px; justify-content: space-around; align-items: flex-end;"> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" width="300" alt="Generated image (prompt only)"/> <figcaption style="text-align: center;">original image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_depth_image.png" width="300" alt="Control image (Canny edges)"/> <figcaption style="text-align: center;">depth map</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_depth_cat.png" width="300" alt="Generated image (ControlNet + prompt)"/> <figcaption style="text-align: center;">generated image</figcaption> </figure> </div> </hfoption> <hfoption id="inpainting"> Generate a mask image and convert it to a tensor to mark the pixels in the original image as masked if the corresponding pixel in the mask image is over a certain threshold. ```py import cv2 import torch import numpy as np from PIL import Image from diffusers.utils import load_image from diffusers import StableDiffusionXLControlNetInpaintPipeline, ControlNetModel init_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" ) init_image = init_image.resize((1024, 1024)) mask_image = load_image( "/content/cat_mask.png" ) mask_image = mask_image.resize((1024, 1024)) def make_canny_condition(image): image = np.array(image) image = cv2.Canny(image, 100, 200) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) image = Image.fromarray(image) return image control_image = make_canny_condition(init_image) ``` Pass the mask and control image to the pipeline. Use the `controlnet_conditioning_scale` parameter to determine how much weight to assign to the control. 
```py controlnet = ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 ) pipeline = StableDiffusionXLControlNetInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 ) pipeline( "a cute and fluffy bunny rabbit", num_inference_steps=100, strength=0.99, controlnet_conditioning_scale=0.5, image=init_image, mask_image=mask_image, control_image=control_image, ).images[0] ``` <div style="display: flex; gap: 10px; justify-content: space-around; align-items: flex-end;"> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png" width="300" alt="Generated image (prompt only)"/> <figcaption style="text-align: center;">original image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat_mask.png" width="300" alt="Control image (Canny edges)"/> <figcaption style="text-align: center;">mask image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_rabbit_inpaint.png" width="300" alt="Generated image (ControlNet + prompt)"/> <figcaption style="text-align: center;">generated image</figcaption> </figure> </div> </hfoption> </hfoptions> ## Multi-ControlNet You can compose multiple ControlNet conditionings, such as a canny image and a depth map, to create a *MultiControlNet*. For the best results, you should mask conditionings so they don't overlap and experiment with different `controlnet_conditioning_scale` parameters to adjust how much weight is assigned to each control input. The example below composes a canny image and depth map. Pass the ControlNets as a list to the pipeline and resize the images to the expected input size. 
```py import torch from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL controlnets = [ ControlNetModel.from_pretrained( "diffusers/controlnet-depth-sdxl-1.0-small", torch_dtype=torch.float16 ), ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, ), ] vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) pipeline = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnets, vae=vae, torch_dtype=torch.float16 ).to("cuda") prompt = """ a relaxed rabbit sitting on a striped towel next to a pool with a tropical drink nearby, bright sunny day, vacation scene, 35mm photograph, film, professional, 4k, highly detailed """ negative_prompt = "lowres, bad anatomy, worst quality, low quality, deformed, ugly" images = [canny_image.resize((1024, 1024)), depth_image.resize((1024, 1024))] pipeline( prompt, negative_prompt=negative_prompt, image=images, num_inference_steps=100, controlnet_conditioning_scale=[0.5, 0.5], strength=0.7, ).images[0] ``` <div style="display: flex; gap: 10px; justify-content: space-around; align-items: flex-end;"> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/canny-cat.png" width="300" alt="Generated image (prompt only)"/> <figcaption style="text-align: center;">canny image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multicontrolnet_depth.png" width="300" alt="Control image (Canny edges)"/> <figcaption style="text-align: center;">depth map</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_multi_controlnet.png" width="300" alt="Generated image (ControlNet + prompt)"/> <figcaption style="text-align: center;">generated image</figcaption> </figure> </div> ## guess_mode [Guess mode](https://github.com/lllyasviel/ControlNet/discussions/188) generates an image from **only** the control input (canny edge, depth map, pose, etc.) and without guidance from a prompt. It adjusts the scale of the ControlNet's output residuals by a fixed ratio depending on block depth. The earlier `DownBlock` is only scaled by `0.1` and the `MidBlock` is fully scaled by `1.0`. 
```py import torch from diffusers.utils import load_image from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel controlnet = ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 ) pipeline = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda") canny_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/canny-cat.png") pipeline( "", image=canny_image, guess_mode=True ).images[0] ``` <div style="display: flex; gap: 10px; justify-content: space-around; align-items: flex-end;"> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/canny-cat.png" width="300" alt="Control image (Canny edges)"/> <figcaption style="text-align: center;">canny image</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guess_mode.png" width="300" alt="Generated image (Guess mode)"/> <figcaption style="text-align: center;">generated image</figcaption> </figure> </div>
diffusers/docs/source/en/using-diffusers/controlnet.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/controlnet.md", "repo_id": "diffusers", "token_count": 5021 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Model files and layouts [[open-in-colab]] Diffusion models are saved in various file types and organized in different layouts. Diffusers stores model weights as safetensors files in *Diffusers-multifolder* layout and it also supports loading files (like safetensors and ckpt files) from a *single-file* layout which is commonly used in the diffusion ecosystem. Each layout has its own benefits and use cases, and this guide will show you how to load the different files and layouts, and how to convert them. ## Files PyTorch model weights are typically saved with Python's [pickle](https://docs.python.org/3/library/pickle.html) utility as ckpt or bin files. However, pickle is not secure and pickled files may contain malicious code that can be executed. This vulnerability is a serious concern given the popularity of model sharing. To address this security issue, the [Safetensors](https://hf.co/docs/safetensors) library was developed as a secure alternative to pickle, which saves models as safetensors files. ### safetensors > [!TIP] > Learn more about the design decisions and why safetensor files are preferred for saving and loading model weights in the [Safetensors audited as really safe and becoming the default](https://blog.eleuther.ai/safetensors-security-audit/) blog post. [Safetensors](https://hf.co/docs/safetensors) is a safe and fast file format for securely storing and loading tensors. Safetensors restricts the header size to limit certain types of attacks, supports lazy loading (useful for distributed setups), and has generally faster loading speeds. Make sure you have the [Safetensors](https://hf.co/docs/safetensors) library installed. ```py !pip install safetensors ``` Safetensors stores weights in a safetensors file. Diffusers loads safetensors files by default if they're available and the Safetensors library is installed. There are two ways safetensors files can be organized: 1. Diffusers-multifolder layout: there may be several separate safetensors files, one for each pipeline component (text encoder, UNet, VAE), organized in subfolders (check out the [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) repository as an example) 2. single-file layout: all the model weights may be saved in a single file (check out the [WarriorMama777/OrangeMixs](https://hf.co/WarriorMama777/OrangeMixs/tree/main/Models/AbyssOrangeMix) repository as an example) <hfoptions id="safetensors"> <hfoption id="multifolder"> Use the [`~DiffusionPipeline.from_pretrained`] method to load a model with safetensors files stored in multiple folders. 
```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True ) ``` </hfoption> <hfoption id="single file"> Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to load a model with all the weights stored in a single safetensors file. ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" ) ``` </hfoption> </hfoptions> #### LoRAs [LoRAs](../tutorials/using_peft_for_inference) are lightweight checkpoints fine-tuned to generate images or video in a specific style. If you are using a checkpoint trained with a Diffusers training script, the LoRA configuration is automatically saved as metadata in a safetensors file. When the safetensors file is loaded, the metadata is parsed to correctly configure the LoRA and avoids missing or incorrect LoRA configurations. The easiest way to inspect the metadata, if available, is by clicking on the Safetensors logo next to the weights. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/safetensors_lora.png"/> </div> For LoRAs that aren't trained with Diffusers, you can still save metadata with the `transformer_lora_adapter_metadata` and `text_encoder_lora_adapter_metadata` arguments in [`~loaders.FluxLoraLoaderMixin.save_lora_weights`] as long as it is a safetensors file. ```py import torch from diffusers import FluxPipeline pipeline = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 ).to("cuda") pipeline.load_lora_weights("linoyts/yarn_art_Flux_LoRA") pipeline.save_lora_weights( transformer_lora_adapter_metadata={"r": 16, "lora_alpha": 16}, text_encoder_lora_adapter_metadata={"r": 8, "lora_alpha": 8} ) ``` ### ckpt > [!WARNING] > Pickled files may be unsafe because they can be exploited to execute malicious code. It is recommended to use safetensors files instead where possible, or convert the weights to safetensors files. PyTorch's [torch.save](https://pytorch.org/docs/stable/generated/torch.save.html) function uses Python's [pickle](https://docs.python.org/3/library/pickle.html) utility to serialize and save models. These files are saved as a ckpt file and they contain the entire model's weights. Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to directly load a ckpt file. ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt" ) ``` ## Storage layout There are two ways model files are organized, either in a Diffusers-multifolder layout or in a single-file layout. The Diffusers-multifolder layout is the default, and each component file (text encoder, UNet, VAE) is stored in a separate subfolder. Diffusers also supports loading models from a single-file layout where all the components are bundled together. ### Diffusers-multifolder The Diffusers-multifolder layout is the default storage layout for Diffusers. Each component's (text encoder, UNet, VAE) weights are stored in a separate subfolder. The weights can be stored as safetensors or ckpt files. 
<div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-layout.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">multifolder layout</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-unet.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">UNet subfolder</figcaption> </div> </div> To load from Diffusers-multifolder layout, use the [`~DiffusionPipeline.from_pretrained`] method. ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ).to("cuda") ``` Benefits of using the Diffusers-multifolder layout include: 1. Faster to load each component file individually or in parallel. 2. Reduced memory usage because you only load the components you need. For example, models like [SDXL Turbo](https://hf.co/stabilityai/sdxl-turbo), [SDXL Lightning](https://hf.co/ByteDance/SDXL-Lightning), and [Hyper-SD](https://hf.co/ByteDance/Hyper-SD) have the same components except for the UNet. You can reuse their shared components with the [`~DiffusionPipeline.from_pipe`] method without consuming any additional memory (take a look at the [Reuse a pipeline](./loading#reuse-a-pipeline) guide) and only load the UNet. This way, you don't need to download redundant components and unnecessarily use more memory. ```py import torch from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler # download one model sdxl_pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ).to("cuda") # switch UNet for another model unet = UNet2DConditionModel.from_pretrained( "stabilityai/sdxl-turbo", subfolder="unet", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) # reuse all the same components in new model except for the UNet turbo_pipeline = StableDiffusionXLPipeline.from_pipe( sdxl_pipeline, unet=unet, ).to("cuda") turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config( turbo_pipeline.scheduler.config, timestep+spacing="trailing" ) image = turbo_pipeline( "an astronaut riding a unicorn on mars", num_inference_steps=1, guidance_scale=0.0, ).images[0] image ``` 3. Reduced storage requirements because if a component, such as the SDXL [VAE](https://hf.co/madebyollin/sdxl-vae-fp16-fix), is shared across multiple models, you only need to download and store a single copy of it instead of downloading and storing it multiple times. For 10 SDXL models, this can save ~3.5GB of storage. The storage savings is even greater for newer models like PixArt Sigma, where the [text encoder](https://hf.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS/tree/main/text_encoder) alone is ~19GB! 4. Flexibility to replace a component in the model with a newer or better version. ```py from diffusers import DiffusionPipeline, AutoencoderKL vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ).to("cuda") ``` 5. 
More visibility and information about a model's components, which are stored in a [config.json](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json) file in each component subfolder. ### Single-file The single-file layout stores all the model weights in a single file. All the model components (text encoder, UNet, VAE) weights are kept together instead of separately in subfolders. This can be a safetensors or ckpt file. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/single-file-layout.png"/> </div> To load from a single-file layout, use the [`~loaders.FromSingleFileMixin.from_single_file`] method. ```py import torch from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_single_file( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ).to("cuda") ``` Benefits of using a single-file layout include: 1. Easy compatibility with diffusion interfaces such as [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) which commonly use a single-file layout. 2. Easier to manage (download and share) a single file. ### DDUF > [!WARNING] > DDUF is an experimental file format and APIs related to it can change in the future. DDUF (**D**DUF **D**iffusion **U**nified **F**ormat) is a file format designed to make storing, distributing, and using diffusion models much easier. Built on the ZIP file format, DDUF offers a standardized, efficient, and flexible way to package all parts of a diffusion model into a single, easy-to-manage file. It provides a balance between Diffusers multi-folder format and the widely popular single-file format. Learn more details about DDUF on the Hugging Face Hub [documentation](https://huggingface.co/docs/hub/dduf). Pass a checkpoint to the `dduf_file` parameter to load it in [`DiffusionPipeline`]. ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "DDUF/FLUX.1-dev-DDUF", dduf_file="FLUX.1-dev.dduf", torch_dtype=torch.bfloat16 ).to("cuda") image = pipe( "photo of a cat holding a sign that says Diffusers", num_inference_steps=50, guidance_scale=3.5 ).images[0] image.save("cat.png") ``` To save a pipeline as a `.dduf` checkpoint, use the [`~huggingface_hub.export_folder_as_dduf`] utility, which takes care of all the necessary file-level validations. ```py from huggingface_hub import export_folder_as_dduf from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) save_folder = "flux-dev" pipe.save_pretrained("flux-dev") export_folder_as_dduf("flux-dev.dduf", folder_path=save_folder) ``` > [!TIP] > Packaging and loading quantized checkpoints in the DDUF format is supported as long as they respect the multi-folder structure. ## Convert layout and files Diffusers provides many scripts and methods to convert storage layouts and file formats to enable broader support across the diffusion ecosystem. Take a look at the [diffusers/scripts](https://github.com/huggingface/diffusers/tree/main/scripts) collection to find a script that fits your conversion needs. > [!TIP] > Scripts that have "`to_diffusers`" appended at the end mean they convert a model to the Diffusers-multifolder layout. 
Each script has its own specific set of arguments for configuring the conversion, so make sure you check what arguments are available! For example, to convert a Stable Diffusion XL model stored in Diffusers-multifolder layout to a single-file layout, run the [convert_diffusers_to_original_sdxl.py](https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_sdxl.py) script. Provide the path to the model to convert, and the path to save the converted model to. You can optionally specify whether you want to save the model as a safetensors file and whether to save the model in half-precision. ```bash python convert_diffusers_to_original_sdxl.py --model_path path/to/model/to/convert --checkpoint_path path/to/save/model/to --use_safetensors ``` You can also save a model to Diffusers-multifolder layout with the [`~DiffusionPipeline.save_pretrained`] method. This creates a directory for you if it doesn't already exist, and it also saves the files as a safetensors file by default. ```py from diffusers import StableDiffusionXLPipeline pipeline = StableDiffusionXLPipeline.from_single_file( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors", ) pipeline.save_pretrained("path/to/save/model") ``` Lastly, there are also Spaces, such as [SD To Diffusers](https://hf.co/spaces/diffusers/sd-to-diffusers) and [SD-XL To Diffusers](https://hf.co/spaces/diffusers/sdxl-to-diffusers), that provide a more user-friendly interface for converting models to Diffusers-multifolder layout. This is the easiest and most convenient option for converting layouts, and it'll open a PR on your model repository with the converted files. However, this option is not as reliable as running a script, and the Space may fail for more complicated models. ## Single-file layout usage Now that you're familiar with the differences between the Diffusers-multifolder and single-file layout, this section shows you how to load models and pipeline components, customize configuration options for loading, and load local files with the [`~loaders.FromSingleFileMixin.from_single_file`] method. ### Load a pipeline or model Pass the file path of the pipeline or model to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load it. <hfoptions id="pipeline-model"> <hfoption id="pipeline"> ```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path) ``` </hfoption> <hfoption id="model"> ```py from diffusers import StableCascadeUNet ckpt_path = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors" model = StableCascadeUNet.from_single_file(ckpt_path) ``` </hfoption> </hfoptions> Customize components in the pipeline by passing them directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. For example, you can use a different scheduler in a pipeline. ```py from diffusers import StableDiffusionXLPipeline, DDIMScheduler ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" scheduler = DDIMScheduler() pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, scheduler=scheduler) ``` Or you could use a ControlNet model in the pipeline. 
```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipeline = StableDiffusionControlNetPipeline.from_single_file(ckpt_path, controlnet=controlnet) ``` ### Customize configuration options Models have a configuration file that defines their attributes like the number of inputs in a UNet. Pipeline configuration options are available in the pipeline's class. For example, if you look at the [`StableDiffusionXLInstructPix2PixPipeline`] class, there is an option to scale the image latents with the `is_cosxl_edit` parameter. These configuration files can be found in the model's Hub repository or another location from which the configuration file originated (for example, a GitHub repository or locally on your device). <hfoptions id="config-file"> <hfoption id="Hub configuration file"> > [!TIP] > The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically maps the checkpoint to the appropriate model repository, but there are cases where it is useful to use the `config` parameter. For example, if the model components in the checkpoint are different from the original checkpoint or if a checkpoint doesn't have the necessary metadata to correctly determine the configuration to use for the pipeline. The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically determines the configuration to use from the configuration file in the model repository. You could also explicitly specify the configuration to use by providing the repository id to the `config` parameter. ```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/segmind/SSD-1B/blob/main/SSD-1B.safetensors" repo_id = "segmind/SSD-1B" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, config=repo_id) ``` The model loads the configuration file for the [UNet](https://huggingface.co/segmind/SSD-1B/blob/main/unet/config.json), [VAE](https://huggingface.co/segmind/SSD-1B/blob/main/vae/config.json), and [text encoder](https://huggingface.co/segmind/SSD-1B/blob/main/text_encoder/config.json) from their respective subfolders in the repository. </hfoption> <hfoption id="original configuration file"> The [`~loaders.FromSingleFileMixin.from_single_file`] method can also load the original configuration file of a pipeline that is stored elsewhere. Pass a local path or URL of the original configuration file to the `original_config` parameter. ```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" original_config = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, original_config=original_config) ``` > [!TIP] > Diffusers attempts to infer the pipeline components based on the type signatures of the pipeline class when you use `original_config` with `local_files_only=True`, instead of fetching the configuration files from the model repository on the Hub. This prevents backward breaking changes in code that can't connect to the internet to fetch the necessary configuration files. 
> > This is not as reliable as providing a path to a local model repository with the `config` parameter, and might lead to errors during pipeline configuration. To avoid errors, run the pipeline with `local_files_only=False` once to download the appropriate pipeline configuration files to the local cache. </hfoption> </hfoptions> While the configuration files specify the pipeline or models default parameters, you can override them by providing the parameters directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. Any parameter supported by the model or pipeline class can be configured in this way. <hfoptions id="override"> <hfoption id="pipeline"> For example, to scale the image latents in [`StableDiffusionXLInstructPix2PixPipeline`] pass the `is_cosxl_edit` parameter. ```python from diffusers import StableDiffusionXLInstructPix2PixPipeline ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors" pipeline = StableDiffusionXLInstructPix2PixPipeline.from_single_file(ckpt_path, config="diffusers/sdxl-instructpix2pix-768", is_cosxl_edit=True) ``` </hfoption> <hfoption id="model"> For example, to upcast the attention dimensions in a [`UNet2DConditionModel`] pass the `upcast_attention` parameter. ```python from diffusers import UNet2DConditionModel ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" model = UNet2DConditionModel.from_single_file(ckpt_path, upcast_attention=True) ``` </hfoption> </hfoptions> ### Local files In Diffusers>=v0.28.0, the [`~loaders.FromSingleFileMixin.from_single_file`] method attempts to configure a pipeline or model by inferring the model type from the keys in the checkpoint file. The inferred model type is used to determine the appropriate model repository on the Hugging Face Hub to configure the model or pipeline. For example, any single file checkpoint based on the Stable Diffusion XL base model will use the [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model repository to configure the pipeline. But if you're working in an environment with restricted internet access, you should download the configuration files with the [`~huggingface_hub.snapshot_download`] function, and the model checkpoint with the [`~huggingface_hub.hf_hub_download`] function. By default, these files are downloaded to the Hugging Face Hub [cache directory](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache), but you can specify a preferred directory to download the files to with the `local_dir` parameter. Pass the configuration and checkpoint paths to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load locally. 
<hfoptions id="local"> <hfoption id="Hub cache directory"> ```python from huggingface_hub import hf_hub_download, snapshot_download my_local_checkpoint_path = hf_hub_download( repo_id="segmind/SSD-1B", filename="SSD-1B.safetensors" ) my_local_config_path = snapshot_download( repo_id="segmind/SSD-1B", allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"] ) pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True) ``` </hfoption> <hfoption id="specific local directory"> ```python from huggingface_hub import hf_hub_download, snapshot_download my_local_checkpoint_path = hf_hub_download( repo_id="segmind/SSD-1B", filename="SSD-1B.safetensors" local_dir="my_local_checkpoints" ) my_local_config_path = snapshot_download( repo_id="segmind/SSD-1B", allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"] local_dir="my_local_config" ) pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True) ``` </hfoption> </hfoptions> #### Local files without symlink > [!TIP] > In huggingface_hub>=v0.23.0, the `local_dir_use_symlinks` argument isn't necessary for the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions. The [`~loaders.FromSingleFileMixin.from_single_file`] method relies on the [huggingface_hub](https://hf.co/docs/huggingface_hub/index) caching mechanism to fetch and store checkpoints and configuration files for models and pipelines. If you're working with a file system that does not support symlinking, you should download the checkpoint file to a local directory first, and disable symlinking with the `local_dir_use_symlink=False` parameter in the [`~huggingface_hub.hf_hub_download`] function and [`~huggingface_hub.snapshot_download`] functions. ```python from huggingface_hub import hf_hub_download, snapshot_download my_local_checkpoint_path = hf_hub_download( repo_id="segmind/SSD-1B", filename="SSD-1B.safetensors" local_dir="my_local_checkpoints", local_dir_use_symlinks=False ) print("My local checkpoint: ", my_local_checkpoint_path) my_local_config_path = snapshot_download( repo_id="segmind/SSD-1B", allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"] local_dir_use_symlinks=False, ) print("My local config: ", my_local_config_path) ``` Then you can pass the local paths to the `pretrained_model_link_or_path` and `config` parameters. ```python pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True) ```
diffusers/docs/source/en/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 8509 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Understanding pipelines, models and schedulers [[open-in-colab]] 🧨 Diffusers is designed to be a user-friendly and flexible toolbox for building diffusion systems tailored to your use-case. At the core of the toolbox are models and schedulers. While the [`DiffusionPipeline`] bundles these components together for convenience, you can also unbundle the pipeline and use the models and schedulers separately to create new diffusion systems. In this tutorial, you'll learn how to use models and schedulers to assemble a diffusion system for inference, starting with a basic pipeline and then progressing to the Stable Diffusion pipeline. ## Deconstruct a basic pipeline A pipeline is a quick and easy way to run a model for inference, requiring no more than four lines of code to generate an image: ```py >>> from diffusers import DDPMPipeline >>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") >>> image = ddpm(num_inference_steps=25).images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/> </div> That was super easy, but how did the pipeline do that? Let's breakdown the pipeline and take a look at what's happening under the hood. In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps. To recreate the pipeline with the model and scheduler separately, let's write our own denoising process. 1. Load the model and scheduler: ```py >>> from diffusers import DDPMScheduler, UNet2DModel >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") >>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") ``` 2. Set the number of timesteps to run the denoising process for: ```py >>> scheduler.set_timesteps(50) ``` 3. Setting the scheduler timesteps creates a tensor with evenly spaced elements in it, 50 in this example. Each element corresponds to a timestep at which the model denoises an image. When you create the denoising loop later, you'll iterate over this tensor to denoise an image: ```py >>> scheduler.timesteps tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720, 700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440, 420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160, 140, 120, 100, 80, 60, 40, 20, 0]) ``` 4. 
Create some random noise with the same shape as the desired output: ```py >>> import torch >>> sample_size = model.config.sample_size >>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") ``` 5. Now write a loop to iterate over the timesteps. At each timestep, the model does a [`UNet2DModel.forward`] pass and returns the noisy residual. The scheduler's [`~DDPMScheduler.step`] method takes the noisy residual, timestep, and input and it predicts the image at the previous timestep. This output becomes the next input to the model in the denoising loop, and it'll repeat until it reaches the end of the `timesteps` array. ```py >>> input = noise >>> for t in scheduler.timesteps: ... with torch.no_grad(): ... noisy_residual = model(input, t).sample ... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample ... input = previous_noisy_sample ``` This is the entire denoising process, and you can use this same pattern to write any diffusion system. 6. The last step is to convert the denoised output into an image: ```py >>> from PIL import Image >>> import numpy as np >>> image = (input / 2 + 0.5).clamp(0, 1).squeeze() >>> image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy() >>> image = Image.fromarray(image) >>> image ``` In the next section, you'll put your skills to the test and breakdown the more complex Stable Diffusion pipeline. The steps are more or less the same. You'll initialize the necessary components, and set the number of timesteps to create a `timestep` array. The `timestep` array is used in the denoising loop, and for each element in this array, the model predicts a less noisy image. The denoising loop iterates over the `timestep`'s, and at each timestep, it outputs a noisy residual and the scheduler uses it to predict a less noisy image at the previous timestep. This process is repeated until you reach the end of the `timestep` array. Let's try it out! ## Deconstruct the Stable Diffusion pipeline Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler. As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models. <Tip> 💡 Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog for more details about how the VAE, UNet, and text encoder models work. </Tip> Now that you know what you need for the Stable Diffusion pipeline, load all these components with the [`~ModelMixin.from_pretrained`] method. 
You can find them in the pretrained [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) checkpoint, and each component is stored in a separate subfolder:

```py
>>> from PIL import Image
>>> import torch
>>> from transformers import CLIPTextModel, CLIPTokenizer
>>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler

>>> vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="vae", use_safetensors=True)
>>> tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="tokenizer")
>>> text_encoder = CLIPTextModel.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="text_encoder", use_safetensors=True
... )
>>> unet = UNet2DConditionModel.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", use_safetensors=True
... )
```

Instead of the default [`PNDMScheduler`], exchange it for the [`UniPCMultistepScheduler`] to see how easy it is to plug a different scheduler in:

```py
>>> from diffusers import UniPCMultistepScheduler

>>> scheduler = UniPCMultistepScheduler.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler")
```

To speed up inference, move the models to a GPU since, unlike the scheduler, they have trainable weights:

```py
>>> torch_device = "cuda"
>>> vae.to(torch_device)
>>> text_encoder.to(torch_device)
>>> unet.to(torch_device)
```

### Create text embeddings

The next step is to tokenize the text to generate embeddings. The text is used to condition the UNet model and steer the diffusion process towards something that resembles the input prompt.

<Tip>

💡 The `guidance_scale` parameter determines how much weight should be given to the prompt when generating an image.

</Tip>

Feel free to choose any prompt you like if you want to generate something else!

```py
>>> prompt = ["a photograph of an astronaut riding a horse"]
>>> height = 512  # default height of Stable Diffusion
>>> width = 512  # default width of Stable Diffusion
>>> num_inference_steps = 25  # Number of denoising steps
>>> guidance_scale = 7.5  # Scale for classifier-free guidance
>>> generator = torch.manual_seed(0)  # Seed generator to create the initial latent noise
>>> batch_size = len(prompt)
```

Tokenize the text and generate the embeddings from the prompt:

```py
>>> text_input = tokenizer(
...     prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
... )

>>> with torch.no_grad():
...     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
```

You'll also need to generate the *unconditional text embeddings*, which are the embeddings for the padding token. These need to have the same shape (`batch_size` and `seq_length`) as the conditional `text_embeddings`:

```py
>>> max_length = text_input.input_ids.shape[-1]
>>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt")
>>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
```

Let's concatenate the conditional and unconditional embeddings into a batch to avoid doing two forward passes:

```py
>>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
```

### Create random noise

Next, generate some initial random noise as a starting point for the diffusion process. This is the latent representation of the image, and it'll be gradually denoised.
At this point, the `latent` image is smaller than the final image size but that's okay though because the model will transform it into the final 512x512 image dimensions later. <Tip> 💡 The height and width are divided by 8 because the `vae` model has 3 down-sampling layers. You can check by running the following: ```py 2 ** (len(vae.config.block_out_channels) - 1) == 8 ``` </Tip> ```py >>> latents = torch.randn( ... (batch_size, unet.config.in_channels, height // 8, width // 8), ... generator=generator, ... device=torch_device, ... ) ``` ### Denoise the image Start by scaling the input with the initial noise distribution, *sigma*, the noise scale value, which is required for improved schedulers like [`UniPCMultistepScheduler`]: ```py >>> latents = latents * scheduler.init_noise_sigma ``` The last step is to create the denoising loop that'll progressively transform the pure noise in `latents` to an image described by your prompt. Remember, the denoising loop needs to do three things: 1. Set the scheduler's timesteps to use during denoising. 2. Iterate over the timesteps. 3. At each timestep, call the UNet model to predict the noise residual and pass it to the scheduler to compute the previous noisy sample. ```py >>> from tqdm.auto import tqdm >>> scheduler.set_timesteps(num_inference_steps) >>> for t in tqdm(scheduler.timesteps): ... # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. ... latent_model_input = torch.cat([latents] * 2) ... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t) ... # predict the noise residual ... with torch.no_grad(): ... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample ... # perform guidance ... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) ... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) ... # compute the previous noisy sample x_t -> x_t-1 ... latents = scheduler.step(noise_pred, t, latents).prev_sample ``` ### Decode the image The final step is to use the `vae` to decode the latent representation into an image and get the decoded output with `sample`: ```py # scale and decode the image latents with vae latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample ``` Lastly, convert the image to a `PIL.Image` to see your generated image! ```py >>> image = (image / 2 + 0.5).clamp(0, 1).squeeze() >>> image = (image.permute(1, 2, 0) * 255).to(torch.uint8).cpu().numpy() >>> image = Image.fromarray(image) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/> </div> ## Next steps From basic to complex pipelines, you've seen that all you really need to write your own diffusion system is a denoising loop. The loop should set the scheduler's timesteps, iterate over them, and alternate between calling the UNet model to predict the noise residual and passing it to the scheduler to compute the previous noisy sample. This is really what 🧨 Diffusers is designed for: to make it intuitive and easy to write your own diffusion system using models and schedulers. For your next steps, feel free to: * Learn how to [build and contribute a pipeline](../using-diffusers/contribute_pipeline) to 🧨 Diffusers. We can't wait and see what you'll come up with! 
* Explore [existing pipelines](../api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
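To recap, the generic denoising pattern described throughout this guide can be written as a small helper. The function below is only an illustrative sketch (the `denoise` name and its arguments are not part of the Diffusers API); it assumes a model and scheduler loaded as in the first section:

```py
>>> import torch

>>> def denoise(model, scheduler, noise, num_inference_steps=50):
...     # Generic pattern: set the timesteps, then repeatedly predict the noise
...     # residual and let the scheduler compute the previous, less noisy sample.
...     scheduler.set_timesteps(num_inference_steps)
...     sample = noise
...     for t in scheduler.timesteps:
...         with torch.no_grad():
...             noise_residual = model(sample, t).sample
...         sample = scheduler.step(noise_residual, t, sample).prev_sample
...     return sample
```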
diffusers/docs/source/en/using-diffusers/write_own_pipeline.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/write_own_pipeline.md", "repo_id": "diffusers", "token_count": 4156 }
129
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 설치 사용하시는 라이브러리에 맞는 🤗 Diffusers를 설치하세요. 🤗 Diffusers는 Python 3.8+, PyTorch 1.7.0+ 및 flax에서 테스트되었습니다. 사용중인 딥러닝 라이브러리에 대한 아래의 설치 안내를 따르세요. - [PyTorch 설치 안내](https://pytorch.org/get-started/locally/) - [Flax 설치 안내](https://flax.readthedocs.io/en/latest/) ## pip를 이용한 설치 [가상 환경](https://docs.python.org/3/library/venv.html)에 🤗 Diffusers를 설치해야 합니다. Python 가상 환경에 익숙하지 않은 경우 [가상환경 pip 설치 가이드](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)를 살펴보세요. 가상 환경을 사용하면 서로 다른 프로젝트를 더 쉽게 관리하고, 종속성간의 호환성 문제를 피할 수 있습니다. 프로젝트 디렉토리에 가상 환경을 생성하는 것으로 시작하세요: ```bash python -m venv .env ``` 그리고 가상 환경을 활성화합니다: ```bash source .env/bin/activate ``` 이제 다음의 명령어로 🤗 Diffusers를 설치할 준비가 되었습니다: **PyTorch의 경우** ```bash pip install diffusers["torch"] ``` **Flax의 경우** ```bash pip install diffusers["flax"] ``` ## 소스로부터 설치 소스에서 `diffusers`를 설치하기 전에, `torch` 및 `accelerate`이 설치되어 있는지 확인하세요. `torch` 설치에 대해서는 [torch docs](https://pytorch.org/get-started/locally/#start-locally)를 참고하세요. 다음과 같이 `accelerate`을 설치하세요. ```bash pip install accelerate ``` 다음 명령어를 사용하여 소스에서 🤗 Diffusers를 설치하세요: ```bash pip install git+https://github.com/huggingface/diffusers ``` 이 명령어는 최신 `stable` 버전이 아닌 최첨단 `main` 버전을 설치합니다. `main` 버전은 최신 개발 정보를 최신 상태로 유지하는 데 유용합니다. 예를 들어 마지막 공식 릴리즈 이후 버그가 수정되었지만, 새 릴리즈가 아직 출시되지 않은 경우입니다. 그러나 이는 `main` 버전이 항상 안정적이지 않을 수 있음을 의미합니다. 우리는 `main` 버전이 지속적으로 작동하도록 노력하고 있으며, 대부분의 문제는 보통 몇 시간 또는 하루 안에 해결됩니다. 문제가 발생하면 더 빨리 해결할 수 있도록 [Issue](https://github.com/huggingface/transformers/issues)를 열어주세요! ## 편집가능한 설치 다음을 수행하려면 편집가능한 설치가 필요합니다: * 소스 코드의 `main` 버전을 사용 * 🤗 Diffusers에 기여 (코드의 변경 사항을 테스트하기 위해 필요) 저장소를 복제하고 다음 명령어를 사용하여 🤗 Diffusers를 설치합니다: ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers ``` **PyTorch의 경우** ```sh pip install -e ".[torch]" ``` **Flax의 경우** ```sh pip install -e ".[flax]" ``` 이러한 명령어들은 저장소를 복제한 폴더와 Python 라이브러리 경로를 연결합니다. Python은 이제 일반 라이브러리 경로에 더하여 복제한 폴더 내부를 살펴봅니다. 예를들어 Python 패키지가 `~/anaconda3/envs/main/lib/python3.10/site-packages/`에 설치되어 있는 경우 Python은 복제한 폴더인 `~/diffusers/`도 검색합니다. <Tip warning={true}> 라이브러리를 계속 사용하려면 `diffusers` 폴더를 유지해야 합니다. </Tip> 이제 다음 명령어를 사용하여 최신 버전의 🤗 Diffusers로 쉽게 업데이트할 수 있습니다: ```bash cd ~/diffusers/ git pull ``` 이렇게 하면, 다음에 실행할 때 Python 환경이 🤗 Diffusers의 `main` 버전을 찾게 됩니다. ## 텔레메트리 로깅에 대한 알림 우리 라이브러리는 `from_pretrained()` 요청 중에 텔레메트리 정보를 원격으로 수집합니다. 이 데이터에는 Diffusers 및 PyTorch/Flax의 버전, 요청된 모델 또는 파이프라인 클래스, 그리고 허브에서 호스팅되는 경우 사전학습된 체크포인트에 대한 경로를 포함합니다. 이 사용 데이터는 문제를 디버깅하고 새로운 기능의 우선순위를 지정하는데 도움이 됩니다. 텔레메트리는 HuggingFace 허브에서 모델과 파이프라인을 불러올 때만 전송되며, 로컬 사용 중에는 수집되지 않습니다. 우리는 추가 정보를 공유하지 않기를 원하는 사람이 있다는 것을 이해하고 개인 정보를 존중하므로, 터미널에서 `DISABLE_TELEMETRY` 환경 변수를 설정하여 텔레메트리 수집을 비활성화할 수 있습니다. Linux/MacOS에서: ```bash export DISABLE_TELEMETRY=YES ``` Windows에서: ```bash set DISABLE_TELEMETRY=YES ```
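Once installed with any of the methods above, you can run a quick, optional sanity check to confirm that 🤗 Diffusers is importable and to see which version is active:

```py
# Optional check: confirm the installed version of 🤗 Diffusers.
import diffusers

print(diffusers.__version__)
```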
diffusers/docs/source/ko/installation.md/0
{ "file_path": "diffusers/docs/source/ko/installation.md", "repo_id": "diffusers", "token_count": 3689 }
130
# 여러 GPU를 사용한 분산 추론 분산 설정에서는 여러 개의 프롬프트를 동시에 생성할 때 유용한 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 또는 [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html)를 사용하여 여러 GPU에서 추론을 실행할 수 있습니다. 이 가이드에서는 분산 추론을 위해 🤗 Accelerate와 PyTorch Distributed를 사용하는 방법을 보여드립니다. ## 🤗 Accelerate 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index)는 분산 설정에서 추론을 쉽게 훈련하거나 실행할 수 있도록 설계된 라이브러리입니다. 분산 환경 설정 프로세스를 간소화하여 PyTorch 코드에 집중할 수 있도록 해줍니다. 시작하려면 Python 파일을 생성하고 [`accelerate.PartialState`]를 초기화하여 분산 환경을 생성하면, 설정이 자동으로 감지되므로 `rank` 또는 `world_size`를 명시적으로 정의할 필요가 없습니다. ['DiffusionPipeline`]을 `distributed_state.device`로 이동하여 각 프로세스에 GPU를 할당합니다. 이제 컨텍스트 관리자로 [`~accelerate.PartialState.split_between_processes`] 유틸리티를 사용하여 프로세스 수에 따라 프롬프트를 자동으로 분배합니다. ```py from accelerate import PartialState from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16) distributed_state = PartialState() pipeline.to(distributed_state.device) with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: result = pipeline(prompt).images[0] result.save(f"result_{distributed_state.process_index}.png") ``` Use the `--num_processes` argument to specify the number of GPUs to use, and call `accelerate launch` to run the script: ```bash accelerate launch run_distributed.py --num_processes=2 ``` <Tip>자세한 내용은 [🤗 Accelerate를 사용한 분산 추론](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) 가이드를 참조하세요. </Tip> ## Pytoerch 분산 PyTorch는 데이터 병렬 처리를 가능하게 하는 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)을 지원합니다. 시작하려면 Python 파일을 생성하고 `torch.distributed` 및 `torch.multiprocessing`을 임포트하여 분산 프로세스 그룹을 설정하고 각 GPU에서 추론용 프로세스를 생성합니다. 그리고 [`DiffusionPipeline`]도 초기화해야 합니다: 확산 파이프라인을 `rank`로 이동하고 `get_rank`를 사용하여 각 프로세스에 GPU를 할당하면 각 프로세스가 다른 프롬프트를 처리합니다: ```py import torch import torch.distributed as dist import torch.multiprocessing as mp from diffusers import DiffusionPipeline sd = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16) ``` 사용할 백엔드 유형, 현재 프로세스의 `rank`, `world_size` 또는 참여하는 프로세스 수로 분산 환경 생성을 처리하는 함수[`init_process_group`]를 만들어 추론을 실행해야 합니다. 2개의 GPU에서 추론을 병렬로 실행하는 경우 `world_size`는 2입니다. ```py def run_inference(rank, world_size): dist.init_process_group("nccl", rank=rank, world_size=world_size) sd.to(rank) if torch.distributed.get_rank() == 0: prompt = "a dog" elif torch.distributed.get_rank() == 1: prompt = "a cat" image = sd(prompt).images[0] image.save(f"./{'_'.join(prompt)}.png") ``` 분산 추론을 실행하려면 [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn)을 호출하여 `world_size`에 정의된 GPU 수에 대해 `run_inference` 함수를 실행합니다: ```py def main(): world_size = 2 mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True) if __name__ == "__main__": main() ``` 추론 스크립트를 완료했으면 `--nproc_per_node` 인수를 사용하여 사용할 GPU 수를 지정하고 `torchrun`을 호출하여 스크립트를 실행합니다: ```bash torchrun run_distributed.py --nproc_per_node=2 ```
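When there are more prompts than GPUs, [`~accelerate.PartialState.split_between_processes`] hands each process its own slice of the list, so the same script scales to larger batches of prompts. The following is an illustrative sketch of that pattern (the prompts and file names are placeholders):

```py
import torch
from accelerate import PartialState
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
distributed_state = PartialState()
pipeline.to(distributed_state.device)

prompts = ["a dog", "a cat", "a bird", "a fish"]  # more prompts than GPUs
with distributed_state.split_between_processes(prompts) as local_prompts:
    # Each process only sees its own slice of the prompt list.
    for idx, prompt in enumerate(local_prompts):
        image = pipeline(prompt).images[0]
        image.save(f"result_{distributed_state.process_index}_{idx}.png")
```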
diffusers/docs/source/ko/training/distributed_inference.md/0
{ "file_path": "diffusers/docs/source/ko/training/distributed_inference.md", "repo_id": "diffusers", "token_count": 2616 }
131
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided 이미지 인페인팅(inpainting)

[[open-in-colab]]

[`StableDiffusionInpaintPipeline`]은 마스크와 텍스트 프롬프트를 제공하여 이미지의 특정 부분을 편집할 수 있도록 합니다. 이 기능은 인페인팅 작업을 위해 특별히 훈련된 [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting)과 같은 Stable Diffusion 버전을 사용합니다.

먼저 [`StableDiffusionInpaintPipeline`] 인스턴스를 불러옵니다:

```python
import PIL
import requests
import torch
from io import BytesIO

from diffusers import StableDiffusionInpaintPipeline

pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
)
pipeline = pipeline.to("cuda")
```

나중에 교체할 강아지 이미지와 마스크를 다운로드하세요:

```python
def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
```

이제 마스크를 다른 것으로 교체하라는 프롬프트를 만들 수 있습니다:

```python
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```

`image` | `mask_image` | `prompt` | output |
:-------------------------:|:-------------------------:|:-------------------------:|-------------------------:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |

<Tip warning={true}>

이전의 실험적인 인페인팅 구현에서는 품질이 낮은 다른 프로세스를 사용했습니다. 이전 버전과의 호환성을 보장하기 위해 새 모델이 포함되지 않은 사전학습된 파이프라인을 불러오면 이전 인페인팅 방법이 계속 적용됩니다.

</Tip>

아래 Space에서 이미지 인페인팅을 직접 해보세요!

<iframe
	src="https://runwayml-stable-diffusion-inpainting.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
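To compare the input image, the mask, and the inpainted result side by side, you can optionally arrange them in a grid with the `make_image_grid` helper from `diffusers.utils` (an illustrative extra step, not required for inpainting):

```python
from diffusers.utils import make_image_grid

# Arrange the original image, the mask, and the inpainted output in one row.
grid = make_image_grid([init_image, mask_image, image], rows=1, cols=3)
grid.save("inpainting_comparison.png")
```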
diffusers/docs/source/ko/using-diffusers/inpaint.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/inpaint.md", "repo_id": "diffusers", "token_count": 1656 }
132
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <p align="center"> <br> <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/> <br> </p> # Diffusers 🤗 Diffusers é uma biblioteca de modelos de difusão de última geração para geração de imagens, áudio e até mesmo estruturas 3D de moléculas. Se você está procurando uma solução de geração simples ou queira treinar seu próprio modelo de difusão, 🤗 Diffusers é uma modular caixa de ferramentas que suporta ambos. Nossa biblioteca é desenhada com foco em [usabilidade em vez de desempenho](conceptual/philosophy#usability-over-performance), [simples em vez de fácil](conceptual/philosophy#simple-over-easy) e [customizável em vez de abstrações](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction). A Biblioteca tem três componentes principais: - Pipelines de última geração para a geração em poucas linhas de código. Têm muitos pipelines no 🤗 Diffusers, veja a tabela no pipeline [Visão geral](api/pipelines/overview) para uma lista completa de pipelines disponíveis e as tarefas que eles resolvem. - Intercambiáveis [agendadores de ruído](api/schedulers/overview) para balancear as compensações entre velocidade e qualidade de geração. - [Modelos](api/models) pré-treinados que podem ser usados como se fossem blocos de construção, e combinados com agendadores, para criar seu próprio sistema de difusão de ponta a ponta. <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutoriais</div> <p class="text-gray-700">Aprenda as competências fundamentais que precisa para iniciar a gerar saídas, construa seu próprio sistema de difusão, e treine um modelo de difusão. Nós recomendamos começar por aqui se você está utilizando o 🤗 Diffusers pela primeira vez!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Guias de utilização</div> <p class="text-gray-700">Guias práticos para ajudar você carregar pipelines, modelos, e agendadores. 
Você também aprenderá como usar os pipelines para tarefas específicas, controlar como as saídas são geradas, otimizar a velocidade de geração, e outras técnicas diferentes de treinamento.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Guias conceituais</div> <p class="text-gray-700">Compreenda porque a biblioteca foi desenhada da forma que ela é, e aprenda mais sobre as diretrizes éticas e implementações de segurança para o uso da biblioteca.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models/overview" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Referência</div> <p class="text-gray-700">Descrições técnicas de como funcionam as classes e métodos do 🤗 Diffusers</p> </a> </div> </div>
diffusers/docs/source/pt/index.md/0
{ "file_path": "diffusers/docs/source/pt/index.md", "repo_id": "diffusers", "token_count": 1653 }
133
<!--版权所有 2025 The HuggingFace Team。保留所有权利。 根据 Apache 许可证 2.0 版("许可证")授权;除非遵守许可证,否则不得使用此文件。 您可以在以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,根据许可证分发的软件按"原样"分发,不附带任何明示或暗示的担保或条件。请参阅许可证了解具体的语言管理权限和限制。 --> # 引导器 [Classifier-free guidance](https://huggingface.co/papers/2207.12598) 引导模型生成更好地匹配提示,通常用于提高生成质量、控制和提示的遵循度。有不同类型的引导方法,在 Diffusers 中,它们被称为*引导器*。与块类似,可以轻松切换和使用不同的引导器以适应不同的用例,而无需重写管道。 本指南将向您展示如何切换引导器、调整引导器参数,以及将它们加载并共享到 Hub。 ## 切换引导器 [`ClassifierFreeGuidance`] 是默认引导器,在使用 [`~ModularPipelineBlocks.init_pipeline`] 初始化管道时创建。它通过 `from_config` 创建,这意味着它不需要从模块化存储库加载规范。引导器不会列在 `modular_model_index.json` 中。 使用 [`~ModularPipeline.get_component_spec`] 来检查引导器。 ```py t2i_pipeline.get_component_spec("guider") ComponentSpec(name='guider', type_hint=<class 'diffusers.guiders.classifier_free_guidance.ClassifierFreeGuidance'>, description=None, config=FrozenDict([('guidance_scale', 7.5), ('guidance_rescale', 0.0), ('use_original_formulation', False), ('start', 0.0), ('stop', 1.0), ('_use_default_values', ['start', 'guidance_rescale', 'stop', 'use_original_formulation'])]), repo=None, subfolder=None, variant=None, revision=None, default_creation_method='from_config') ``` 通过将新引导器传递给 [`~ModularPipeline.update_components`] 来切换到不同的引导器。 > [!TIP] > 更改引导器将返回文本,让您知道您正在更改引导器类型。 > ```bash > ModularPipeline.update_components: 添加具有新类型的引导器: PerturbedAttentionGuidance, 先前类型: ClassifierFreeGuidance > ``` ```py from diffusers import LayerSkipConfig, PerturbedAttentionGuidance config = LayerSkipConfig(indices=[2, 9], fqn="mid_block.attentions.0.transformer_blocks", skip_attention=False, skip_attention_scores=True, skip_ff=False) guider = PerturbedAttentionGuidance( guidance_scale=5.0, perturbed_guidance_scale=2.5, perturbed_guidance_config=config ) t2i_pipeline.update_components(guider=guider) ``` 再次使用 [`~ModularPipeline.get_component_spec`] 来验证引导器类型是否不同。 ```py t2i_pipeline.get_component_spec("guider") ComponentSpec(name='guider', type_hint=<class 'diffusers.guiders.perturbed_attention_guidance.PerturbedAttentionGuidance'>, description=None, config=FrozenDict([('guidance_scale', 5.0), ('perturbed_guidance_scale', 2.5), ('perturbed_guidance_start', 0.01), ('perturbed_guidance_stop', 0.2), ('perturbed_guidance_layers', None), ('perturbed_guidance_config', LayerSkipConfig(indices=[2, 9], fqn='mid_block.attentions.0.transformer_blocks', skip_attention=False, skip_attention_scores=True, skip_ff=False, dropout=1.0)), ('guidance_rescale', 0.0), ('use_original_formulation', False), ('start', 0.0), ('stop', 1.0), ('_use_default_values', ['perturbed_guidance_start', 'use_original_formulation', 'perturbed_guidance_layers', 'stop', 'start', 'guidance_rescale', 'perturbed_guidance_stop']), ('_class_name', 'PerturbedAttentionGuidance'), ('_diffusers_version', '0.35.0.dev0')]), repo=None, subfolder=None, variant=None, revision=None, default_creation_method='from_config') ``` ## 加载自定义引导器 已经在 Hub 上保存并带有 `modular_model_index.json` 文件的引导器现在被视为 `from_pretrained` 组件,而不是 `from_config` 组件。 ```json { "guider": [ null, null, { "repo": "YiYiXu/modular-loader-t2i-guider", "revision": null, "subfolder": "pag_guider", "type_hint": [ "diffusers", "PerturbedAttentionGuidance" ], "variant": null } ] } ``` 引导器只有在调用 [`~ModularPipeline.load_default_components`] 之后才会创建,基于 `modular_model_index.json` 中的加载规范。 ```py t2i_pipeline = t2i_blocks.init_pipeline("YiYiXu/modular-doc-guider") # 在初始化时未创建 assert t2i_pipeline.guider is None t2i_pipeline.load_default_components() # 加载为 PAG 引导器 t2i_pipeline.guider ``` ## 更改引导器参数 引导器参数可以通过 
[`~ComponentSpec.create`] 方法或 [`~ModularPipeline.update_components`] 方法进行调整。下面的示例更改了 `guidance_scale` 值。 <hfoptions id="switch"> <hfoption id="create"> ```py guider_spec = t2i_pipeline.get_component_spec("guider") guider = guider_spec.create(guidance_scale=10) t2i_pipeline.update_components(guider=guider) ``` </hfoption> <hfoption id="update_components"> ```py guider_spec = t2i_pipeline.get_component_spec("guider") guider_spec.config["guidance_scale"] = 10 t2i_pipeline.update_components(guider=guider_spec) ``` </hfoption> </hfoptions> ## 上传自定义引导器 在自定义引导器上调用 [`~utils.PushToHubMixin.push_to_hub`] 方法,将其分享到 Hub。 ```py guider.push_to_hub("YiYiXu/modular-loader-t2i-guider", subfolder="pag_guider") ``` 要使此引导器可用于管道,可以修改 `modular_model_index.json` 文件或使用 [`~ModularPipeline.update_components`] 方法。 <hfoptions id="upload"> <hfoption id="modular_model_index.json"> 编辑 `modular_model_index.json` 文件,并添加引导器的加载规范,指向包含引导器配置的文件夹 例如。 ```json { "guider": [ "diffusers", "PerturbedAttentionGuidance", { "repo": "YiYiXu/modular-loader-t2i-guider", "revision": null, "subfolder": "pag_guider", "type_hint": [ "diffusers", "PerturbedAttentionGuidance" ], "variant": null } ], ``` </hfoption> <hfoption id="update_components"> 将 [`~ComponentSpec.default_creation_method`] 更改为 `from_pretrained` 并使用 [`~ModularPipeline.update_components`] 来更新引导器和组件规范以及管道配置。 > [!TIP] > 更改创建方法将返回文本,告知您正在将创建类型更改为 `from_pretrained`。 > ```bash > ModularPipeline.update_components: 将引导器的 default_creation_method 从 from_config 更改为 from_pretrained。 > ``` ```py guider_spec = t2i_pipeline.get_component_spec("guider") guider_spec.default_creation_method="from_pretrained" guider_spec.repo="YiYiXu/modular-loader-t2i-guider" guider_spec.subfolder="pag_guider" pag_guider = guider_spec.load() t2i_pipeline.update_components(guider=pag_guider) ``` 要使其成为管道的默认引导器,请调用 [`~utils.PushToHubMixin.push_to_hub`]。这是一个可选步骤,如果您仅在本地进行实验,则不需要。 ```py t2i_pipeline.push_to_hub("YiYiXu/modular-doc-guider") ``` </hfoption> </hfoptions>
diffusers/docs/source/zh/modular_diffusers/guiders.md/0
{ "file_path": "diffusers/docs/source/zh/modular_diffusers/guiders.md", "repo_id": "diffusers", "token_count": 3669 }
134
<!--Copyright 2025 The HuggingFace Team. All rights reserved. 根据 Apache License 2.0 许可证(以下简称"许可证")授权,除非符合许可证要求,否则不得使用本文件。您可以通过以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或以书面形式同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言权限和限制。 --> # ONNX Runtime 🤗 [Optimum](https://github.com/huggingface/optimum) 提供了兼容 ONNX Runtime 的 Stable Diffusion 流水线。您需要运行以下命令安装支持 ONNX Runtime 的 🤗 Optimum: ```bash pip install -q optimum["onnxruntime"] ``` 本指南将展示如何使用 ONNX Runtime 运行 Stable Diffusion 和 Stable Diffusion XL (SDXL) 流水线。 ## Stable Diffusion 要加载并运行推理,请使用 [`~optimum.onnxruntime.ORTStableDiffusionPipeline`]。若需加载 PyTorch 模型并实时转换为 ONNX 格式,请设置 `export=True`: ```python from optimum.onnxruntime import ORTStableDiffusionPipeline model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] pipeline.save_pretrained("./onnx-stable-diffusion-v1-5") ``` <Tip warning={true}> 当前批量生成多个提示可能会占用过高内存。在问题修复前,建议采用迭代方式而非批量处理。 </Tip> 如需离线导出 ONNX 格式流水线供后续推理使用,请使用 [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 命令: ```bash optimum-cli export onnx --model stable-diffusion-v1-5/stable-diffusion-v1-5 sd_v15_onnx/ ``` 随后进行推理时(无需再次指定 `export=True`): ```python from optimum.onnxruntime import ORTStableDiffusionPipeline model_id = "sd_v15_onnx" pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/stable_diffusion_v1_5_ort_sail_boat.png"> </div> 您可以在 🤗 Optimum [文档](https://huggingface.co/docs/optimum/) 中找到更多示例,Stable Diffusion 支持文生图、图生图和图像修复任务。 ## Stable Diffusion XL 要加载并运行 SDXL 推理,请使用 [`~optimum.onnxruntime.ORTStableDiffusionXLPipeline`]: ```python from optimum.onnxruntime import ORTStableDiffusionXLPipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipeline = ORTStableDiffusionXLPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Leonardo da Vinci" image = pipeline(prompt).images[0] ``` 如需导出 ONNX 格式流水线供后续推理使用,请运行: ```bash optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/ ``` SDXL 的 ONNX 格式目前支持文生图和图生图任务。
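The ONNX Runtime integration also covers image-to-image for Stable Diffusion, as noted above. The sketch below assumes the `ORTStableDiffusionImg2ImgPipeline` class from 🤗 Optimum and reuses the ONNX export from earlier; the input image path is a placeholder:

```python
from optimum.onnxruntime import ORTStableDiffusionImg2ImgPipeline
from PIL import Image

# Reuse the previously exported ONNX model directory.
pipeline = ORTStableDiffusionImg2ImgPipeline.from_pretrained("sd_v15_onnx")

init_image = Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder input image
prompt = "sailing ship in storm by Leonardo da Vinci"

# `strength` controls how strongly the original image is altered.
image = pipeline(prompt=prompt, image=init_image, strength=0.75).images[0]
image.save("img2img_onnx.png")
```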
diffusers/docs/source/zh/optimization/onnx.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/onnx.md", "repo_id": "diffusers", "token_count": 1653 }
135
<!--版权所有 2025 HuggingFace 团队。保留所有权利。 根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则您不得使用此文件。您可以在以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则根据许可证分发的软件按"原样"分发,不附带任何明示或暗示的担保或条件。请参阅许可证以了解具体的语言管理权限和限制。 --> # Kandinsky 2.2 <Tip warning={true}> 此脚本是实验性的,容易过拟合并遇到灾难性遗忘等问题。尝试探索不同的超参数以在您的数据集上获得最佳结果。 </Tip> Kandinsky 2.2 是一个多语言文本到图像模型,能够生成更逼真的图像。该模型包括一个图像先验模型,用于从文本提示创建图像嵌入,以及一个解码器模型,基于先验模型的嵌入生成图像。这就是为什么在 Diffusers 中您会找到两个独立的脚本用于 Kandinsky 2.2,一个用于训练先验模型,另一个用于训练解码器模型。您可以分别训练这两个模型,但为了获得最佳结果,您应该同时训练先验和解码器模型。 根据您的 GPU,您可能需要启用 `gradient_checkpointing`(⚠️ 不支持先验模型!)、`mixed_precision` 和 `gradient_accumulation_steps` 来帮助将模型装入内存并加速训练。您可以通过启用 [xFormers](../optimization/xformers) 的内存高效注意力来进一步减少内存使用(版本 [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) 在某些 GPU 上训练时失败,因此您可能需要安装开发版本)。 本指南探讨了 [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) 和 [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) 脚本,以帮助您更熟悉它,以及如何根据您的用例进行调整。 在运行脚本之前,请确保从源代码安装库: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` 然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项: ```bash cd examples/kandinsky2_2/text_to_image pip install -r requirements.txt ``` <Tip> 🤗 Accelerate 是一个帮助您在多个 GPU/TPU 上或使用混合精度进行训练的库。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate 的 [快速入门](https://huggingface.co/docs/accelerate/quicktour ) 了解更多。 </Tip> 初始化一个 🤗 Accelerate 环境: ```bash accelerate config ``` 要设置一个默认的 🤗 Accelerate 环境而不选择任何配置: ```bash accelerate config default ``` 或者,如果您的环境不支持交互式 shell,比如 notebook,您可以使用: ```py from accelerate.utils import write_basic_config write_basic_config() ``` 最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。 <Tip> 以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读脚本,并让我们知道您有任何疑问或顾虑。 </Tip> ## 脚本参数 训练脚本提供了许多参数来帮助您自定义训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) 函数中找到。训练脚本为每个参数提供了默认值,例如训练批次大小和学习率,但如果您愿意,也可以在训练命令中设置自己的值。 例如,要使用 fp16 格式的混合精度加速训练,请在训练命令中添加 `--mixed_precision` 参数: ```bash accelerate launch train_text_to_image_prior.py \ --mixed_precision="fp16" ``` 大多数参数与 [文本到图像](text2image#script-parameters) 训练指南中的参数相同,所以让我们直接进入 Kandinsky 训练脚本的 walkthrough! 
### Min-SNR 加权 [Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略可以通过重新平衡损失来帮助训练,实现更快的收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但 Min-SNR 与两种预测类型都兼容。此加权策略仅由 PyTorch 支持,在 Flax 训练脚本中不可用。 添加 `--snr_gamma` 参数并将其设置为推荐值 5.0: ```bash accelerate launch train_text_to_image_prior.py \ --snr_gamma=5.0 ``` ## 训练脚本 训练脚本也类似于 [文本到图像](text2image#training-script) 训练指南,但已修改以支持训练 prior 和 decoder 模型。本指南重点介绍 Kandinsky 2.2 训练脚本中独特的代码。 <hfoptions id="script"> <hfoption id="prior model"> [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) 函数包含代码 f 或准备数据集和训练模型。 您会立即注意到的主要区别之一是,训练脚本除了调度器和分词器外,还加载了一个 [`~transformers.CLIPImageProcessor`] 用于预处理图像,以及一个 [`~transformers.CLIPVisionModelWithProjection`] 模型用于编码图像: ```py noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample") image_processor = CLIPImageProcessor.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_processor" ) tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer") with ContextManagers(deepspeed_zero_init_disabled_context_manager()): image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype ).eval() text_encoder = CLIPTextModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype ).eval() ``` Kandinsky 使用一个 [`PriorTransformer`] 来生成图像嵌入,因此您需要设置优化器来学习先验模型的参数。 ```py prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior") prior.train() optimizer = optimizer_cls( prior.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` 接下来,输入标题被分词,图像由 [`~transformers.CLIPImageProcessor`] [预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632): ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples) return examples ``` 最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) 将输入图像转换为潜在表示,向图像嵌入添加噪声,并进行预测: ```py model_pred = prior( noisy_latents, timestep=timesteps, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding ``` 如果您想了解更多关于训练循环的工作原理,请查看 [理解管道、模型和调度器](../using-diffusers/write_own_pipeline) 教程,该教程分解了去噪过程的基本模式。 </hfoption> <hfoption id="decoder model"> The [`main()`](https://github.com/huggingface/di ffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) 函数包含准备数据集和训练模型的代码。 与之前的模型不同,解码器初始化一个 [`VQModel`] 来将潜在变量解码为图像,并使用一个 [`UNet2DConditionModel`]: ```py with ContextManagers(deepspeed_zero_init_disabled_context_manager()): vae = VQModel.from_pretrained( args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype ).eval() image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, 
subfolder="image_encoder", torch_dtype=weight_dtype ).eval() unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") ``` 接下来,脚本包括几个图像变换和一个用于对图像应用变换并返回像素值的[预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622)函数: ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values return examples ``` 最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706)处理将图像转换为潜在变量、添加噪声和预测噪声残差。 如果您想了解更多关于训练循环如何工作的信息,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。 ```py model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4] ``` </hfoption> </hfoptions> ## 启动脚本 一旦您完成了所有更改或接受默认配置,就可以启动训练脚本了!🚀 您将在[Naruto BLIP 字幕](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)数据集上进行训练,以生成您自己的Naruto角色,但您也可以通过遵循[创建用于训练的数据集](create_dataset)指南来创建和训练您自己的数据集。将环境变量 `DATASET_NAME` 设置为Hub上数据集的名称,或者如果您在自己的文件上训练,将环境变量 `TRAIN_DIR` 设置为数据集的路径。 如果您在多个GPU上训练,请在 `accelerate launch` 命令中添加 `--multi_gpu` 参数。 <Tip> 要使用Weights & Biases监控训练进度,请在训练命令中添加 `--report_to=wandb` 参数。您还需要 建议在训练命令中添加 `--validation_prompt` 以跟踪结果。这对于调试模型和查看中间结果非常有用。 </Tip> <hfoptions id="training-inference"> <hfoption id="prior model"> ```bash export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \ --dataset_name=$DATASET_NAME \ --resolution=768 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --checkpoints_total_limit=3 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --validation_prompts="A robot naruto, 4k photo" \ --report_to="wandb" \ --push_to_hub \ --output_dir="kandi2-prior-naruto-model" ``` </hfoption> <hfoption id="decoder model"> ```bash export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \ --dataset_name=$DATASET_NAME \ --resolution=768 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --checkpoints_total_limit=3 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --validation_prompts="A robot naruto, 4k photo" \ --report_to="wandb" \ --push_to_hub \ --output_dir="kandi2-decoder-naruto-model" ``` </hfoption> </hfoptions> 训练完成后,您可以使用新训练的模型进行推理! <hfoptions id="training-inference"> <hfoption id="prior model"> ```py from diffusers import AutoPipelineForText2Image, DiffusionPipeline import torch prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16) prior_components = {"prior_" + k: v for k,v in prior_pipeline.components.items()} pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt="A robot naruto, 4k photo" image = pipeline(prompt=prompt, negative_prompt=negative_prompt).images[0] ``` <Tip> 可以随意将 `kandinsky-community/kandinsky-2-2-decoder` 替换为您自己训练的 decoder 检查点! 
</Tip> </hfoption> <hfoption id="decoder model"> ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt="A robot naruto, 4k photo" image = pipeline(prompt=prompt).images[0] ``` 对于 decoder 模型,您还可以从保存的检查点进行推理,这对于查看中间结果很有用。在这种情况下,将检查点加载到 UNet 中: ```py from diffusers import AutoPipelineForText2Image, UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-<N>/unet") pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() image = pipeline(prompt="A robot naruto, 4k photo").images[0] ``` </hfoption> </hfoptions> ## 后续步骤 恭喜您训练了一个 Kandinsky 2.2 模型!要了解更多关于如何使用您的新模型的信息,以下指南可能会有所帮助: - 阅读 [Kandinsky](../using-diffusers/kandinsky) 指南,学习如何将其用于各种不同的任务(文本到图像、图像到图像、修复、插值),以及如何与 ControlNet 结合使用。 - 查看 [DreamBooth](dreambooth) 和 [LoRA](lora) 训练指南,学习如何使用少量示例图像训练个性化的 Kandinsky 模型。这两种训练技术甚至可以结合使用!
diffusers/docs/source/zh/training/kandinsky.md/0
{ "file_path": "diffusers/docs/source/zh/training/kandinsky.md", "repo_id": "diffusers", "token_count": 8088 }
136
import inspect from typing import List, Optional, Union import torch from torch import nn from torch.nn import functional as F from torchvision import transforms from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput class MakeCutouts(nn.Module): def __init__(self, cut_size, cut_power=1.0): super().__init__() self.cut_size = cut_size self.cut_power = cut_power def forward(self, pixel_values, num_cutouts): sideY, sideX = pixel_values.shape[2:4] max_size = min(sideX, sideY) min_size = min(sideX, sideY, self.cut_size) cutouts = [] for _ in range(num_cutouts): size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size) offsetx = torch.randint(0, sideX - size + 1, ()) offsety = torch.randint(0, sideY - size + 1, ()) cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size] cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size)) return torch.cat(cutouts) def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def set_requires_grad(model, value): for param in model.parameters(): param.requires_grad = value class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin): """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000 - https://github.com/Jack000/glid-3-xl - https://github.dev/crowsonkb/k-diffusion """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) self.cut_out_size = ( feature_extractor.size if isinstance(feature_extractor.size, int) else feature_extractor.size["shortest_edge"] ) self.make_cutouts = MakeCutouts(self.cut_out_size) set_requires_grad(self.text_encoder, False) set_requires_grad(self.clip_model, False) def freeze_vae(self): set_requires_grad(self.vae, False) def unfreeze_vae(self): set_requires_grad(self.vae, True) def freeze_unet(self): set_requires_grad(self.unet, False) def unfreeze_unet(self): set_requires_grad(self.unet, True) @torch.enable_grad() def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts=True, ): latents = latents.detach().requires_grad_() latent_model_input = self.scheduler.scale_model_input(latents, timestep) # predict the noise residual noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula 
(12) from https://huggingface.co/papers/2010.02502 pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) fac = torch.sqrt(beta_prod_t) sample = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, LMSDiscreteScheduler): sigma = self.scheduler.sigmas[index] sample = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler)} not supported") sample = 1 / self.vae.config.scaling_factor * sample image = self.vae.decode(sample).sample image = (image / 2 + 0.5).clamp(0, 1) if use_cutouts: image = self.make_cutouts(image, num_cutouts) else: image = transforms.Resize(self.cut_out_size)(image) image = self.normalize(image).to(latents.dtype) image_embeddings_clip = self.clip_model.get_image_features(image) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) if use_cutouts: dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip) dists = dists.view([num_cutouts, sample.shape[0], -1]) loss = dists.sum(2).mean(0).sum() * clip_guidance_scale else: loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale grads = -torch.autograd.grad(loss, latents)[0] if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents.detach() + grads * (sigma**2) noise_pred = noise_pred_original else: noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads return noise_pred, latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = 512, width: Optional[int] = 512, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, clip_prompt: Optional[Union[str, List[str]]] = None, num_cutouts: Optional[int] = 4, use_cutouts: Optional[bool] = True, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # get prompt text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) if clip_guidance_scale > 0: if clip_prompt is not None: clip_text_input = self.tokenizer( clip_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).input_ids.to(self.device) else: clip_text_input = text_input.input_ids.to(self.device) text_embeddings_clip = self.clip_model.get_text_features(clip_text_input) text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True) # duplicate text embeddings clip for each generation per prompt text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: 
https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: max_length = text_input.input_ids.shape[-1] uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform classifier free guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: text_embeddings_for_guidance = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) noise_pred, latents = self.cond_fn( latents, t, i, text_embeddings_for_guidance, noise_pred, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
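For orientation, a minimal usage sketch of the CLIP-guided pipeline above. The loading path shown here (`DiffusionPipeline.from_pretrained` with `custom_pipeline="clip_guided_stable_diffusion"` plus explicit `clip_model`/`feature_extractor` arguments) and the checkpoint ids are assumptions for illustration; only the call-time parameters mirror the `__call__` signature defined above.

```py
# Hedged usage sketch -- checkpoint ids and the clip_model/feature_extractor
# keyword arguments are assumptions; call-time arguments mirror __call__ above.
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

clip_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"  # assumed CLIP checkpoint
clip_model = CLIPModel.from_pretrained(clip_id, torch_dtype=torch.float16)
feature_extractor = CLIPImageProcessor.from_pretrained(clip_id)

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base checkpoint
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "a fantasy landscape, artstation",
    clip_guidance_scale=100,  # strength of the CLIP gradient guidance
    num_cutouts=4,            # random crops fed to the CLIP image encoder
    use_cutouts=True,
    guidance_scale=7.5,
    num_inference_steps=50,
    eta=0.0,                  # only used by DDIM-style schedulers
).images[0]
image.save("clip_guided.png")
```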
diffusers/examples/community/clip_guided_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/clip_guided_stable_diffusion.py", "repo_id": "diffusers", "token_count": 6482 }
137
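The heart of the guidance in the pipeline above is the gradient step inside `cond_fn`: decode the latents, embed the decoded image with CLIP, measure the spherical distance to the text embedding, then push either the latents (LMS-style schedulers) or the predicted noise (DDIM/PNDM-style schedulers) along the negative gradient. A minimal sketch of just that step follows; the helper name and its signature are assumptions, and `loss_fn` stands in for the VAE decode plus CLIP embedding that the pipeline performs.

```py
# Distilled sketch of the gradient step performed by cond_fn above.
# loss_fn(latents) must decode the latents, embed the image with CLIP and return
# the spherical distance to the text embedding, scaled by clip_guidance_scale.
import torch

def clip_guidance_step(latents, noise_pred, loss_fn, beta_prod_t, sigma=None, use_lms=False):
    latents = latents.detach().requires_grad_()
    loss = loss_fn(latents)
    grads = -torch.autograd.grad(loss, latents)[0]
    if use_lms:
        # LMSDiscreteScheduler branch: nudge the latents, keep the original noise prediction.
        return noise_pred, latents.detach() + grads * (sigma**2)
    # DDIM/PNDM-style branch: fold the gradient into the predicted noise instead.
    # beta_prod_t is the scalar tensor 1 - alpha_prod_t at the current timestep.
    return noise_pred - torch.sqrt(beta_prod_t) * grads, latents
```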
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.configuration_utils import register_to_config from diffusers.image_processor import VaeImageProcessor from diffusers.models.autoencoders import AutoencoderKL from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class UNet2DConditionModelHighResFix(UNet2DConditionModel): r""" A conditional 2D UNet model that applies Kohya fix proposed for high resolution image generation. This model inherits from [`UNet2DConditionModel`]. Check the superclass documentation for learning about all the parameters. Parameters: high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`): Enables Kohya fix for high resolution generation. The activation maps are scaled based on the scale_factor up to the timestep at specified block_num. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__(self, high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], **kwargs): super().__init__(**kwargs) if high_res_fix: self.config.high_res_fix = sorted(high_res_fix, key=lambda x: x["timestep"], reverse=True) @classmethod def _resize(cls, sample, target=None, scale_factor=1, mode="bicubic"): dtype = sample.dtype if dtype == torch.bfloat16: sample = sample.to(torch.float32) if target is not None: if sample.shape[-2:] != target.shape[-2:]: sample = nn.functional.interpolate(sample, size=target.shape[-2:], mode=mode, align_corners=False) elif scale_factor != 1: sample = nn.functional.interpolate(sample, scale_factor=scale_factor, mode=mode, align_corners=False) return sample.to(dtype) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`UNet2DConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed through the `self.time_embedding` layer to obtain the timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): A tuple of tensors that if specified are added to the residuals of down unet blocks. mid_block_additional_residual: (`torch.Tensor`, *optional*): A tensor that if specified is added to the residual of the middle unet block. 
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s) encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None for dim in sample.shape[-2:]: if dim % default_overall_up_factor != 0: # Forward upsample size to force interpolation output size. forward_upsample_size = True break # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time t_emb = self.get_time_embed(sample=sample, timestep=timestep) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) if class_emb is not None: if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb aug_emb = self.get_aug_embed( emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs ) if self.config.addition_embed_type == "image_hint": aug_emb, hint = aug_emb sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) encoder_hidden_states = self.process_encoder_hidden_states( encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs ) # 2. 
pre-process sample = self.conv_in(sample) # 2.5 GLIGEN position net if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: cross_attention_kwargs = cross_attention_kwargs.copy() gligen_args = cross_attention_kwargs.pop("gligen") cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} # 3. down # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated # to the internal blocks and will raise deprecation warnings. this will be confusing for our users. if cross_attention_kwargs is not None: cross_attention_kwargs = cross_attention_kwargs.copy() lora_scale = cross_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets is_adapter = down_intrablock_additional_residuals is not None # maintain backward compatibility for legacy usage, where # T2I-Adapter and ControlNet both use down_block_additional_residuals arg # but can only use one or the other if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: deprecate( "T2I should not use down_block_additional_residuals", "1.3.0", "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ", standard_warn=False, ) down_intrablock_additional_residuals = down_block_additional_residuals is_adapter = True down_block_res_samples = (sample,) for down_i, downsample_block in enumerate(self.down_blocks): if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D additional_residuals = {} if is_adapter and len(down_intrablock_additional_residuals) > 0: additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_intrablock_additional_residuals) > 0: sample += down_intrablock_additional_residuals.pop(0) down_block_res_samples += res_samples # kohya high res fix if self.config.high_res_fix: for high_res_fix in self.config.high_res_fix: if timestep > high_res_fix["timestep"] and down_i == high_res_fix["block_num"]: sample = self.__class__._resize(sample, scale_factor=high_res_fix["scale_factor"]) break if is_controlnet: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. 
mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: sample = self.mid_block(sample, emb) # To support T2I-Adapter-XL if ( is_adapter and len(down_intrablock_additional_residuals) > 0 and sample.shape == down_intrablock_additional_residuals[0].shape ): sample += down_intrablock_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # up scaling of kohya high res fix if self.config.high_res_fix is not None: if res_samples[0].shape[-2:] != sample.shape[-2:]: sample = self.__class__._resize(sample, target=res_samples[0]) res_samples_up_sampled = (res_samples[0],) for res_sample in res_samples[1:]: res_samples_up_sampled += (self.__class__._resize(res_sample, target=res_samples[0]),) res_samples = res_samples_up_sampled # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) @classmethod def from_unet(cls, unet: UNet2DConditionModel, high_res_fix: list): config = dict((unet.config)) config["high_res_fix"] = high_res_fix unet_high_res = cls(**config) unet_high_res.load_state_dict(unet.state_dict()) unet_high_res.to(unet.dtype) return unet_high_res EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="kohya_hires_fix", torch_dtype=torch.float16, high_res_fix=[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt, height=1000, width=1600).images[0] ``` """ class StableDiffusionHighResFixPipeline(StableDiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion with Kohya fix for high resolution generation. This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods. 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`): Enables Kohya fix for high resolution generation. The activation maps are scaled based on the scale_factor up to the timestep at specified block_num. """ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], ): super().__init__( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, requires_safety_checker=requires_safety_checker, ) unet = UNet2DConditionModelHighResFix.from_unet(unet=unet, high_res_fix=high_res_fix) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker)
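To make the control flow of the Kohya fix easier to follow, here is a small standalone sketch of the rule applied in the down-block loop of `forward` above: for each configured entry, the activation map of the matching down block is rescaled while the current timestep is still above the entry's threshold. The helper name and the toy tensor shape are illustrative; the interpolation call mirrors `_resize`.

```py
# Illustrative sketch of the scaling rule used in UNet2DConditionModelHighResFix.forward.
import torch
import torch.nn.functional as F

HIGH_RES_FIX = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}]

def kohya_scale_factor(timestep, block_index, high_res_fix=HIGH_RES_FIX):
    """Return the scale factor for this down block at this timestep (1.0 = unchanged)."""
    for entry in sorted(high_res_fix, key=lambda x: x["timestep"], reverse=True):
        if timestep > entry["timestep"] and block_index == entry["block_num"]:
            return entry["scale_factor"]
    return 1.0

sample = torch.randn(1, 320, 128, 128)  # toy activation map
for t in (950, 700, 500):
    factor = kohya_scale_factor(t, block_index=1)
    out = sample if factor == 1.0 else F.interpolate(
        sample, scale_factor=factor, mode="bicubic", align_corners=False
    )
    # Early, noisy steps (t > 600) shrink the block-1 activations; later steps do not.
    print(t, tuple(out.shape))
```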
diffusers/examples/community/kohya_hires_fix.py/0
{ "file_path": "diffusers/examples/community/kohya_hires_fix.py", "repo_id": "diffusers", "token_count": 10596 }
138
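As a follow-up to the high-res-fix model above, a short sketch of attaching it to an existing checkpoint with `from_unet`, which copies the original UNet's config and weights into the subclass. The checkpoint id is an assumption, and the sketch presumes `UNet2DConditionModelHighResFix` from the file above is already in scope.

```py
# Hedged sketch: wrap a stock Stable Diffusion UNet with the high-res-fix subclass
# defined above (assumed to be importable/in scope); the checkpoint id is illustrative.
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
)
unet_hr = UNet2DConditionModelHighResFix.from_unet(
    unet=unet, high_res_fix=[{"timestep": 600, "scale_factor": 0.5, "block_num": 1}]
)
# unet_hr keeps the original weights and config but downscales the block-1
# activations by 0.5 while the diffusion timestep is above 600.
```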
# Copyright 2025 The DEVAIEXP Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from PIL import Image from transformers import ( CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import ( AutoencoderKL, ControlNetModel, ControlNetUnionModel, MultiControlNetModel, UNet2DConditionModel, ) from diffusers.models.attention_processor import ( AttnProcessor2_0, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.import_utils import is_invisible_watermark_available from diffusers.utils.torch_utils import is_compiled_module, randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from diffusers.utils import is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py import torch from diffusers import DiffusionPipeline, ControlNetUnionModel, AutoencoderKL, UniPCMultistepScheduler from diffusers.utils import load_image from PIL import Image device = "cuda" # Initialize the models and pipeline controlnet = ControlNetUnionModel.from_pretrained( "brad-twinkl/controlnet-union-sdxl-1.0-promax", torch_dtype=torch.float16 ).to(device=device) vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to(device=device) model_id = "SG161222/RealVisXL_V5.0" pipe = StableDiffusionXLControlNetTileSRPipeline.from_pretrained( model_id, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16" ).to(device) pipe.enable_model_cpu_offload() # << Enable this if you have limited VRAM pipe.enable_vae_tiling() # << Enable this if you have limited VRAM pipe.enable_vae_slicing() # << Enable this if you have limited VRAM # Set selected scheduler pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) # Load image control_image = load_image("https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/1.jpg") original_height = control_image.height original_width = control_image.width print(f"Current 
resolution: H:{original_height} x W:{original_width}") # Pre-upscale image for tiling resolution = 4096 tile_gaussian_sigma = 0.3 max_tile_size = 1024 # or 1280 current_size = max(control_image.size) scale_factor = max(2, resolution / current_size) new_size = (int(control_image.width * scale_factor), int(control_image.height * scale_factor)) image = control_image.resize(new_size, Image.LANCZOS) # Update target height and width target_height = image.height target_width = image.width print(f"Target resolution: H:{target_height} x W:{target_width}") # Calculate overlap size normal_tile_overlap, border_tile_overlap = calculate_overlap(target_width, target_height) # Set other params tile_weighting_method = TileWeightingMethod.COSINE.value guidance_scale = 4 num_inference_steps = 35 denoising_strenght = 0.65 controlnet_strength = 1.0 prompt = "high-quality, noise-free edges, high quality, 4k, hd, 8k" negative_prompt = "blurry, pixelated, noisy, low resolution, artifacts, poor details" # Image generation control_image = pipe( image=image, control_image=control_image, control_mode=[6], controlnet_conditioning_scale=float(controlnet_strength), prompt=prompt, negative_prompt=negative_prompt, normal_tile_overlap=normal_tile_overlap, border_tile_overlap=border_tile_overlap, height=target_height, width=target_width, original_size=(original_width, original_height), target_size=(target_width, target_height), guidance_scale=guidance_scale, strength=float(denoising_strenght), tile_weighting_method=tile_weighting_method, max_tile_size=max_tile_size, tile_gaussian_sigma=float(tile_gaussian_sigma), num_inference_steps=num_inference_steps, )["images"][0] ``` """ # This function was copied and adapted from https://huggingface.co/spaces/gokaygokay/TileUpscalerV2, licensed under Apache 2.0. def _adaptive_tile_size(image_size, base_tile_size=512, max_tile_size=1280): """ Calculate the adaptive tile size based on the image dimensions, ensuring the tile respects the aspect ratio and stays within the specified size limits. 
""" width, height = image_size aspect_ratio = width / height if aspect_ratio > 1: # Landscape orientation tile_width = min(width, max_tile_size) tile_height = min(int(tile_width / aspect_ratio), max_tile_size) else: # Portrait or square orientation tile_height = min(height, max_tile_size) tile_width = min(int(tile_height * aspect_ratio), max_tile_size) # Ensure the tile size is not smaller than the base_tile_size tile_width = max(tile_width, base_tile_size) tile_height = max(tile_height, base_tile_size) return tile_width, tile_height # Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py def _tile2pixel_indices( tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height ): """Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image Returns a tuple with: - Starting coordinates of rows in pixel space - Ending coordinates of rows in pixel space - Starting coordinates of columns in pixel space - Ending coordinates of columns in pixel space """ # Calculate initial indices px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap) px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap) # Calculate end indices px_row_end = px_row_init + tile_height px_col_end = px_col_init + tile_width # Ensure the last tile does not exceed the image dimensions px_row_end = min(px_row_end, image_height) px_col_end = min(px_col_end, image_width) return px_row_init, px_row_end, px_col_init, px_col_end # Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py def _tile2latent_indices( tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height ): """Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image Returns a tuple with: - Starting coordinates of rows in latent space - Ending coordinates of rows in latent space - Starting coordinates of columns in latent space - Ending coordinates of columns in latent space """ # Get pixel indices px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices( tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height ) # Convert to latent space latent_row_init = px_row_init // 8 latent_row_end = px_row_end // 8 latent_col_init = px_col_init // 8 latent_col_end = px_col_end // 8 latent_height = image_height // 8 latent_width = image_width // 8 # Ensure the last tile does not exceed the latent dimensions latent_row_end = min(latent_row_end, latent_height) latent_col_end = min(latent_col_end, latent_width) return latent_row_init, latent_row_end, latent_col_init, latent_col_end # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class StableDiffusionXLControlNetTileSRPipeline( DiffusionPipeline, 
StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, ): r""" Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. controlnet ([`ControlNetUnionModel`]): Provides additional conditioning to the unet during the denoising process. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: ControlNetUnionModel, scheduler: KarrasDiffusionSchedulers, requires_aesthetics_score: bool = False, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() if not isinstance(controlnet, ControlNetUnionModel): raise ValueError("Expected `controlnet` to be of type `ControlNetUnionModel`.") self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) def calculate_overlap(self, width, height, base_overlap=128): """ Calculates dynamic overlap based on the image's aspect ratio. Args: width (int): Width of the image in pixels. height (int): Height of the image in pixels. base_overlap (int, optional): Base overlap value in pixels. Defaults to 128. Returns: tuple: A tuple containing: - row_overlap (int): Overlap between tiles in consecutive rows. - col_overlap (int): Overlap between tiles in consecutive columns. """ ratio = height / width if ratio < 1: # Image is wider than tall return base_overlap // 2, base_overlap else: # Image is taller than wide return base_overlap, base_overlap * 2 class TileWeightingMethod(Enum): """Mode in which the tile weights will be generated""" COSINE = "Cosine" GAUSSIAN = "Gaussian" # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) dtype = text_encoders[0].dtype if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) text_encoder.to(dtype) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = 
pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, image, strength, num_inference_steps, normal_tile_overlap, border_tile_overlap, max_tile_size, tile_gaussian_sigma, tile_weighting_method, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if num_inference_steps is None: raise ValueError("`num_inference_steps` cannot be None.") elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError( f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" f" {type(num_inference_steps)}." ) if normal_tile_overlap is None: raise ValueError("`normal_tile_overlap` cannot be None.") elif not isinstance(normal_tile_overlap, int) or normal_tile_overlap < 64: raise ValueError( f"`normal_tile_overlap` has to be greater than 64 but is {normal_tile_overlap} of type" f" {type(normal_tile_overlap)}." ) if border_tile_overlap is None: raise ValueError("`border_tile_overlap` cannot be None.") elif not isinstance(border_tile_overlap, int) or border_tile_overlap < 128: raise ValueError( f"`border_tile_overlap` has to be greater than 128 but is {border_tile_overlap} of type" f" {type(border_tile_overlap)}." 
) if max_tile_size is None: raise ValueError("`max_tile_size` cannot be None.") elif not isinstance(max_tile_size, int) or max_tile_size not in (1024, 1280): raise ValueError( f"`max_tile_size` has to be in 1024 or 1280 but is {max_tile_size} of type {type(max_tile_size)}." ) if tile_gaussian_sigma is None: raise ValueError("`tile_gaussian_sigma` cannot be None.") elif not isinstance(tile_gaussian_sigma, float) or tile_gaussian_sigma <= 0: raise ValueError( f"`tile_gaussian_sigma` has to be a positive float but is {tile_gaussian_sigma} of type" f" {type(tile_gaussian_sigma)}." ) if tile_weighting_method is None: raise ValueError("`tile_weighting_method` cannot be None.") elif not isinstance(tile_weighting_method, str) or tile_weighting_method not in [ t.value for t in self.TileWeightingMethod ]: raise ValueError( f"`tile_weighting_method` has to be a string in ({[t.value for t in self.TileWeightingMethod]}) but is {tile_weighting_method} of type" f" {type(tile_weighting_method)}." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(image, prompt) elif ( isinstance(self.controlnet, ControlNetUnionModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetUnionModel) ): self.check_image(image, prompt) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetUnionModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetUnionModel) ) or ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image def check_image(self, image, prompt): image_is_pil = isinstance(image, Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) latents_mean = latents_std = None if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if 
hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None, ): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list( negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) ) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if ( expected_add_embed_dim > passed_add_embed_dim and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." ) elif ( expected_add_embed_dim < passed_add_embed_dim and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." ) elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return add_time_ids, add_neg_time_ids def _generate_cosine_weights(self, tile_width, tile_height, nbatches, device, dtype): """ Generates cosine weights as a PyTorch tensor for blending tiles. Args: tile_width (int): Width of the tile in pixels. tile_height (int): Height of the tile in pixels. nbatches (int): Number of batches. device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu'). dtype (torch.dtype): Data type of the tensor (e.g., torch.float32). Returns: torch.Tensor: A tensor containing cosine weights for blending tiles, expanded to match batch and channel dimensions. 
""" # Convert tile dimensions to latent space latent_width = tile_width // 8 latent_height = tile_height // 8 # Generate x and y coordinates in latent space x = np.arange(0, latent_width) y = np.arange(0, latent_height) # Calculate midpoints midpoint_x = (latent_width - 1) / 2 midpoint_y = (latent_height - 1) / 2 # Compute cosine probabilities for x and y x_probs = np.cos(np.pi * (x - midpoint_x) / latent_width) y_probs = np.cos(np.pi * (y - midpoint_y) / latent_height) # Create a 2D weight matrix using the outer product weights_np = np.outer(y_probs, x_probs) # Convert to a PyTorch tensor with the correct device and dtype weights_torch = torch.tensor(weights_np, device=device, dtype=dtype) # Expand for batch and channel dimensions tile_weights_expanded = torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1)) return tile_weights_expanded def _generate_gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype, sigma=0.05): """ Generates Gaussian weights as a PyTorch tensor for blending tiles in latent space. Args: tile_width (int): Width of the tile in pixels. tile_height (int): Height of the tile in pixels. nbatches (int): Number of batches. device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu'). dtype (torch.dtype): Data type of the tensor (e.g., torch.float32). sigma (float, optional): Standard deviation of the Gaussian distribution. Controls the smoothness of the weights. Defaults to 0.05. Returns: torch.Tensor: A tensor containing Gaussian weights for blending tiles, expanded to match batch and channel dimensions. """ # Convert tile dimensions to latent space latent_width = tile_width // 8 latent_height = tile_height // 8 # Generate Gaussian weights in latent space x = np.linspace(-1, 1, latent_width) y = np.linspace(-1, 1, latent_height) xx, yy = np.meshgrid(x, y) gaussian_weight = np.exp(-(xx**2 + yy**2) / (2 * sigma**2)) # Convert to a PyTorch tensor with the correct device and dtype weights_torch = torch.tensor(gaussian_weight, device=device, dtype=dtype) # Expand for batch and channel dimensions weights_expanded = weights_torch.unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions weights_expanded = weights_expanded.expand(nbatches, -1, -1, -1) # Expand to the number of batches return weights_expanded def _get_num_tiles(self, height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap): """ Calculates the number of tiles needed to cover an image, choosing the appropriate formula based on the ratio between the image size and the tile size. This function automatically selects between two formulas: 1. A universal formula for typical cases (image-to-tile ratio <= 6:1). 2. A specialized formula with border tile overlap for larger or atypical cases (image-to-tile ratio > 6:1). Args: height (int): Height of the image in pixels. width (int): Width of the image in pixels. tile_height (int): Height of each tile in pixels. tile_width (int): Width of each tile in pixels. normal_tile_overlap (int): Overlap between tiles in pixels for normal (non-border) tiles. border_tile_overlap (int): Overlap between tiles in pixels for border tiles. Returns: tuple: A tuple containing: - grid_rows (int): Number of rows in the tile grid. - grid_cols (int): Number of columns in the tile grid. Notes: - The function uses the universal formula (without border_tile_overlap) for typical cases where the image-to-tile ratio is 6:1 or smaller. 
- For larger or atypical cases (image-to-tile ratio > 6:1), it uses a specialized formula that includes border_tile_overlap to ensure complete coverage of the image, especially at the edges. """ # Calculate the ratio between the image size and the tile size height_ratio = height / tile_height width_ratio = width / tile_width # If the ratio is greater than 6:1, use the formula with border_tile_overlap if height_ratio > 6 or width_ratio > 6: grid_rows = int(np.ceil((height - border_tile_overlap) / (tile_height - normal_tile_overlap))) + 1 grid_cols = int(np.ceil((width - border_tile_overlap) / (tile_width - normal_tile_overlap))) + 1 else: # Otherwise, use the universal formula grid_rows = int(np.ceil((height - normal_tile_overlap) / (tile_height - normal_tile_overlap))) grid_cols = int(np.ceil((width - normal_tile_overlap) / (tile_width - normal_tile_overlap))) return grid_rows, grid_cols def prepare_tiles( self, grid_rows, grid_cols, tile_weighting_method, tile_width, tile_height, normal_tile_overlap, border_tile_overlap, width, height, tile_sigma, batch_size, device, dtype, ): """ Processes image tiles by dynamically adjusting overlap and calculating Gaussian or cosine weights. Args: grid_rows (int): Number of rows in the tile grid. grid_cols (int): Number of columns in the tile grid. tile_weighting_method (str): Method for weighting tiles. Options: "Gaussian" or "Cosine". tile_width (int): Width of each tile in pixels. tile_height (int): Height of each tile in pixels. normal_tile_overlap (int): Overlap between tiles in pixels for normal tiles. border_tile_overlap (int): Overlap between tiles in pixels for border tiles. width (int): Width of the image in pixels. height (int): Height of the image in pixels. tile_sigma (float): Sigma parameter for Gaussian weighting. batch_size (int): Batch size for weight tiles. device (torch.device): Device where tensors will be allocated (e.g., 'cuda' or 'cpu'). dtype (torch.dtype): Data type of the tensors (e.g., torch.float32). Returns: tuple: A tuple containing: - tile_weights (np.ndarray): Array of weights for each tile. - tile_row_overlaps (np.ndarray): Array of row overlaps for each tile. - tile_col_overlaps (np.ndarray): Array of column overlaps for each tile. 
""" # Create arrays to store dynamic overlaps and weights tile_row_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap) tile_col_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap) tile_weights = np.empty((grid_rows, grid_cols), dtype=object) # Stores Gaussian or cosine weights # Iterate over tiles to adjust overlap and calculate weights for row in range(grid_rows): for col in range(grid_cols): # Calculate the size of the current tile px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices( row, col, tile_width, tile_height, normal_tile_overlap, normal_tile_overlap, width, height ) current_tile_width = px_col_end - px_col_init current_tile_height = px_row_end - px_row_init sigma = tile_sigma # Adjust overlap for smaller tiles if current_tile_width < tile_width: px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices( row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height ) current_tile_width = px_col_end - px_col_init tile_col_overlaps[row, col] = border_tile_overlap sigma = tile_sigma * 1.2 if current_tile_height < tile_height: px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices( row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height ) current_tile_height = px_row_end - px_row_init tile_row_overlaps[row, col] = border_tile_overlap sigma = tile_sigma * 1.2 # Calculate weights for the current tile if tile_weighting_method == self.TileWeightingMethod.COSINE.value: tile_weights[row, col] = self._generate_cosine_weights( tile_width=current_tile_width, tile_height=current_tile_height, nbatches=batch_size, device=device, dtype=torch.float32, ) else: tile_weights[row, col] = self._generate_gaussian_weights( tile_width=current_tile_width, tile_height=current_tile_height, nbatches=batch_size, device=device, dtype=dtype, sigma=sigma, ) return tile_weights, tile_row_overlaps, tile_col_overlaps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, control_image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.9999, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, control_mode: Optional[Union[int, List[int]]] = None, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, normal_tile_overlap: int = 64, border_tile_overlap: int = 128, max_tile_size: int = 1024, tile_gaussian_sigma: float = 0.05, tile_weighting_method: str = "Cosine", **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, *optional*): The initial image to be used as the starting point for the image generation process. Can also accept image latents as `image`, if passing latents directly, they will not be encoded again. control_image (`PipelineImageInput`, *optional*): The ControlNet input condition. ControlNet uses this input condition to generate guidance for Unet. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in init, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*): The height in pixels of the generated image. If not provided, defaults to the height of `control_image`. width (`int`, *optional*): The width in pixels of the generated image. If not provided, defaults to the width of `control_image`. strength (`float`, *optional*, defaults to 0.9999): Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point, and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. 
When `strength` is 1, added noise is maximum, and the denoising process runs for the full number of iterations specified in `num_inference_steps`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages generating images closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original UNet. If multiple ControlNets are specified in init, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): In this mode, the ControlNet encoder will try to recognize the content of the input image even if you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. control_mode (`int` or `List[int]`, *optional*): The mode of ControlNet guidance. 
Can be used to specify different behaviors for multiple ControlNets. original_size (`Tuple[int, int]`, *optional*): If `original_size` is not the same as `target_size`, the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning. crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning. target_size (`Tuple[int, int]`, *optional*): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning. negative_original_size (`Tuple[int, int]`, *optional*): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning. negative_crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning. negative_target_size (`Tuple[int, int]`, *optional*): To negatively condition the generation process based on a target image resolution. It should be the same as the `target_size` for most cases. Part of SDXL's micro-conditioning. aesthetic_score (`float`, *optional*, defaults to 6.0): Used to simulate an aesthetic score of the generated image by influencing the positive text condition. Part of SDXL's micro-conditioning. negative_aesthetic_score (`float`, *optional*, defaults to 2.5): Used to simulate an aesthetic score of the generated image by influencing the negative text condition. Part of SDXL's micro-conditioning. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. normal_tile_overlap (`int`, *optional*, defaults to 64): Number of overlapping pixels between tiles in consecutive rows. border_tile_overlap (`int`, *optional*, defaults to 128): Number of overlapping pixels between tiles at the borders. max_tile_size (`int`, *optional*, defaults to 1024): Maximum size of a tile in pixels. tile_gaussian_sigma (`float`, *optional*, defaults to 0.3): Sigma parameter for Gaussian weighting of tiles. tile_weighting_method (`str`, *optional*, defaults to "Cosine"): Method for weighting tiles. Options: "Cosine" or "Gaussian". Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple` containing the output images. 
""" controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] if not isinstance(control_image, list): control_image = [control_image] else: control_image = control_image.copy() if control_mode is None or isinstance(control_mode, list) and len(control_mode) == 0: raise ValueError("The value for `control_mode` is expected!") if not isinstance(control_mode, list): control_mode = [control_mode] if len(control_image) != len(control_mode): raise ValueError("Expected len(control_image) == len(control_mode)") num_control_type = controlnet.config.num_control_type # 0. Set internal use parameters height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) negative_original_size = negative_original_size or original_size negative_target_size = negative_target_size or target_size control_type = [0 for _ in range(num_control_type)] control_type = torch.Tensor(control_type) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False batch_size = 1 device = self._execution_device global_pool_conditions = controlnet.config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions # 1. Check inputs for _image, control_idx in zip(control_image, control_mode): control_type[control_idx] = 1 self.check_inputs( prompt, height, width, _image, strength, num_inference_steps, normal_tile_overlap, border_tile_overlap, max_tile_size, tile_gaussian_sigma, tile_weighting_method, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2 Get tile width and tile height size tile_width, tile_height = _adaptive_tile_size((width, height), max_tile_size=max_tile_size) # 2.1 Calculate the number of tiles needed grid_rows, grid_cols = self._get_num_tiles( height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap ) # 2.2 Expand prompt to number of tiles if not isinstance(prompt, list): prompt = [[prompt] * grid_cols] * grid_rows # 2.3 Update height and width tile size by tile size and tile overlap size width = (grid_cols - 1) * (tile_width - normal_tile_overlap) + min( tile_width, width - (grid_cols - 1) * (tile_width - normal_tile_overlap) ) height = (grid_rows - 1) * (tile_height - normal_tile_overlap) + min( tile_height, height - (grid_rows - 1) * (tile_height - normal_tile_overlap) ) # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) text_embeddings = [ [ self.encode_prompt( prompt=col, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) for col in row ] for row in prompt ] # 4. 
Prepare latent image image_tensor = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) # 4.1 Prepare controlnet_conditioning_image control_image = self.prepare_control_image( image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_type = ( control_type.reshape(1, -1) .to(device, dtype=controlnet.dtype) .repeat(batch_size * num_images_per_prompt * 2, 1) ) # 5. Prepare timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. Prepare latent variables dtype = text_embeddings[0][0][0].dtype if latents is None: latents = self.prepare_latents( image_tensor, latent_timestep, batch_size, num_images_per_prompt, dtype, device, generator, True, ) # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents * self.scheduler.sigmas[0] # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): controlnet_keep.append( 1.0 - float(i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end) ) # 8.1 Prepare added time ids & embeddings # text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds embeddings_and_added_time = [] crops_coords_top_left = negative_crops_coords_top_left = (tile_width, tile_height) for row in range(grid_rows): addition_embed_type_row = [] for col in range(grid_cols): # extract generated values prompt_embeds = text_embeddings[row][col][0] negative_prompt_embeds = text_embeddings[row][col][1] pooled_prompt_embeds = text_embeddings[row][col][2] negative_pooled_prompt_embeds = text_embeddings[row][col][3] if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, 
add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids)) embeddings_and_added_time.append(addition_embed_type_row) # 9. Prepare tiles weights and latent overlaps size to denoising process tile_weights, tile_row_overlaps, tile_col_overlaps = self.prepare_tiles( grid_rows, grid_cols, tile_weighting_method, tile_width, tile_height, normal_tile_overlap, border_tile_overlap, width, height, tile_gaussian_sigma, batch_size, device, dtype, ) # 10. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Diffuse each tile noise_preds = [] for row in range(grid_rows): noise_preds_row = [] for col in range(grid_cols): if self.interrupt: continue tile_row_overlap = tile_row_overlaps[row, col] tile_col_overlap = tile_col_overlaps[row, col] px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end] # expand the latents if we are doing classifier free guidance latent_model_input = ( torch.cat([tile_latents] * 2) if self.do_classifier_free_guidance else tile_latents # 1, 4, ... ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = { "text_embeds": embeddings_and_added_time[row][col][1], "time_ids": embeddings_and_added_time[row][col][2], } # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = tile_latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = embeddings_and_added_time[row][col][0].chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": embeddings_and_added_time[row][col][1].chunk(2)[1], "time_ids": embeddings_and_added_time[row][col][2].chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = embeddings_and_added_time[row][col][0] controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] px_row_init_pixel, px_row_end_pixel, px_col_init_pixel, px_col_end_pixel = _tile2pixel_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_control_image = control_image[ :, :, px_row_init_pixel:px_row_end_pixel, px_col_init_pixel:px_col_end_pixel ] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=[tile_control_image], control_type=control_type, control_type_idx=control_mode, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. 
# To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. down_block_res_samples = [ torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples ] mid_block_res_sample = torch.cat( [torch.zeros_like(mid_block_res_sample), mid_block_res_sample] ) # predict the noise residual with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype): noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=embeddings_and_added_time[row][col][0], cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_tile = noise_pred_uncond + guidance_scale * ( noise_pred_text - noise_pred_uncond ) noise_preds_row.append(noise_pred_tile) noise_preds.append(noise_preds_row) # Stitch noise predictions for all tiles noise_pred = torch.zeros(latents.shape, device=device) contributors = torch.zeros(latents.shape, device=device) # Add each tile contribution to overall latents for row in range(grid_rows): for col in range(grid_cols): tile_row_overlap = tile_row_overlaps[row, col] tile_col_overlap = tile_col_overlaps[row, col] px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_weights_resized = tile_weights[row, col] noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += ( noise_preds[row][col] * tile_weights_resized ) contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights_resized # Average overlapping areas with more than 1 contributor noise_pred /= contributors noise_pred = noise_pred.to(dtype) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) # update progress bar if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) else: image = latents # Offload all models self.maybe_free_model_hooks() result = StableDiffusionXLPipelineOutput(images=image) if not return_dict: return (image,) return result
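The `Examples:` section of the `__call__` docstring above is filled in at runtime via `replace_example_docstring`; for orientation, here is a hedged usage sketch of this tiled ControlNet super-resolution pipeline. The checkpoint IDs, the `ControlNetUnionModel` class, the `custom_pipeline` name, the placeholder image path, and the `control_mode` index are assumptions for illustration, not taken from this file.

```py
# Hedged usage sketch (assumptions: model IDs, ControlNetUnionModel, pipeline name, control_mode index).
import torch
from diffusers import ControlNetUnionModel, DiffusionPipeline
from diffusers.utils import load_image

# A "union" ControlNet exposes `num_control_type` in its config, which this pipeline expects.
controlnet = ControlNetUnionModel.from_pretrained(
    "xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16  # assumed checkpoint
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    custom_pipeline="mod_controlnet_tile_sr_sdxl",  # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

image = load_image("low_res_input.png")  # placeholder path
upscaled = pipe(
    prompt="high quality, highly detailed photo",
    image=image,
    control_image=image,       # the tile ControlNet is conditioned on the input image itself
    control_mode=[6],          # tile mode index; depends on the checkpoint (assumption)
    height=image.height * 4,
    width=image.width * 4,
    num_inference_steps=35,
    guidance_scale=5.0,
    controlnet_conditioning_scale=1.0,
    normal_tile_overlap=64,
    border_tile_overlap=128,
    max_tile_size=1024,
    tile_gaussian_sigma=0.3,
    tile_weighting_method="Cosine",
).images[0]
upscaled.save("upscaled.png")
```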
diffusers/examples/community/mod_controlnet_tile_sr_sdxl.py/0
{ "file_path": "diffusers/examples/community/mod_controlnet_tile_sr_sdxl.py", "repo_id": "diffusers", "token_count": 42040 }
139
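Before the next file, a minimal self-contained sketch of the tile blending used by the pipeline above: each tile's noise prediction is multiplied by a per-pixel weight mask (Gaussian here, mirroring `_generate_gaussian_weights`), accumulated into a full-size buffer, and normalized by the summed weights, as in the stitching step of the denoising loop. The helper names below are illustrative only, not part of the pipeline.

```py
# Standalone sketch of weighted tile stitching; helper names are illustrative.
import numpy as np
import torch


def gaussian_weights(tile_h, tile_w, sigma=0.05):
    # Same construction as `_generate_gaussian_weights`, without batch/channel dims.
    x = np.linspace(-1, 1, tile_w)
    y = np.linspace(-1, 1, tile_h)
    xx, yy = np.meshgrid(x, y)
    return torch.tensor(np.exp(-(xx**2 + yy**2) / (2 * sigma**2)), dtype=torch.float32)


def blend_tiles(tile_preds, tile_positions, out_shape, sigma=0.05):
    """Blend overlapping tile predictions (C, h, w) into one (C, H, W) tensor.

    tile_positions holds (row_init, col_init) offsets; tiles are assumed to
    cover the whole output so the normalizer is never zero.
    """
    out = torch.zeros(out_shape)
    contributors = torch.zeros(out_shape)
    for pred, (r0, c0) in zip(tile_preds, tile_positions):
        _, h, w = pred.shape
        w_mask = gaussian_weights(h, w, sigma)
        out[:, r0 : r0 + h, c0 : c0 + w] += pred * w_mask
        contributors[:, r0 : r0 + h, c0 : c0 + w] += w_mask
    # Average overlapping areas by their total contributed weight.
    return out / contributors


# Toy usage: two horizontally overlapping 4-channel tiles covering a 64x96 latent.
tiles = [torch.randn(4, 64, 64), torch.randn(4, 64, 64)]
positions = [(0, 0), (0, 32)]
stitched = blend_tiles(tiles, positions, (4, 64, 96))
print(stitched.shape)  # torch.Size([4, 64, 96])
```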
# Copyright 2025 The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import cv2 import numpy as np import PIL.Image import torch import torch.nn as nn from diffusers import StableDiffusionXLControlNetImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from diffusers.models import ControlNetModel from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module, is_torch_version try: import xformers import xformers.ops xformers_available = True except Exception: xformers_available = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name logger.warning( "To use instant id pipelines, please make sure you have the `insightface` library installed: `pip install insightface`." "Please refer to: https://huggingface.co/InstantX/InstantID for further instructions regarding inference" ) def FeedForward(dim, mult=4): inner_dim = int(dim * mult) return nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, inner_dim, bias=False), nn.GELU(), nn.Linear(inner_dim, dim, bias=False), ) def reshape_tensor(x, heads): bs, length, width = x.shape # (bs, length, width) --> (bs, length, n_heads, dim_per_head) x = x.view(bs, length, heads, -1) # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) x = x.transpose(1, 2) # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) x = x.reshape(bs, heads, length, -1) return x class PerceiverAttention(nn.Module): def __init__(self, *, dim, dim_head=64, heads=8): super().__init__() self.scale = dim_head**-0.5 self.dim_head = dim_head self.heads = heads inner_dim = dim_head * heads self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, dim, bias=False) def forward(self, x, latents): """ Args: x (torch.Tensor): image features shape (b, n1, D) latent (torch.Tensor): latent features shape (b, n2, D) """ x = self.norm1(x) latents = self.norm2(latents) b, l, _ = latents.shape q = self.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) k, v = self.to_kv(kv_input).chunk(2, dim=-1) q = reshape_tensor(q, self.heads) k = reshape_tensor(k, self.heads) v = reshape_tensor(v, self.heads) # attention scale = 1 / math.sqrt(math.sqrt(self.dim_head)) weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) out = weight @ v out = out.permute(0, 2, 1, 3).reshape(b, l, -1) return self.to_out(out) class Resampler(nn.Module): def __init__( self, dim=1024, depth=8, 
dim_head=64, heads=16, num_queries=8, embedding_dim=768, output_dim=1024, ff_mult=4, ): super().__init__() self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) self.proj_in = nn.Linear(embedding_dim, dim) self.proj_out = nn.Linear(dim, output_dim) self.norm_out = nn.LayerNorm(output_dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append( nn.ModuleList( [ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), FeedForward(dim=dim, mult=ff_mult), ] ) ) def forward(self, x): latents = self.latents.repeat(x.size(0), 1, 1) x = self.proj_in(x) for attn, ff in self.layers: latents = attn(x, latents) + latents latents = ff(latents) + latents latents = self.proj_out(latents) return self.norm_out(latents) class AttnProcessor(nn.Module): r""" Default processor for performing attention-related computations. """ def __init__( self, hidden_size=None, cross_attention_dim=None, ): super().__init__() def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAttnProcessor(nn.Module): r""" Attention processor for IP-Adapater. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. scale (`float`, defaults to 1.0): the weight scale of image prompt. num_tokens (`int`, defaults to 4 when do ip_adapter_plus it should be 16): The context length of the image features. 
""" def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.scale = scale self.num_tokens = num_tokens self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states else: # get encoder_hidden_states, ip_hidden_states end_pos = encoder_hidden_states.shape[1] - self.num_tokens encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], encoder_hidden_states[:, end_pos:, :], ) if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) if xformers_available: hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask) else: attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # for ip-adapter ip_key = self.to_k_ip(ip_hidden_states) ip_value = self.to_v_ip(ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) if xformers_available: ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None) else: ip_attention_probs = attn.get_attention_scores(query, ip_key, None) ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states) hidden_states = hidden_states + self.scale * ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def _memory_efficient_attention_xformers(self, query, key, value, attention_mask): # TODO attention_mask query = query.contiguous() key = key.contiguous() value = value.contiguous() hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask) return hidden_states EXAMPLE_DOC_STRING = """ Examples: ```py >>> # !pip install opencv-python transformers accelerate insightface >>> import diffusers >>> from diffusers.utils import load_image >>> from diffusers.models import ControlNetModel >>> import cv2 >>> import 
torch >>> import numpy as np >>> from PIL import Image >>> from insightface.app import FaceAnalysis >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps >>> # download 'antelopev2' under ./models >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) >>> app.prepare(ctx_id=0, det_size=(640, 640)) >>> # download models under ./checkpoints >>> face_adapter = f'./checkpoints/ip-adapter.bin' >>> controlnet_path = f'./checkpoints/ControlNetModel' >>> # load IdentityNet >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 ... ) >>> pipe.cuda() >>> # load adapter >>> pipe.load_ip_adapter_instantid(face_adapter) >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality" >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured" >>> # load an image >>> image = load_image("your-example.jpg") >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1] >>> face_emb = face_info['embedding'] >>> face_kps = draw_kps(face_image, face_info['kps']) >>> pipe.set_ip_adapter_scale(0.8) >>> # generate image >>> image = pipe( ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8 ... ).images[0] ``` """ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]): stickwidth = 4 limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]]) kps = np.array(kps) w, h = image_pil.size out_img = np.zeros([h, w, 3]) for i in range(len(limbSeq)): index = limbSeq[i] color = color_list[index[0]] x = kps[index][:, 0] y = kps[index][:, 1] length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5 angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1])) polygon = cv2.ellipse2Poly( (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1 ) out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color) out_img = (out_img * 0.6).astype(np.uint8) for idx_kp, kp in enumerate(kps): color = color_list[idx_kp] x, y = kp out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1) out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8)) return out_img_pil class StableDiffusionXLInstantIDImg2ImgPipeline(StableDiffusionXLControlNetImg2ImgPipeline): def cuda(self, dtype=torch.float16, use_xformers=False): self.to("cuda", dtype) if hasattr(self, "image_proj_model"): self.image_proj_model.to(self.unet.device).to(self.unet.dtype) if use_xformers: if is_xformers_available(): import xformers from packaging import version xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) self.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5): self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens) self.set_ip_adapter(model_ckpt, num_tokens, scale) def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16): image_proj_model = Resampler( dim=1280, depth=4, dim_head=64, heads=20, num_queries=num_tokens, embedding_dim=image_emb_dim, output_dim=self.unet.config.cross_attention_dim, ff_mult=4, ) image_proj_model.eval() self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype) state_dict = torch.load(model_ckpt, map_location="cpu") if "image_proj" in state_dict: state_dict = state_dict["image_proj"] self.image_proj_model.load_state_dict(state_dict) self.image_proj_model_in_features = image_emb_dim def set_ip_adapter(self, model_ckpt, num_tokens, scale): unet = self.unet attn_procs = {} for name in unet.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] if cross_attention_dim is None: attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype) else: attn_procs[name] = IPAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=scale, num_tokens=num_tokens, ).to(unet.device, dtype=unet.dtype) unet.set_attn_processor(attn_procs) state_dict = torch.load(model_ckpt, map_location="cpu") ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values()) if "ip_adapter" in state_dict: state_dict = state_dict["ip_adapter"] ip_layers.load_state_dict(state_dict) def set_ip_adapter_scale(self, scale): unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet for attn_processor in unet.attn_processors.values(): if isinstance(attn_processor, IPAttnProcessor): attn_processor.scale = scale def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance): if isinstance(prompt_image_emb, torch.Tensor): prompt_image_emb = prompt_image_emb.clone().detach() else: prompt_image_emb = torch.tensor(prompt_image_emb) prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype) prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features]) if do_classifier_free_guidance: prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0) else: prompt_image_emb = torch.cat([prompt_image_emb], dim=0) prompt_image_emb = self.image_proj_model(prompt_image_emb) return prompt_image_emb @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, control_image: PipelineImageInput = None, strength: float = 0.8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 
1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled text embeddings are generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input argument. image_embeds (`torch.Tensor`, *optional*): Pre-generated image embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. 
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. 
The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned containing the output images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, control_image, strength, num_inference_steps, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3.1 Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 3.2 Encode image prompt prompt_image_emb = self._encode_prompt_image_emb( image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance ) bs_embed, seq_len, _ = prompt_image_emb.shape prompt_image_emb = prompt_image_emb.repeat(1, num_images_per_prompt, 1) prompt_image_emb = prompt_image_emb.view(bs_embed * num_images_per_prompt, seq_len, -1) # 4. Prepare image and controlnet_conditioning_image image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) height, width = control_image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images height, width = control_image[0].shape[-2:] else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. 
Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, True, ) # # 6.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 7.2 Prepare added time ids & embeddings if isinstance(control_image, list): original_size = original_size or control_image[0].shape[-2:] else: original_size = original_size or control_image.shape[-2:] target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=prompt_image_emb, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=encoder_hidden_states, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
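# Standalone sketch (not part of the pipeline file above): the classifier-free
# guidance update used in the denoising loop, shown with toy tensors in place of
# real UNet outputs. When CFG is active the batch stacks the unconditional and the
# text-conditioned predictions along dim 0, and the final noise estimate moves from
# the unconditional prediction toward the conditional one by `guidance_scale`.
import torch

guidance_scale = 5.0
noise_pred = torch.randn(2, 4, 64, 64)  # stand-in for the UNet output batch [uncond, cond]
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])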
diffusers/examples/community/pipeline_stable_diffusion_xl_instandid_img2img.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_xl_instandid_img2img.py", "repo_id": "diffusers", "token_count": 22983 }
140
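# Standalone sketch (an illustration, not a file from the repository): the
# `controlnet_keep` schedule built in section 7.1 of the InstantID img2img pipeline
# above, simplified to a single ControlNet. The ControlNet residuals are scaled by
# 1.0 only for steps whose fractional position lies inside
# [control_guidance_start, control_guidance_end] and by 0.0 elsewhere.
def controlnet_keep_schedule(num_steps: int, start: float, end: float) -> list:
    keeps = []
    for i in range(num_steps):
        keeps.append(1.0 - float(i / num_steps < start or (i + 1) / num_steps > end))
    return keeps


if __name__ == "__main__":
    # With 10 steps and a [0.2, 0.8] window, only steps 2..7 receive ControlNet guidance.
    print(controlnet_keep_schedule(10, 0.2, 0.8))
    # -> [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]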
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import warnings from typing import Callable, List, Optional, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from diffusers import DiffusionPipeline, LMSDiscreteScheduler, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get("cond", None) is not None: encoder_hidden_states = kwargs.pop("cond") return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) # get correct sigmas from LMS scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == "v_prediction": self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) def set_sampler(self, scheduler_type: str): warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.") return self.set_scheduler(scheduler_type) def set_scheduler(self, scheduler_type: str): library = importlib.import_module("k_diffusion") sampling = getattr(library, "sampling") self.sampler = getattr(sampling, scheduler_type) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // 8, width // 8) if latents is None: if device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device) else: latents = torch.randn(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). 
Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = True if guidance_scale <= 1.0: raise ValueError("has to use guidance_scale") # 3. Encode input prompt text_embeddings = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device) sigmas = self.scheduler.sigmas sigmas = sigmas.to(text_embeddings.dtype) # 5. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, text_embeddings.dtype, device, generator, latents, ) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) def model_fn(x, t): latent_model_input = torch.cat([x] * 2) noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred latents = self.sampler(model_fn, latents, sigmas) # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) # 10. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
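# Hedged usage sketch for the k-diffusion community pipeline above. Assumptions
# (not taken from the file itself): the pipeline is loaded with
# `custom_pipeline="sd_text2img_k_diffusion"` (this example's file name under
# examples/community), "stable-diffusion-v1-5/stable-diffusion-v1-5" stands in for
# any Stable Diffusion 1.x checkpoint, and "sample_heun" is one of the sampler
# functions in `k_diffusion.sampling` that `set_scheduler` resolves by name via getattr.
import torch

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    custom_pipeline="sd_text2img_k_diffusion",
    torch_dtype=torch.float16,
).to("cuda")
pipe.set_scheduler("sample_heun")  # any k_diffusion.sampling.sample_* name
image = pipe(
    "an astronaut riding a horse",
    num_inference_steps=30,
    guidance_scale=7.5,  # must be > 1.0, the pipeline raises otherwise
).images[0]
image.save("astronaut.png")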
diffusers/examples/community/sd_text2img_k_diffusion.py/0
{ "file_path": "diffusers/examples/community/sd_text2img_k_diffusion.py", "repo_id": "diffusers", "token_count": 8535 }
141
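# Standalone sketch (an illustration, assuming only that `diffusers` and `torch` are
# installed) of why the k-diffusion pipeline above multiplies the freshly sampled
# latents by `sigmas[0]`: LMSDiscreteScheduler exposes a descending sigma schedule,
# so `sigmas[0]` is the largest noise level, and k-diffusion samplers expect the
# starting sample to be Gaussian noise scaled to that level.
import torch

from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
scheduler.set_timesteps(50)
sigmas = scheduler.sigmas
print(float(sigmas[0]), float(sigmas[-1]))  # largest sigma first, 0.0 last

latents = torch.randn(1, 4, 64, 64) * sigmas[0]  # mirrors `latents = latents * sigmas[0]` above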
# Based on stable_diffusion_xl_reference.py and stable_diffusion_controlnet_reference.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from diffusers import StableDiffusionXLControlNetPipeline from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.image_processor import PipelineImageInput from diffusers.models import ControlNetModel from diffusers.models.attention import BasicTransformerBlock from diffusers.models.unets.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> # !pip install opencv-python transformers accelerate >>> from diffusers import ControlNetModel, AutoencoderKL >>> from diffusers.schedulers import UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> import numpy as np >>> import torch >>> import cv2 >>> from PIL import Image >>> # download an image for the Canny controlnet >>> canny_image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_reference_input_cat.jpg" ... ) >>> # download an image for the Reference controlnet >>> ref_image = load_image( ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" ... ) >>> # initialize the models and pipeline >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization >>> controlnet = ControlNetModel.from_pretrained( ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 ... ) >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) >>> pipe = StableDiffusionXLControlNetReferencePipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 ... ).to("cuda:0") >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) >>> # get canny image >>> image = np.array(canny_image) >>> image = cv2.Canny(image, 100, 200) >>> image = image[:, :, None] >>> image = np.concatenate([image, image, image], axis=2) >>> canny_image = Image.fromarray(image) >>> # generate image >>> image = pipe( ... prompt="a cat", ... num_inference_steps=20, ... controlnet_conditioning_scale=controlnet_conditioning_scale, ... image=canny_image, ... ref_image=ref_image, ... reference_attn=True, ... reference_adain=True ... style_fidelity=1.0, ... generator=torch.Generator("cuda").manual_seed(42) ... ).images[0] ``` """ def torch_dfs(model: torch.nn.Module): result = [model] for child in model.children(): result += torch_dfs(child) return result # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. 
Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): Second frozen text-encoder ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. tokenizer_2 ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the `unet` during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings should always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no watermarker is used. """ def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance): refimage = refimage.to(device=device) needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) if refimage.dtype != self.vae.dtype: refimage = refimage.to(dtype=self.vae.dtype) # encode the mask image into latents space so we can concatenate it to the latents if isinstance(generator, list): ref_image_latents = [ self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i]) for i in range(batch_size) ] ref_image_latents = torch.cat(ref_image_latents, dim=0) else: ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator) ref_image_latents = self.vae.config.scaling_factor * ref_image_latents # duplicate mask and ref_image_latents for each generation per prompt, using mps friendly method if ref_image_latents.shape[0] < batch_size: if not batch_size % ref_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1) ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents # aligning device to prevent device errors when concating it with the latent model input ref_image_latents = ref_image_latents.to(device=device, dtype=dtype) # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) return ref_image_latents def prepare_ref_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): if not isinstance(image, torch.Tensor): if isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): images = [] for image_ in image: image_ = image_.convert("RGB") image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]) image_ = np.array(image_) image_ = image_[None, :] images.append(image_) image = images image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = (image - 0.5) / 0.5 image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.stack(image, dim=0) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image def check_ref_inputs( self, ref_image, reference_guidance_start, reference_guidance_end, style_fidelity, reference_attn, reference_adain, ): ref_image_is_pil = isinstance(ref_image, PIL.Image.Image) ref_image_is_tensor = isinstance(ref_image, torch.Tensor) if not ref_image_is_pil and not ref_image_is_tensor: raise TypeError( f"ref image must be passed and be one of PIL image or torch tensor, but is {type(ref_image)}" ) if not reference_attn and not reference_adain: raise ValueError("`reference_attn` or `reference_adain` must be True.") if style_fidelity < 0.0: raise ValueError(f"style fidelity: {style_fidelity} can't be smaller than 0.") if style_fidelity > 1.0: raise ValueError(f"style fidelity: {style_fidelity} can't be larger than 1.0.") if reference_guidance_start >= reference_guidance_end: raise ValueError( f"reference guidance start: {reference_guidance_start} cannot be larger or equal to reference guidance end: {reference_guidance_end}." 
) if reference_guidance_start < 0.0: raise ValueError(f"reference guidance start: {reference_guidance_start} can't be smaller than 0.") if reference_guidance_end > 1.0: raise ValueError(f"reference guidance end: {reference_guidance_end} can't be larger than 1.0.") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, ref_image: Union[torch.Tensor, PIL.Image.Image] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, sigmas: List[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], attention_auto_machine_weight: float = 1.0, gn_auto_machine_weight: float = 1.0, reference_guidance_start: float = 0.0, reference_guidance_end: float = 1.0, style_fidelity: float = 0.5, reference_attn: bool = True, reference_adain: bool = True, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. 
If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. ref_image (`torch.Tensor`, `PIL.Image.Image`): The Reference Control input condition. Reference Control uses this input condition to generate guidance to Unet. If the type is specified as `Torch.Tensor`, it is passed to Reference Control as is. `PIL.Image.Image` can also be accepted as an image. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled text embeddings are generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. 
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. 
The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. attention_auto_machine_weight (`float`): Weight of using reference query for self attention's context. If attention_auto_machine_weight=1.0, use reference query for all self attention's context. gn_auto_machine_weight (`float`): Weight of using reference adain. If gn_auto_machine_weight=2.0, use all reference adain plugins. reference_guidance_start (`float`, *optional*, defaults to 0.0): The percentage of total steps at which the reference ControlNet starts applying. reference_guidance_end (`float`, *optional*, defaults to 1.0): The percentage of total steps at which the reference ControlNet stops applying. style_fidelity (`float`): style fidelity of ref_uncond_xt. If style_fidelity=1.0, control more important, elif style_fidelity=0.0, prompt more important, else balanced. reference_attn (`bool`): Whether to use reference query for self attention's context. reference_adain (`bool`): Whether to use reference adain. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned containing the output images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, image, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self.check_ref_inputs( ref_image, reference_guidance_start, reference_guidance_end, style_fidelity, reference_attn, reference_adain, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3.1 Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 3.2 Encode ip_adapter_image if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 4. Prepare image if isinstance(controlnet, ControlNetModel): image = self.prepare_image( image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) height, width = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] for image_ in image: image_ = self.prepare_image( image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) images.append(image_) image = images height, width = image[0].shape[-2:] else: assert False # 5. Preprocess reference image ref_image = self.prepare_ref_image( image=ref_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=prompt_embeds.dtype, ) # 6. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) self._num_timesteps = len(timesteps) # 7. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 8. 
Prepare reference latent variables ref_image_latents = self.prepare_ref_latents( ref_image, batch_size * num_images_per_prompt, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance, ) # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 9.1 Create tensor stating which controlnets to keep controlnet_keep = [] reference_keeps = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) reference_keep = 1.0 - float( i / len(timesteps) < reference_guidance_start or (i + 1) / len(timesteps) > reference_guidance_end ) reference_keeps.append(reference_keep) # 9.2 Modify self attention and group norm MODE = "write" uc_mask = ( torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt) .type_as(ref_image_latents) .bool() ) do_classifier_free_guidance = self.do_classifier_free_guidance def hacked_basic_transformer_inner_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ): if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) elif self.use_ada_layer_norm_zero: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype ) else: norm_hidden_states = self.norm1(hidden_states) # 1. Self-Attention cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if self.only_cross_attention: attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) else: if MODE == "write": self.bank.append(norm_hidden_states.detach().clone()) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if MODE == "read": if attention_auto_machine_weight > self.attn_weight: attn_output_uc = self.attn1( norm_hidden_states, encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1), # attention_mask=attention_mask, **cross_attention_kwargs, ) attn_output_c = attn_output_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: attn_output_c[uc_mask] = self.attn1( norm_hidden_states[uc_mask], encoder_hidden_states=norm_hidden_states[uc_mask], **cross_attention_kwargs, ) attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc self.bank.clear() else: attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if self.use_ada_layer_norm_zero: attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = attn_output + hidden_states if self.attn2 is not None: norm_hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) # 2. 
Cross-Attention attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # 3. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self.use_ada_layer_norm_zero: norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) if self.use_ada_layer_norm_zero: ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = ff_output + hidden_states return hidden_states def hacked_mid_forward(self, *args, **kwargs): eps = 1e-6 x = self.original_forward(*args, **kwargs) if MODE == "write": if gn_auto_machine_weight >= self.gn_weight: var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0) self.mean_bank.append(mean) self.var_bank.append(var) if MODE == "read": if len(self.mean_bank) > 0 and len(self.var_bank) > 0: var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0) std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 mean_acc = sum(self.mean_bank) / float(len(self.mean_bank)) var_acc = sum(self.var_bank) / float(len(self.var_bank)) std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 x_uc = (((x - mean) / std) * std_acc) + mean_acc x_c = x_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: x_c[uc_mask] = x[uc_mask] x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc self.mean_bank = [] self.var_bank = [] return x def hack_CrossAttnDownBlock2D_forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ): eps = 1e-6 # TODO(Patrick, William) - attention mask is not used output_states = () for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] if MODE == "write": if gn_auto_machine_weight >= self.gn_weight: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) self.mean_bank.append([mean]) self.var_bank.append([var]) if MODE == "read": if len(self.mean_bank) > 0 and len(self.var_bank) > 0: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc hidden_states_c = hidden_states_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: hidden_states_c[uc_mask] = hidden_states[uc_mask] hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc output_states = output_states + (hidden_states,) if MODE == "read": self.mean_bank = [] self.var_bank = [] if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states def 
hacked_DownBlock2D_forward(self, hidden_states, temb=None, *args, **kwargs): eps = 1e-6 output_states = () for i, resnet in enumerate(self.resnets): hidden_states = resnet(hidden_states, temb) if MODE == "write": if gn_auto_machine_weight >= self.gn_weight: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) self.mean_bank.append([mean]) self.var_bank.append([var]) if MODE == "read": if len(self.mean_bank) > 0 and len(self.var_bank) > 0: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc hidden_states_c = hidden_states_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: hidden_states_c[uc_mask] = hidden_states[uc_mask] hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc output_states = output_states + (hidden_states,) if MODE == "read": self.mean_bank = [] self.var_bank = [] if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states def hacked_CrossAttnUpBlock2D_forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ): eps = 1e-6 # TODO(Patrick, William) - attention mask is not used for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] if MODE == "write": if gn_auto_machine_weight >= self.gn_weight: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) self.mean_bank.append([mean]) self.var_bank.append([var]) if MODE == "read": if len(self.mean_bank) > 0 and len(self.var_bank) > 0: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc hidden_states_c = hidden_states_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: hidden_states_c[uc_mask] = hidden_states[uc_mask] hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc if MODE == "read": self.mean_bank = [] self.var_bank = [] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = 
upsampler(hidden_states, upsample_size) return hidden_states def hacked_UpBlock2D_forward( self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, *args, **kwargs ): eps = 1e-6 for i, resnet in enumerate(self.resnets): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) if MODE == "write": if gn_auto_machine_weight >= self.gn_weight: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) self.mean_bank.append([mean]) self.var_bank.append([var]) if MODE == "read": if len(self.mean_bank) > 0 and len(self.var_bank) > 0: var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0) std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5 mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i])) var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i])) std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5 hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc hidden_states_c = hidden_states_uc.clone() if do_classifier_free_guidance and style_fidelity > 0: hidden_states_c[uc_mask] = hidden_states[uc_mask] hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc if MODE == "read": self.mean_bank = [] self.var_bank = [] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states if reference_attn: attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)] attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) for i, module in enumerate(attn_modules): module._original_inner_forward = module.forward module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock) module.bank = [] module.attn_weight = float(i) / float(len(attn_modules)) if reference_adain: gn_modules = [self.unet.mid_block] self.unet.mid_block.gn_weight = 0 down_blocks = self.unet.down_blocks for w, module in enumerate(down_blocks): module.gn_weight = 1.0 - float(w) / float(len(down_blocks)) gn_modules.append(module) up_blocks = self.unet.up_blocks for w, module in enumerate(up_blocks): module.gn_weight = float(w) / float(len(up_blocks)) gn_modules.append(module) for i, module in enumerate(gn_modules): if getattr(module, "original_forward", None) is None: module.original_forward = module.forward if i == 0: # mid_block module.forward = hacked_mid_forward.__get__(module, torch.nn.Module) elif isinstance(module, CrossAttnDownBlock2D): module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D) elif isinstance(module, DownBlock2D): module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D) elif isinstance(module, CrossAttnUpBlock2D): module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D) elif isinstance(module, UpBlock2D): module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D) module.mean_bank = [] module.var_bank = [] module.gn_weight *= 2 # 9.2 Prepare added time ids & embeddings if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: 
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 10. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # 10.1 Apply denoising_end if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. 
control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs["image_embeds"] = image_embeds # ref only part if reference_keeps[i] > 0: noise = randn_tensor( ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype ) ref_xt = self.scheduler.add_noise( ref_image_latents, noise, t.reshape( 1, ), ) ref_xt = self.scheduler.scale_model_input(ref_xt, t) MODE = "write" self.unet( ref_xt, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, ) # predict the noise residual MODE = "read" noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", 
negative_add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
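# ---------------------------------------------------------------------------
# Usage sketch (not part of the original pipeline code above). It assumes this file is
# loadable as the community pipeline "stable_diffusion_xl_controlnet_reference" (taken
# from the module's file name) and that a Canny conditioning image and a reference image
# are available; the checkpoint ids and file names below are placeholders to adapt.
if __name__ == "__main__":
    import torch

    from diffusers import ControlNetModel, DiffusionPipeline
    from diffusers.utils import load_image

    # Placeholder inputs: a Canny edge map for the ControlNet and a style/content
    # reference image for Reference Control.
    canny_image = load_image("canny_map.png")
    ref_image = load_image("reference.png")

    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
    )
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        custom_pipeline="stable_diffusion_xl_controlnet_reference",
        torch_dtype=torch.float16,
    ).to("cuda")

    image = pipe(
        prompt="a photo of an astronaut riding a horse",
        image=canny_image,  # ControlNet condition
        ref_image=ref_image,  # Reference Control condition
        num_inference_steps=30,
        controlnet_conditioning_scale=0.5,
        reference_attn=True,
        reference_adain=True,
        style_fidelity=0.5,
    ).images[0]
    image.save("output.png")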
diffusers/examples/community/stable_diffusion_xl_controlnet_reference.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_xl_controlnet_reference.py", "repo_id": "diffusers", "token_count": 34134 }
142
import numpy as np import torch import torch.nn as nn from torch.nn import functional from torch.nn.init import ones_, trunc_normal_, zeros_ def drop_path(x, drop_prob=0.0, training=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). The original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... """ if drop_prob == 0.0 or not training: return x keep_prob = torch.tensor(1 - drop_prob) shape = (x.size()[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype) random_tensor = torch.floor(random_tensor) # binarize output = x.divide(keep_prob) * random_tensor return output class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() def forward(self, x): return x * torch.sigmoid(x) class ConvBNLayer(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, bias_attr=False, groups=1, act=nn.GELU ): super().__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()), bias=bias_attr, ) self.norm = nn.BatchNorm2d(out_channels) self.act = act() def forward(self, inputs): out = self.conv(inputs) out = self.norm(out) out = self.act(out) return out class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, input): return input class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) if isinstance(act_layer, str): self.act = Swish() else: self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class ConvMixer(nn.Module): def __init__( self, dim, num_heads=8, HW=(8, 25), local_k=(3, 3), ): super().__init__() self.HW = HW self.dim = dim self.local_mixer = nn.Conv2d( dim, dim, local_k, 1, (local_k[0] // 2, local_k[1] // 2), groups=num_heads, # weight_attr=ParamAttr(initializer=KaimingNormal()) ) def forward(self, x): h = self.HW[0] w = self.HW[1] x = x.permute(0, 2, 1).reshape(-1, self.dim, h, w) x = self.local_mixer(x) x = x.flatten(2).permute(0, 2, 1) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, mixer="Global", HW=(8, 25), local_k=(7, 11), qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.HW = HW if HW is not None: H = HW[0] W = HW[1] self.N = H * W self.C = dim if mixer == "Local" and HW is not None: hk = local_k[0] wk = local_k[1] mask = 
torch.ones([H * W, H + hk - 1, W + wk - 1]) for h in range(0, H): for w in range(0, W): mask[h * W + w, h : h + hk, w : w + wk] = 0.0 mask_paddle = mask[:, hk // 2 : H + hk // 2, wk // 2 : W + wk // 2].flatten(1) mask_inf = torch.full([H * W, H * W], fill_value=float("-inf")) mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf) self.mask = mask[None, None, :] # self.mask = mask.unsqueeze([0, 1]) self.mixer = mixer def forward(self, x): if self.HW is not None: N = self.N C = self.C else: _, N, C = x.shape qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C // self.num_heads)).permute((2, 0, 3, 1, 4)) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = q.matmul(k.permute((0, 1, 3, 2))) if self.mixer == "Local": attn += self.mask attn = functional.softmax(attn, dim=-1) attn = self.attn_drop(attn) x = (attn.matmul(v)).permute((0, 2, 1, 3)).reshape((-1, N, C)) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mixer="Global", local_mixer=(7, 11), HW=(8, 25), mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer="nn.LayerNorm", epsilon=1e-6, prenorm=True, ): super().__init__() if isinstance(norm_layer, str): self.norm1 = eval(norm_layer)(dim, eps=epsilon) else: self.norm1 = norm_layer(dim) if mixer == "Global" or mixer == "Local": self.mixer = Attention( dim, num_heads=num_heads, mixer=mixer, HW=HW, local_k=local_mixer, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, ) elif mixer == "Conv": self.mixer = ConvMixer(dim, num_heads=num_heads, HW=HW, local_k=local_mixer) else: raise TypeError("The mixer must be one of [Global, Local, Conv]") self.drop_path = DropPath(drop_path) if drop_path > 0.0 else Identity() if isinstance(norm_layer, str): self.norm2 = eval(norm_layer)(dim, eps=epsilon) else: self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp_ratio = mlp_ratio self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.prenorm = prenorm def forward(self, x): if self.prenorm: x = self.norm1(x + self.drop_path(self.mixer(x))) x = self.norm2(x + self.drop_path(self.mlp(x))) else: x = x + self.drop_path(self.mixer(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """Image to Patch Embedding""" def __init__(self, img_size=(32, 100), in_channels=3, embed_dim=768, sub_num=2): super().__init__() num_patches = (img_size[1] // (2**sub_num)) * (img_size[0] // (2**sub_num)) self.img_size = img_size self.num_patches = num_patches self.embed_dim = embed_dim self.norm = None if sub_num == 2: self.proj = nn.Sequential( ConvBNLayer( in_channels=in_channels, out_channels=embed_dim // 2, kernel_size=3, stride=2, padding=1, act=nn.GELU, bias_attr=False, ), ConvBNLayer( in_channels=embed_dim // 2, out_channels=embed_dim, kernel_size=3, stride=2, padding=1, act=nn.GELU, bias_attr=False, ), ) if sub_num == 3: self.proj = nn.Sequential( ConvBNLayer( in_channels=in_channels, out_channels=embed_dim // 4, kernel_size=3, stride=2, padding=1, act=nn.GELU, bias_attr=False, ), ConvBNLayer( in_channels=embed_dim // 4, out_channels=embed_dim // 2, kernel_size=3, stride=2, padding=1, act=nn.GELU, bias_attr=False, ), ConvBNLayer( in_channels=embed_dim // 2, out_channels=embed_dim, kernel_size=3, stride=2, padding=1, act=nn.GELU, bias_attr=False, ), ) def forward(self, x): B, C, H, W = x.shape assert H == self.img_size[0] and W == self.img_size[1], ( f"Input 
image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." ) x = self.proj(x).flatten(2).permute(0, 2, 1) return x class SubSample(nn.Module): def __init__(self, in_channels, out_channels, types="Pool", stride=(2, 1), sub_norm="nn.LayerNorm", act=None): super().__init__() self.types = types if types == "Pool": self.avgpool = nn.AvgPool2d(kernel_size=(3, 5), stride=stride, padding=(1, 2)) self.maxpool = nn.MaxPool2d(kernel_size=(3, 5), stride=stride, padding=(1, 2)) self.proj = nn.Linear(in_channels, out_channels) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, # weight_attr=ParamAttr(initializer=KaimingNormal()) ) self.norm = eval(sub_norm)(out_channels) if act is not None: self.act = act() else: self.act = None def forward(self, x): if self.types == "Pool": x1 = self.avgpool(x) x2 = self.maxpool(x) x = (x1 + x2) * 0.5 out = self.proj(x.flatten(2).permute((0, 2, 1))) else: x = self.conv(x) out = x.flatten(2).permute((0, 2, 1)) out = self.norm(out) if self.act is not None: out = self.act(out) return out class SVTRNet(nn.Module): def __init__( self, img_size=[48, 100], in_channels=3, embed_dim=[64, 128, 256], depth=[3, 6, 3], num_heads=[2, 4, 8], mixer=["Local"] * 6 + ["Global"] * 6, # Local atten, Global atten, Conv local_mixer=[[7, 11], [7, 11], [7, 11]], patch_merging="Conv", # Conv, Pool, None mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0.0, last_drop=0.1, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer="nn.LayerNorm", sub_norm="nn.LayerNorm", epsilon=1e-6, out_channels=192, out_char_num=25, block_unit="Block", act="nn.GELU", last_stage=True, sub_num=2, prenorm=True, use_lenhead=False, **kwargs, ): super().__init__() self.img_size = img_size self.embed_dim = embed_dim self.out_channels = out_channels self.prenorm = prenorm patch_merging = None if patch_merging != "Conv" and patch_merging != "Pool" else patch_merging self.patch_embed = PatchEmbed( img_size=img_size, in_channels=in_channels, embed_dim=embed_dim[0], sub_num=sub_num ) num_patches = self.patch_embed.num_patches self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)] self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0])) # self.pos_embed = self.create_parameter( # shape=[1, num_patches, embed_dim[0]], default_initializer=zeros_) # self.add_parameter("pos_embed", self.pos_embed) self.pos_drop = nn.Dropout(p=drop_rate) Block_unit = eval(block_unit) dpr = np.linspace(0, drop_path_rate, sum(depth)) self.blocks1 = nn.ModuleList( [ Block_unit( dim=embed_dim[0], num_heads=num_heads[0], mixer=mixer[0 : depth[0]][i], HW=self.HW, local_mixer=local_mixer[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=eval(act), attn_drop=attn_drop_rate, drop_path=dpr[0 : depth[0]][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm, ) for i in range(depth[0]) ] ) if patch_merging is not None: self.sub_sample1 = SubSample( embed_dim[0], embed_dim[1], sub_norm=sub_norm, stride=[2, 1], types=patch_merging ) HW = [self.HW[0] // 2, self.HW[1]] else: HW = self.HW self.patch_merging = patch_merging self.blocks2 = nn.ModuleList( [ Block_unit( dim=embed_dim[1], num_heads=num_heads[1], mixer=mixer[depth[0] : depth[0] + depth[1]][i], HW=HW, local_mixer=local_mixer[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=eval(act), attn_drop=attn_drop_rate, drop_path=dpr[depth[0] : depth[0] + depth[1]][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm, ) for i 
in range(depth[1]) ] ) if patch_merging is not None: self.sub_sample2 = SubSample( embed_dim[1], embed_dim[2], sub_norm=sub_norm, stride=[2, 1], types=patch_merging ) HW = [self.HW[0] // 4, self.HW[1]] else: HW = self.HW self.blocks3 = nn.ModuleList( [ Block_unit( dim=embed_dim[2], num_heads=num_heads[2], mixer=mixer[depth[0] + depth[1] :][i], HW=HW, local_mixer=local_mixer[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=eval(act), attn_drop=attn_drop_rate, drop_path=dpr[depth[0] + depth[1] :][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm, ) for i in range(depth[2]) ] ) self.last_stage = last_stage if last_stage: self.avg_pool = nn.AdaptiveAvgPool2d((1, out_char_num)) self.last_conv = nn.Conv2d( in_channels=embed_dim[2], out_channels=self.out_channels, kernel_size=1, stride=1, padding=0, bias=False, ) self.hardswish = nn.Hardswish() self.dropout = nn.Dropout(p=last_drop) if not prenorm: self.norm = eval(norm_layer)(embed_dim[-1], epsilon=epsilon) self.use_lenhead = use_lenhead if use_lenhead: self.len_conv = nn.Linear(embed_dim[2], self.out_channels) self.hardswish_len = nn.Hardswish() self.dropout_len = nn.Dropout(p=last_drop) trunc_normal_(self.pos_embed, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: zeros_(m.bias) elif isinstance(m, nn.LayerNorm): zeros_(m.bias) ones_(m.weight) def forward_features(self, x): x = self.patch_embed(x) x = x + self.pos_embed x = self.pos_drop(x) for blk in self.blocks1: x = blk(x) if self.patch_merging is not None: x = self.sub_sample1(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[0], self.HW[0], self.HW[1]])) for blk in self.blocks2: x = blk(x) if self.patch_merging is not None: x = self.sub_sample2(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]])) for blk in self.blocks3: x = blk(x) if not self.prenorm: x = self.norm(x) return x def forward(self, x): x = self.forward_features(x) if self.use_lenhead: len_x = self.len_conv(x.mean(1)) len_x = self.dropout_len(self.hardswish_len(len_x)) if self.last_stage: if self.patch_merging is not None: h = self.HW[0] // 4 else: h = self.HW[0] x = self.avg_pool(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[2], h, self.HW[1]])) x = self.last_conv(x) x = self.hardswish(x) x = self.dropout(x) if self.use_lenhead: return x, len_x return x if __name__ == "__main__": a = torch.rand(1, 3, 48, 100) svtr = SVTRNet() out = svtr(a) print(svtr) print(out.size())
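# ---------------------------------------------------------------------------
# A small sanity-check sketch (not part of the original file) for the `drop_path`
# helper defined above: in training mode it zeroes whole samples with probability
# `drop_prob` and rescales the survivors by 1 / keep_prob, so the expected activation
# stays roughly unchanged. The tensor sizes below are arbitrary; it reuses the
# module-level `torch` import.
if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.ones(10000, 8)
    y = drop_path(x, drop_prob=0.3, training=True)
    kept = (y.abs().sum(dim=1) > 0).float().mean().item()
    print(f"surviving fraction ~ {kept:.2f} (expected ~0.70)")
    print(f"mean activation ~ {y.mean().item():.2f} (expected ~1.00)")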
diffusers/examples/research_projects/anytext/ocr_recog/RecSVTR.py/0
{ "file_path": "diffusers/examples/research_projects/anytext/ocr_recog/RecSVTR.py", "repo_id": "diffusers", "token_count": 10944 }
143
<jupyter_start><jupyter_text>IntroductionThis colab is design to run the pretrained models from [GeoDiff](https://github.com/MinkaiXu/GeoDiff).The visualization code is inspired by this PyMol [colab](https://colab.research.google.com/gist/iwatobipen/2ec7faeafe5974501e69fcc98c122922/pymol.ipynbscrollTo=Hm4kY7CaZSlw).The goal is to generate physically accurate molecules. Given the input of a molecule graph (atom and bond structures with their connectivity -- in the form of a 2d graph). What we want to generate is a stable 3d structure of the molecule.This colab uses GEOM datasets that have multiple 3d targets per configuration, which provide more compelling targets for generative methods.> Colab made by [natolambert](https://twitter.com/natolambert).![diffusers_library](https://github.com/huggingface/diffusers/raw/main/docs/source/imgs/diffusers_library.jpg) Installations Install Conda Here we check the `cuda` version of colab. When this was built, the version was always 11.1, which impacts some installation decisions below.<jupyter_code>!nvcc --version<jupyter_output>nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2021 NVIDIA Corporation Built on Sun_Feb_14_21:12:58_PST_2021 Cuda compilation tools, release 11.2, V11.2.152 Build cuda_11.2.r11.2/compiler.29618528_0<jupyter_text>Install Conda for some more complex dependencies for geometric networks.<jupyter_code>!pip install -q condacolab<jupyter_output>WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv <jupyter_text>Setup Conda<jupyter_code>import condacolab condacolab.install()<jupyter_output>✨🍰✨ Everything looks OK!<jupyter_text>Install pytorch requirements (this takes a few minutes, go grab yourself a coffee 🤗)<jupyter_code>!conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c nvidia # !conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=11.1 -c pytorch -c conda-forge<jupyter_output>Collecting package metadata (current_repodata.json): - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / done Solving environment: \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ done ## Package Plan ## environment location: /usr/local added / updated specs: - cudatoolkit=11.1 - pytorch - torchaudio - torchvision The following packages will be downloaded: package | build ---------------------------|----------------- conda-22.9.0 | py37h89c1867_1 960 KB conda-forge ------------------------------------------------------------ Total: 960 KB The following packages will be UPDATED: conda 4.14.0-py37h89c1867_0 --> 22.9.0-py37h89c1867_1 Downloading and E[...]<jupyter_text>Need to remove a pathspec for colab that specifies the incorrect cuda version.<jupyter_code>!rm /usr/local/conda-meta/pinned<jupyter_output>rm: cannot remove '/usr/local/conda-meta/pinned': No such file or directory<jupyter_text>Install torch geometric (used in the model later)<jupyter_code>!conda install -c rusty1s pytorch-geometric=1.7.2<jupyter_output>Collecting package metadata (current_repodata.json): - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - done Solving environment: | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | / - \ | 
/ - \ done ## Package Plan ## environment location: /usr/local added / updated specs: - pytorch-geometric=1.7.2 The following packages will be downloaded: package | build ---------------------------|----------------- decorator-[...]<jupyter_text>Install Diffusers<jupyter_code>%cd /content # install latest HF diffusers (will update to the release once added) !git clone https://github.com/huggingface/diffusers.git !pip install -q /content/diffusers # dependencies for diffusers !pip install -q datasets transformers<jupyter_output>/content Cloning into 'diffusers'... remote: Enumerating objects: 9298, done. remote: Counting objects: 100% (40/40), done. remote: Compressing objects: 100% (23/23), done. remote: Total 9298 (delta 17), reused 23 (delta 11), pack-reused 9258 Receiving objects: 100% (9298/9298), 7.38 MiB | 5.28 MiB/s, done. Resolving deltas: 100% (6168/6168), done. Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.toml) ... [?25l[?25hdone  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 757.0/757.0 kB 52.8 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 163.5/163.5 kB 21.9 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40.8/40.8 kB 5.5 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 596.3/596.3 kB 51.7 MB/s eta [...]<jupyter_text>Check that torch is installed correctly and utilizing the GPU in the colab<jupyter_code>import torch print(torch.cuda.is_available()) torch.__version__<jupyter_output>True<jupyter_text>Install Chemistry-specific DependenciesInstall RDKit, a tool for working with and visualizing chemsitry in python (you use this to visualize the generate models later).<jupyter_code>!pip install rdkit<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting rdkit Downloading rdkit-2022.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (36.8 MB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 36.8/36.8 MB 34.6 MB/s eta 0:00:00 [?25hRequirement already satisfied: Pillow in /usr/local/lib/python3.7/site-packages (from rdkit) (9.2.0) Requirement already satisfied: numpy in /usr/local/lib/python3.7/site-packages (from rdkit) (1.21.6) Installing collected packages: rdkit Successfully installed rdkit-2022.3.5 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv <jupyter_text>Get viewer from nglviewThe model you will use outputs a position matrix tensor. This pytorch geometric data object will have many features (positions, known features, edge features -- all tensors).The data we give to the model will also have a rdmol object (which can extract features to geometric if needed).The rdmol in this object is a source of ground truth for the generated molecules.You will use one rendering function from nglviewer later!<jupyter_code>!pip install nglview<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting nglview Downloading nglview-3.0.3.tar.gz (5.7 MB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.7/5.7 MB 91.2 MB/s eta 0:00:00 [?25h Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.toml) ... 
[?25l[?25hdone Requirement already satisfied: numpy in /usr/local/lib/python3.7/site-packages (from nglview) (1.21.6) Collecting jupyterlab-widgets Downloading jupyterlab_widgets-3.0.3-py3-none-any.whl (384 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 384.1/384.1 kB 40.6 MB/s eta 0:00:00 [?25hCollecting ipywidgets>=7 Downloading ipywidgets-8.0.2-py3-none-any.whl (134 kB)  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 134.4/134.4 kB 21.2 MB/s eta 0:00:00 [?25hCollecti[...]<jupyter_text>Create a diffusion model Model class(es) Imports<jupyter_code># Model adapted from GeoDiff https://github.com/MinkaiXu/GeoDiff # Model inspired by https://github.com/DeepGraphLearning/torchdrug/tree/master/torchdrug/models from dataclasses import dataclass from typing import Callable, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Embedding, Linear, Module, ModuleList, Sequential from torch_geometric.nn import MessagePassing, radius, radius_graph from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size from torch_geometric.utils import dense_to_sparse, to_dense_adj from torch_scatter import scatter_add from torch_sparse import SparseTensor, coalesce from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.modeling_utils import ModelMixin from diffusers.utils import BaseOutput<jupyter_output><empty_output><jupyter_text>Helper classes<jupyter_code>@dataclass class MoleculeGNNOutput(BaseOutput): """ Args: sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Hidden states output. Output of last layer of model. """ sample: torch.Tensor class MultiLayerPerceptron(nn.Module): """ Multi-layer Perceptron. Note there is no activation or dropout in the last layer. 
Args: input_dim (int): input dimension hidden_dim (list of int): hidden dimensions activation (str or function, optional): activation function dropout (float, optional): dropout rate """ def __init__(self, input_dim, hidden_dims, activation="relu", dropout=0): super(MultiLayerPerceptron, self).__init__() self.dims = [input_dim] + hidden_dims if isinstance(activation, str): self.activation = getattr(F, activation) else: print(f"Warning, activation passed {activation} is not string and ignored") self.activation = None if dropout > 0: self.dropout = nn.Dropout(dropout) else: self.dropout = None self.layers = nn.ModuleList() for i in range(len(self.dims) - 1): self.layers.append(nn.Linear(self.dims[i], self.dims[i + 1])) def forward(self, x): """""" for i, layer in enumerate(self.layers): x = layer(x) if i < len(self.layers) - 1: if self.activation: x = self.activation(x) if self.dropout: x = self.dropout(x) return x class ShiftedSoftplus(torch.nn.Module): def __init__(self): super(ShiftedSoftplus, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift class CFConv(MessagePassing): def __init__(self, in_channels, out_channels, num_filters, mlp, cutoff, smooth): super(CFConv, self).__init__(aggr="add") self.lin1 = Linear(in_channels, num_filters, bias=False) self.lin2 = Linear(num_filters, out_channels) self.nn = mlp self.cutoff = cutoff self.smooth = smooth self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.lin1.weight) torch.nn.init.xavier_uniform_(self.lin2.weight) self.lin2.bias.data.fill_(0) def forward(self, x, edge_index, edge_length, edge_attr): if self.smooth: C = 0.5 * (torch.cos(edge_length * np.pi / self.cutoff) + 1.0) C = C * (edge_length <= self.cutoff) * (edge_length >= 0.0) # Modification: cutoff else: C = (edge_length <= self.cutoff).float() W = self.nn(edge_attr) * C.view(-1, 1) x = self.lin1(x) x = self.propagate(edge_index, x=x, W=W) x = self.lin2(x) return x def message(self, x_j: torch.Tensor, W) -> torch.Tensor: return x_j * W class InteractionBlock(torch.nn.Module): def __init__(self, hidden_channels, num_gaussians, num_filters, cutoff, smooth): super(InteractionBlock, self).__init__() mlp = Sequential( Linear(num_gaussians, num_filters), ShiftedSoftplus(), Linear(num_filters, num_filters), ) self.conv = CFConv(hidden_channels, hidden_channels, num_filters, mlp, cutoff, smooth) self.act = ShiftedSoftplus() self.lin = Linear(hidden_channels, hidden_channels) def forward(self, x, edge_index, edge_length, edge_attr): x = self.conv(x, edge_index, edge_length, edge_attr) x = self.act(x) x = self.lin(x) return x class SchNetEncoder(Module): def __init__( self, hidden_channels=128, num_filters=128, num_interactions=6, edge_channels=100, cutoff=10.0, smooth=False ): super().__init__() self.hidden_channels = hidden_channels self.num_filters = num_filters self.num_interactions = num_interactions self.cutoff = cutoff self.embedding = Embedding(100, hidden_channels, max_norm=10.0) self.interactions = ModuleList() for _ in range(num_interactions): block = InteractionBlock(hidden_channels, edge_channels, num_filters, cutoff, smooth) self.interactions.append(block) def forward(self, z, edge_index, edge_length, edge_attr, embed_node=True): if embed_node: assert z.dim() == 1 and z.dtype == torch.long h = self.embedding(z) else: h = z for interaction in self.interactions: h = h + interaction(h, edge_index, edge_length, edge_attr) return h class GINEConv(MessagePassing): """ Custom class 
of the graph isomorphism operator from the "How Powerful are Graph Neural Networks? https://huggingface.co/papers/1810.00826 paper. Note that this implementation has the added option of a custom activation. """ def __init__(self, mlp: Callable, eps: float = 0.0, train_eps: bool = False, activation="softplus", **kwargs): super(GINEConv, self).__init__(aggr="add", **kwargs) self.nn = mlp self.initial_eps = eps if isinstance(activation, str): self.activation = getattr(F, activation) else: self.activation = None if train_eps: self.eps = torch.nn.Parameter(torch.Tensor([eps])) else: self.register_buffer("eps", torch.Tensor([eps])) def forward( self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_attr: OptTensor = None, size: Size = None ) -> torch.Tensor: """""" if isinstance(x, torch.Tensor): x: OptPairTensor = (x, x) # Node and edge feature dimensionalites need to match. if isinstance(edge_index, torch.Tensor): assert edge_attr is not None assert x[0].size(-1) == edge_attr.size(-1) elif isinstance(edge_index, SparseTensor): assert x[0].size(-1) == edge_index.size(-1) # propagate_type: (x: OptPairTensor, edge_attr: OptTensor) out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size) x_r = x[1] if x_r is not None: out += (1 + self.eps) * x_r return self.nn(out) def message(self, x_j: torch.Tensor, edge_attr: torch.Tensor) -> torch.Tensor: if self.activation: return self.activation(x_j + edge_attr) else: return x_j + edge_attr def __repr__(self): return "{}(nn={})".format(self.__class__.__name__, self.nn) class GINEncoder(torch.nn.Module): def __init__(self, hidden_dim, num_convs=3, activation="relu", short_cut=True, concat_hidden=False): super().__init__() self.hidden_dim = hidden_dim self.num_convs = num_convs self.short_cut = short_cut self.concat_hidden = concat_hidden self.node_emb = nn.Embedding(100, hidden_dim) if isinstance(activation, str): self.activation = getattr(F, activation) else: self.activation = None self.convs = nn.ModuleList() for i in range(self.num_convs): self.convs.append( GINEConv( MultiLayerPerceptron(hidden_dim, [hidden_dim, hidden_dim], activation=activation), activation=activation, ) ) def forward(self, z, edge_index, edge_attr): """ Input: data: (torch_geometric.data.Data): batched graph edge_index: bond indices of the original graph (num_node, hidden) edge_attr: edge feature tensor with shape (num_edge, hidden) Output: node_feature: graph feature """ node_attr = self.node_emb(z) # (num_node, hidden) hiddens = [] conv_input = node_attr # (num_node, hidden) for conv_idx, conv in enumerate(self.convs): hidden = conv(conv_input, edge_index, edge_attr) if conv_idx < len(self.convs) - 1 and self.activation is not None: hidden = self.activation(hidden) assert hidden.shape == conv_input.shape if self.short_cut and hidden.shape == conv_input.shape: hidden += conv_input hiddens.append(hidden) conv_input = hidden if self.concat_hidden: node_feature = torch.cat(hiddens, dim=-1) else: node_feature = hiddens[-1] return node_feature class MLPEdgeEncoder(Module): def __init__(self, hidden_dim=100, activation="relu"): super().__init__() self.hidden_dim = hidden_dim self.bond_emb = Embedding(100, embedding_dim=self.hidden_dim) self.mlp = MultiLayerPerceptron(1, [self.hidden_dim, self.hidden_dim], activation=activation) @property def out_channels(self): return self.hidden_dim def forward(self, edge_length, edge_type): """ Input: edge_length: The length of edges, shape=(E, 1). edge_type: The type pf edges, shape=(E,) Returns: edge_attr: The representation of edges. 
(E, 2 * num_gaussians) """ d_emb = self.mlp(edge_length) # (num_edge, hidden_dim) edge_attr = self.bond_emb(edge_type) # (num_edge, hidden_dim) return d_emb * edge_attr # (num_edge, hidden) def assemble_atom_pair_feature(node_attr, edge_index, edge_attr): h_row, h_col = node_attr[edge_index[0]], node_attr[edge_index[1]] h_pair = torch.cat([h_row * h_col, edge_attr], dim=-1) # (E, 2H) return h_pair def _extend_graph_order(num_nodes, edge_index, edge_type, order=3): """ Args: num_nodes: Number of atoms. edge_index: Bond indices of the original graph. edge_type: Bond types of the original graph. order: Extension order. Returns: new_edge_index: Extended edge indices. new_edge_type: Extended edge types. """ def binarize(x): return torch.where(x > 0, torch.ones_like(x), torch.zeros_like(x)) def get_higher_order_adj_matrix(adj, order): """ Args: adj: (N, N) type_mat: (N, N) Returns: Following attributes will be updated: - edge_index - edge_type Following attributes will be added to the data object: - bond_edge_index: Original edge_index. """ adj_mats = [ torch.eye(adj.size(0), dtype=torch.long, device=adj.device), binarize(adj + torch.eye(adj.size(0), dtype=torch.long, device=adj.device)), ] for i in range(2, order + 1): adj_mats.append(binarize(adj_mats[i - 1] @ adj_mats[1])) order_mat = torch.zeros_like(adj) for i in range(1, order + 1): order_mat += (adj_mats[i] - adj_mats[i - 1]) * i return order_mat num_types = 22 # given from len(BOND_TYPES), where BOND_TYPES = {t: i for i, t in enumerate(BT.names.values())} # from rdkit.Chem.rdchem import BondType as BT N = num_nodes adj = to_dense_adj(edge_index).squeeze(0) adj_order = get_higher_order_adj_matrix(adj, order) # (N, N) type_mat = to_dense_adj(edge_index, edge_attr=edge_type).squeeze(0) # (N, N) type_highorder = torch.where(adj_order > 1, num_types + adj_order - 1, torch.zeros_like(adj_order)) assert (type_mat * type_highorder == 0).all() type_new = type_mat + type_highorder new_edge_index, new_edge_type = dense_to_sparse(type_new) _, edge_order = dense_to_sparse(adj_order) # data.bond_edge_index = data.edge_index # Save original edges new_edge_index, new_edge_type = coalesce(new_edge_index, new_edge_type.long(), N, N) # modify data return new_edge_index, new_edge_type def _extend_to_radius_graph(pos, edge_index, edge_type, cutoff, batch, unspecified_type_number=0, is_sidechain=None): assert edge_type.dim() == 1 N = pos.size(0) bgraph_adj = torch.sparse.LongTensor(edge_index, edge_type, torch.Size([N, N])) if is_sidechain is None: rgraph_edge_index = radius_graph(pos, r=cutoff, batch=batch) # (2, E_r) else: # fetch sidechain and its batch index is_sidechain = is_sidechain.bool() dummy_index = torch.arange(pos.size(0), device=pos.device) sidechain_pos = pos[is_sidechain] sidechain_index = dummy_index[is_sidechain] sidechain_batch = batch[is_sidechain] assign_index = radius(x=pos, y=sidechain_pos, r=cutoff, batch_x=batch, batch_y=sidechain_batch) r_edge_index_x = assign_index[1] r_edge_index_y = assign_index[0] r_edge_index_y = sidechain_index[r_edge_index_y] rgraph_edge_index1 = torch.stack((r_edge_index_x, r_edge_index_y)) # (2, E) rgraph_edge_index2 = torch.stack((r_edge_index_y, r_edge_index_x)) # (2, E) rgraph_edge_index = torch.cat((rgraph_edge_index1, rgraph_edge_index2), dim=-1) # (2, 2E) # delete self loop rgraph_edge_index = rgraph_edge_index[:, (rgraph_edge_index[0] != rgraph_edge_index[1])] rgraph_adj = torch.sparse.LongTensor( rgraph_edge_index, torch.ones(rgraph_edge_index.size(1)).long().to(pos.device) * unspecified_type_number, 
torch.Size([N, N]), ) composed_adj = (bgraph_adj + rgraph_adj).coalesce() # Sparse (N, N, T) new_edge_index = composed_adj.indices() new_edge_type = composed_adj.values().long() return new_edge_index, new_edge_type def extend_graph_order_radius( num_nodes, pos, edge_index, edge_type, batch, order=3, cutoff=10.0, extend_order=True, extend_radius=True, is_sidechain=None, ): if extend_order: edge_index, edge_type = _extend_graph_order( num_nodes=num_nodes, edge_index=edge_index, edge_type=edge_type, order=order ) if extend_radius: edge_index, edge_type = _extend_to_radius_graph( pos=pos, edge_index=edge_index, edge_type=edge_type, cutoff=cutoff, batch=batch, is_sidechain=is_sidechain ) return edge_index, edge_type def get_distance(pos, edge_index): return (pos[edge_index[0]] - pos[edge_index[1]]).norm(dim=-1) def graph_field_network(score_d, pos, edge_index, edge_length): """ Transformation to make the epsilon predicted from the diffusion model roto-translational equivariant. See equations 5-7 of the GeoDiff Paper https://huggingface.co/papers/2203.02923 """ N = pos.size(0) dd_dr = (1.0 / edge_length) * (pos[edge_index[0]] - pos[edge_index[1]]) # (E, 3) score_pos = scatter_add(dd_dr * score_d, edge_index[0], dim=0, dim_size=N) + scatter_add( -dd_dr * score_d, edge_index[1], dim=0, dim_size=N ) # (N, 3) return score_pos def clip_norm(vec, limit, p=2): norm = torch.norm(vec, dim=-1, p=2, keepdim=True) denom = torch.where(norm > limit, limit / norm, torch.ones_like(norm)) return vec * denom def is_local_edge(edge_type): return edge_type > 0<jupyter_output><empty_output><jupyter_text>Main model class!<jupyter_code>class MoleculeGNN(ModelMixin, ConfigMixin): @register_to_config def __init__( self, hidden_dim=128, num_convs=6, num_convs_local=4, cutoff=10.0, mlp_act="relu", edge_order=3, edge_encoder="mlp", smooth_conv=True, ): super().__init__() self.cutoff = cutoff self.edge_encoder = edge_encoder self.edge_order = edge_order """ edge_encoder: Takes both edge type and edge length as input and outputs a vector [Note]: node embedding is done in SchNetEncoder """ self.edge_encoder_global = MLPEdgeEncoder(hidden_dim, mlp_act) # get_edge_encoder(config) self.edge_encoder_local = MLPEdgeEncoder(hidden_dim, mlp_act) # get_edge_encoder(config) """ The graph neural network that extracts node-wise features. """ self.encoder_global = SchNetEncoder( hidden_channels=hidden_dim, num_filters=hidden_dim, num_interactions=num_convs, edge_channels=self.edge_encoder_global.out_channels, cutoff=cutoff, smooth=smooth_conv, ) self.encoder_local = GINEncoder( hidden_dim=hidden_dim, num_convs=num_convs_local, ) """ `output_mlp` takes a mixture of two nodewise features and edge features as input and outputs gradients w.r.t. edge_length (out_dim = 1). 
""" self.grad_global_dist_mlp = MultiLayerPerceptron( 2 * hidden_dim, [hidden_dim, hidden_dim // 2, 1], activation=mlp_act ) self.grad_local_dist_mlp = MultiLayerPerceptron( 2 * hidden_dim, [hidden_dim, hidden_dim // 2, 1], activation=mlp_act ) """ Incorporate parameters together """ self.model_global = nn.ModuleList([self.edge_encoder_global, self.encoder_global, self.grad_global_dist_mlp]) self.model_local = nn.ModuleList([self.edge_encoder_local, self.encoder_local, self.grad_local_dist_mlp]) def _forward( self, atom_type, pos, bond_index, bond_type, batch, time_step, # NOTE, model trained without timestep performed best edge_index=None, edge_type=None, edge_length=None, return_edges=False, extend_order=True, extend_radius=True, is_sidechain=None, ): """ Args: atom_type: Types of atoms, (N, ). bond_index: Indices of bonds (not extended, not radius-graph), (2, E). bond_type: Bond types, (E, ). batch: Node index to graph index, (N, ). """ N = atom_type.size(0) if edge_index is None or edge_type is None or edge_length is None: edge_index, edge_type = extend_graph_order_radius( num_nodes=N, pos=pos, edge_index=bond_index, edge_type=bond_type, batch=batch, order=self.edge_order, cutoff=self.cutoff, extend_order=extend_order, extend_radius=extend_radius, is_sidechain=is_sidechain, ) edge_length = get_distance(pos, edge_index).unsqueeze(-1) # (E, 1) local_edge_mask = is_local_edge(edge_type) # (E, ) # with the parameterization of NCSNv2 # DDPM loss implicit handle the noise variance scale conditioning sigma_edge = torch.ones(size=(edge_index.size(1), 1), device=pos.device) # (E, 1) # Encoding global edge_attr_global = self.edge_encoder_global(edge_length=edge_length, edge_type=edge_type) # Embed edges # Global node_attr_global = self.encoder_global( z=atom_type, edge_index=edge_index, edge_length=edge_length, edge_attr=edge_attr_global, ) # Assemble pairwise features h_pair_global = assemble_atom_pair_feature( node_attr=node_attr_global, edge_index=edge_index, edge_attr=edge_attr_global, ) # (E_global, 2H) # Invariant features of edges (radius graph, global) edge_inv_global = self.grad_global_dist_mlp(h_pair_global) * (1.0 / sigma_edge) # (E_global, 1) # Encoding local edge_attr_local = self.edge_encoder_global(edge_length=edge_length, edge_type=edge_type) # Embed edges # edge_attr += temb_edge # Local node_attr_local = self.encoder_local( z=atom_type, edge_index=edge_index[:, local_edge_mask], edge_attr=edge_attr_local[local_edge_mask], ) # Assemble pairwise features h_pair_local = assemble_atom_pair_feature( node_attr=node_attr_local, edge_index=edge_index[:, local_edge_mask], edge_attr=edge_attr_local[local_edge_mask], ) # (E_local, 2H) # Invariant features of edges (bond graph, local) if isinstance(sigma_edge, torch.Tensor): edge_inv_local = self.grad_local_dist_mlp(h_pair_local) * ( 1.0 / sigma_edge[local_edge_mask] ) # (E_local, 1) else: edge_inv_local = self.grad_local_dist_mlp(h_pair_local) * (1.0 / sigma_edge) # (E_local, 1) if return_edges: return edge_inv_global, edge_inv_local, edge_index, edge_type, edge_length, local_edge_mask else: return edge_inv_global, edge_inv_local def forward( self, sample, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, sigma=1.0, global_start_sigma=0.5, w_global=1.0, extend_order=False, extend_radius=True, clip_local=None, clip_global=1000.0, ) -> Union[MoleculeGNNOutput, Tuple]: r""" Args: sample: packed torch geometric object timestep (`torch.Tensor` or `float` or `int): TODO verify type and shape (batch) timesteps return_dict 
(`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.molecule_gnn.MoleculeGNNOutput`] instead of a plain tuple. Returns: [`~models.molecule_gnn.MoleculeGNNOutput`] or `tuple`: [`~models.molecule_gnn.MoleculeGNNOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # unpack sample atom_type = sample.atom_type bond_index = sample.edge_index bond_type = sample.edge_type num_graphs = sample.num_graphs pos = sample.pos timesteps = torch.full(size=(num_graphs,), fill_value=timestep, dtype=torch.long, device=pos.device) edge_inv_global, edge_inv_local, edge_index, edge_type, edge_length, local_edge_mask = self._forward( atom_type=atom_type, pos=sample.pos, bond_index=bond_index, bond_type=bond_type, batch=sample.batch, time_step=timesteps, return_edges=True, extend_order=extend_order, extend_radius=extend_radius, ) # (E_global, 1), (E_local, 1) # Important equation in the paper for equivariant features - eqns 5-7 of GeoDiff node_eq_local = graph_field_network( edge_inv_local, pos, edge_index[:, local_edge_mask], edge_length[local_edge_mask] ) if clip_local is not None: node_eq_local = clip_norm(node_eq_local, limit=clip_local) # Global if sigma < global_start_sigma: edge_inv_global = edge_inv_global * (1 - local_edge_mask.view(-1, 1).float()) node_eq_global = graph_field_network(edge_inv_global, pos, edge_index, edge_length) node_eq_global = clip_norm(node_eq_global, limit=clip_global) else: node_eq_global = 0 # Sum eps_pos = node_eq_local + node_eq_global * w_global if not return_dict: return (-eps_pos,) return MoleculeGNNOutput(sample=torch.Tensor(-eps_pos).to(pos.device))<jupyter_output><empty_output><jupyter_text>Load pretrained model Load a modelThe model used is a design anequivariant convolutional layer, named graph field network (GFN).The warning about `betas` and `alphas` can be ignored, those were moved to the scheduler.<jupyter_code>DEVICE = "cuda" model = MoleculeGNN.from_pretrained("fusing/gfn-molecule-gen-drugs").to(DEVICE)<jupyter_output><empty_output><jupyter_text>The warnings above are because the pre-trained model was uploaded before cleaning the code! Create schedulerNote, other schedulers are used in the paper for slightly improved performance over DDPM.<jupyter_code>from diffusers import DDPMScheduler num_timesteps = 1000 scheduler = DDPMScheduler( num_train_timesteps=num_timesteps, beta_schedule="sigmoid", beta_start=1e-7, beta_end=2e-3, clip_sample=False )<jupyter_output><empty_output><jupyter_text>Get a dataset Grab a google tool so we can upload our data directly. Note you need to download the data from ***this [file](https://huggingface.co/datasets/fusing/geodiff-example-data/blob/main/data/molecules.pkl)***(direct downloading from the hub does not yet work for this datatype)<jupyter_code># from google.colab import files # uploaded = files.upload()<jupyter_output><empty_output><jupyter_text>Load the dataset with torch.<jupyter_code>import torch !wget https://huggingface.co/datasets/fusing/geodiff-example-data/resolve/main/data/molecules.pkl dataset = torch.load("/content/molecules.pkl")<jupyter_output>--2022-10-12 18:32:19-- https://huggingface.co/datasets/fusing/geodiff-example-data/resolve/main/data/molecules.pkl Resolving huggingface.co (huggingface.co)... 44.195.102.200, 52.5.54.249, 54.210.225.113, ... Connecting to huggingface.co (huggingface.co)|44.195.102.200|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 127774 (125K) [application/octet-stream] Saving to: ‘molecules.pkl’ molecules.pkl 100%[===================>] 124.78K 180KB/s in 0.7s 2022-10-12 18:32:20 (180 KB/s) - ‘molecules.pkl’ saved [127774/127774]<jupyter_text>Print out one entry of the dataset, it contains molecular formulas, atom types, positions, and more.<jupyter_code>dataset[0]<jupyter_output><empty_output><jupyter_text>Run the diffusion process Helper Functions<jupyter_code>import copy import os from torch_geometric.data import Batch, Data from torch_scatter import scatter_mean from tqdm import tqdm def repeat_data(data: Data, num_repeat) -> Batch: datas = [copy.deepcopy(data) for i in range(num_repeat)] return Batch.from_data_list(datas) def repeat_batch(batch: Batch, num_repeat) -> Batch: datas = batch.to_data_list() new_data = [] for i in range(num_repeat): new_data += copy.deepcopy(datas) return Batch.from_data_list(new_data)<jupyter_output><empty_output><jupyter_text>Constants<jupyter_code>num_samples = 1 # solutions per molecule num_molecules = 3 DEVICE = "cuda" sampling_type = "ddpm_noisy" #'' # paper also uses "generalize" and "ld" # constants for inference w_global = 0.5 # 0,.3 for qm9 global_start_sigma = 0.5 eta = 1.0 clip_local = None clip_pos = None # constands for data handling save_traj = False save_data = False output_dir = "/content/"<jupyter_output><empty_output><jupyter_text>Generate samples!Note that the 3d representation of a molecule is referred to as the **conformation**<jupyter_code>import pickle results = [] # define sigmas sigmas = torch.tensor(1.0 - scheduler.alphas_cumprod).sqrt() / torch.tensor(scheduler.alphas_cumprod).sqrt() sigmas = sigmas.to(DEVICE) for count, data in enumerate(tqdm(dataset)): num_samples = max(data.pos_ref.size(0) // data.num_nodes, 1) data_input = data.clone() data_input["pos_ref"] = None batch = repeat_data(data_input, num_samples).to(DEVICE) # initial configuration pos_init = torch.randn(batch.num_nodes, 3).to(DEVICE) # for logging animation of denoising pos_traj = [] with torch.no_grad(): # scale initial sample pos = pos_init * sigmas[-1] for t in scheduler.timesteps: batch.pos = pos # generate geometry with model, then filter it epsilon = model.forward(batch, t, sigma=sigmas[t], return_dict=False)[0] # Update reconstructed_pos = scheduler.step(epsilon, t, pos)["prev_sample"].to(DEVICE) pos = reconstructed_pos if torch.isnan(pos).any(): print("NaN detected. Please restart.") raise FloatingPointError() # recenter graph of positions for next iteration pos = pos - scatter_mean(pos, batch.batch, dim=0)[batch.batch] # optional clipping if clip_pos is not None: pos = torch.clamp(pos, min=-clip_pos, max=clip_pos) pos_traj.append(pos.clone().cpu()) pos_gen = pos.cpu() if save_traj: pos_gen_traj = pos_traj.cpu() data.pos_gen = torch.stack(pos_gen_traj) else: data.pos_gen = pos_gen results.append(data) if save_data: save_path = os.path.join(output_dir, "samples_all.pkl") with open(save_path, "wb") as f: pickle.dump(results, f)<jupyter_output>/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor). after removing the cwd from sys.path. 100%|██████████| 5/5 [00:55<00:00, 11.06s/it]<jupyter_text>Render the results! 
This function allows us to render 3d in colab.<jupyter_code>from google.colab import output output.enable_custom_widget_manager()<jupyter_output><empty_output><jupyter_text>Helper functions Here is a helper function for copying the generated tensors into a format used by RDKit & NGLViewer.<jupyter_code>from copy import deepcopy def set_rdmol_positions(rdkit_mol, pos): """ Args: rdkit_mol: An `rdkit.Chem.rdchem.Mol` object. pos: (N_atoms, 3) """ mol = deepcopy(rdkit_mol) set_rdmol_positions_(mol, pos) return mol def set_rdmol_positions_(mol, pos): """ Args: rdkit_mol: An `rdkit.Chem.rdchem.Mol` object. pos: (N_atoms, 3) """ for i in range(pos.shape[0]): mol.GetConformer(0).SetAtomPosition(i, pos[i].tolist()) return mol<jupyter_output><empty_output><jupyter_text>Process the generated data to make it easy to view.<jupyter_code># the model can generate multiple conformations per 2d geometry num_gen = results[0]["pos_gen"].shape[0] # init storage objects mols_gen = [] mols_orig = [] for to_process in results: # store the reference 3d position to_process["pos_ref"] = to_process["pos_ref"].reshape(-1, to_process["rdmol"].GetNumAtoms(), 3) # store the generated 3d position to_process["pos_gen"] = to_process["pos_gen"].reshape(-1, to_process["rdmol"].GetNumAtoms(), 3) # copy data to new object new_mol = set_rdmol_positions(to_process.rdmol, to_process["pos_gen"][0]) # append results mols_gen.append(new_mol) mols_orig.append(to_process.rdmol) print(f"collect {len(mols_gen)} generated molecules in `mols`")<jupyter_output>collect 5 generated molecules in `mols`<jupyter_text>Import tools to visualize the 2d chemical diagram of the molecule.<jupyter_code>from IPython.display import SVG, display from rdkit import Chem from rdkit.Chem.Draw import rdMolDraw2D as MD2<jupyter_output><empty_output><jupyter_text>Select molecule to visualize<jupyter_code>idx = 0 assert idx < len(results), "selected molecule that was not generated"<jupyter_output><empty_output><jupyter_text>Viewing This 2D rendering is the equivalent of the **input to the model**!<jupyter_code>mc = Chem.MolFromSmiles(dataset[0]["smiles"]) molSize = (450, 300) drawer = MD2.MolDraw2DSVG(molSize[0], molSize[1]) drawer.DrawMolecule(mc) drawer.FinishDrawing() svg = drawer.GetDrawingText() display(SVG(svg.replace("svg:", "")))<jupyter_output><empty_output><jupyter_text>Generate the 3d molecule!<jupyter_code>from nglview import show_rdkit as show # new molecule show(mols_gen[idx])<jupyter_output><empty_output>
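<jupyter_text>Optionally, save the generated conformer to disk. This cell is not part of the original notebook: it is a minimal sketch, assuming RDKit is available (it is already imported above) and that `mols_gen`, `idx`, and `output_dir` are defined as in the cells before. It writes the selected generated conformer to an SDF file so it can be inspected in external tools.<jupyter_code># Editor's sketch: export the generated 3D conformer with standard RDKit I/O.
# The file name below is an illustrative choice, not something the original
# notebook produces.
from rdkit import Chem

sdf_path = os.path.join(output_dir, f"generated_conformer_{idx}.sdf")
writer = Chem.SDWriter(sdf_path)
writer.write(mols_gen[idx])  # mols_gen[idx] already carries the generated 3D positions
writer.close()
print(f"wrote {sdf_path}")<jupyter_output><empty_output>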
diffusers/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb/0
{ "file_path": "diffusers/examples/research_projects/geodiff/geodiff_molecule_conformation.ipynb", "repo_id": "diffusers", "token_count": 18663 }
144
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from transformers.utils import ContextManagers import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "lambdalabs/naruto-blip-captions": ("image", "text"), } def save_model_card( args, repo_id: str, images: list = None, repo_folder: str = None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" model_description = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_description += wandb_info model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=args.pretrained_model_name_or_path, model_description=model_description, inference=True, ) tags = ["stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "diffusers-training"] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=accelerator.unwrap_model(vae), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), safety_checker=None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://huggingface.co/papers/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber", "smooth_l1"], help="The type of loss to use and whether it's timestep-scheduled. See Issue #7488 for more info.", ) parser.add_argument( "--huber_schedule", type=str, default="snr", choices=["constant", "exponential", "snr"], help="The schedule to use for the huber losses parameter", ) parser.add_argument( "--huber_c", type=float, default=0.1, help="The huber loss parameter. 
Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", ) parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args # NOTE: if you're using the scheduled version, huber_c has to depend on the timesteps already def conditional_loss( model_pred: torch.Tensor, target: torch.Tensor, reduction: str = "mean", loss_type: str = "l2", huber_c: float = 0.1, ): if loss_type == "l2": loss = F.mse_loss(model_pred, target, reduction=reduction) elif loss_type == "huber": loss = 2 * huber_c * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c) if reduction == "mean": loss = torch.mean(loss) elif reduction == "sum": loss = torch.sum(loss) elif loss_type == "smooth_l1": loss = 2 * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c) if reduction == "mean": loss = torch.mean(loss) elif reduction == "sum": loss = torch.sum(loss) else: raise NotImplementedError(f"Unsupported Loss Type {loss_type}") return loss def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. # For this to work properly all models must be run through `accelerate.prepare`. But accelerate # will try to assign the same optimizer with the same weights to all models during # `deepspeed.initialize`, which of course doesn't work. # # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 # frozen models from being partitioned during `zero.Init` which gets called during # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. with ContextManagers(deepspeed_zero_init_disabled_context_manager()): text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # Freeze vae and text_encoder and set unet to trainable vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for _ in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Function for unwrapping if model was compiled with `torch.compile`. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) if args.input_perturbation: new_noise = noise + args.input_perturbation * torch.randn_like(noise) bsz = latents.shape[0] # Sample a random timestep for each image if args.loss_type == "huber" or args.loss_type == "smooth_l1": timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (1,), device="cpu") timestep = timesteps.item() if args.huber_schedule == "exponential": alpha = -math.log(args.huber_c) / noise_scheduler.config.num_train_timesteps huber_c = math.exp(-alpha * timestep) elif args.huber_schedule == "snr": alphas_cumprod = noise_scheduler.alphas_cumprod[timestep] sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5 huber_c = (1 - args.huber_c) / (1 + sigmas) ** 2 + args.huber_c elif args.huber_schedule == "constant": huber_c = args.huber_c else: raise NotImplementedError(f"Unknown Huber loss schedule {args.huber_schedule}!") timesteps = timesteps.repeat(bsz).to(latents.device) elif args.loss_type == "l2": timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) huber_c = 1 # may be anything, as it's not used else: raise NotImplementedError(f"Unknown loss type {args.loss_type}") timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) if args.input_perturbation: noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) else: noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] if args.snr_gamma is None: loss = conditional_loss( model_pred.float(), target.float(), reduction="mean", loss_type=args.loss_type, huber_c=huber_c ) else: # Compute loss-weights as per Section 3.4 of https://huggingface.co/papers/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. 
snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = conditional_loss( model_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c ) loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. 
accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) # Run a final round of inference. images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image.py/0
{ "file_path": "diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image.py", "repo_id": "diffusers", "token_count": 21062 }
145
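The training loop in the record above picks a per-timestep huber_c through an exponential, SNR-based, or constant schedule and hands it to conditional_loss, a helper defined elsewhere in the same file. As a minimal, self-contained sketch of that idea, the snippet below re-implements an exponential huber_c schedule together with a pseudo-Huber loss; the exact loss formula and the helper names are assumptions for illustration, not the script's verbatim code.

import math

import torch


def scheduled_huber_c(timestep, num_train_timesteps, base_huber_c, schedule="exponential"):
    # Mirrors the "exponential" and "constant" branches above: huber_c decays from 1.0 at
    # timestep 0 toward base_huber_c at the last training timestep (assumed behaviour).
    if schedule == "exponential":
        alpha = -math.log(base_huber_c) / num_train_timesteps
        return math.exp(-alpha * timestep)
    if schedule == "constant":
        return base_huber_c
    raise NotImplementedError(f"Unknown Huber loss schedule {schedule}!")


def pseudo_huber_loss(pred, target, huber_c):
    # Smooth Huber variant: approximately quadratic for small residuals, linear for large ones.
    return 2 * huber_c * (torch.sqrt((pred - target) ** 2 + huber_c**2) - huber_c)


pred, target = torch.randn(2, 4, 8, 8), torch.randn(2, 4, 8, 8)
c = scheduled_huber_c(timestep=900, num_train_timesteps=1000, base_huber_c=0.1)
loss = pseudo_huber_loss(pred, target, c).mean()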
import argparse import logging import math import os import random from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import PIL import torch import torch.utils.checkpoint import transformers from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, FlaxPNDMScheduler, FlaxStableDiffusionPipeline, FlaxUNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.36.0.dev0") logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." ) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--use_auth_token", action="store_true", help=( "Will use the token generated when running `hf auth login` (necessary to use this script with" " private models)." ), ) parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng): if model.config.vocab_size == new_num_tokens or new_num_tokens is None: return model.config.vocab_size = new_num_tokens params = model.params old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"] old_num_tokens, emb_dim = old_embeddings.shape initializer = jax.nn.initializers.normal() new_embeddings = initializer(rng, (new_num_tokens, emb_dim)) new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings) new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id]) params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings model.params = params return model def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() if args.seed is not None: set_seed(args.seed) if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) # Create sampling rng rng = jax.random.PRNGKey(args.seed) rng, _ = jax.random.split(rng) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder = resize_token_embeddings( text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng ) original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.stack([example["input_ids"] for example in examples]) batch = {"pixel_values": pixel_values, "input_ids": input_ids} batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) optimizer = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) def create_mask(params, label_fn): def _map(params, mask, label_fn): for k in params: if label_fn(k): mask[k] = "token_embedding" else: if isinstance(params[k], dict): mask[k] = {} _map(params[k], mask[k], label_fn) else: mask[k] = "zero" mask = {} _map(params, mask, label_fn) return mask def zero_grads(): # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 def init_fn(_): return () def update_fn(updates, state, params=None): return jax.tree_util.tree_map(jnp.zeros_like, updates), () return optax.GradientTransformation(init_fn, update_fn) # Zero out gradients of layers other than the token embedding layer tx = optax.multi_transform( {"token_embedding": optimizer, "zero": zero_grads()}, create_mask(text_encoder.params, lambda s: s == "token_embedding"), ) state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, 
jax.local_device_count()) # Define gradient train step fn def train_step(state, vae_params, unet_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) def compute_loss(params): vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) encoder_hidden_states = state.apply_fn( batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True )[0] # Predict the noise residual and compute loss model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) # Keep the token embeddings fixed except the newly added embeddings for the concept, # as we only want to optimize the concept embeddings token_embeds = original_token_embeds.at[placeholder_token_id].set( new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] ) new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics, new_train_rng # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) vae_params = jax_utils.replicate(vae_params) unet_params = jax_utils.replicate(unet_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... 
(1/{args.num_train_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_metrics = [] steps_per_epoch = len(train_dataset) // total_train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_dataloader: batch = shard(batch) state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break if global_step % args.save_steps == 0: learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][ "embedding" ][placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save( os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict ) train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Create the pipeline using using the trained modules and save it. if jax.process_index() == 0: scheduler = FlaxPNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", from_pt=True ) pipeline = FlaxStableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained( args.output_dir, params={ "text_encoder": get_params_to_save(state.params), "vae": get_params_to_save(vae_params), "unet": get_params_to_save(unet_params), "safety_checker": safety_checker.params, }, ) # Also save the newly trained embeddings learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][ placeholder_token_id ] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if __name__ == "__main__": main()
diffusers/examples/textual_inversion/textual_inversion_flax.py/0
{ "file_path": "diffusers/examples/textual_inversion/textual_inversion_flax.py", "repo_id": "diffusers", "token_count": 11073 }
146
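The Flax textual-inversion script above trains only the token embedding by labelling parameter subtrees and routing everything else through a zero-update transformation. The sketch below shows that optax.multi_transform pattern on a toy parameter tree; the parameter names are invented for the example.

import jax
import jax.numpy as jnp
import optax


def zero_grads():
    # Gradient transformation that replaces every update with zeros, i.e. freezes the subtree.
    def init_fn(_):
        return ()

    def update_fn(updates, state, params=None):
        return jax.tree_util.tree_map(jnp.zeros_like, updates), ()

    return optax.GradientTransformation(init_fn, update_fn)


params = {"token_embedding": jnp.ones((4, 8)), "other": jnp.ones((8, 8))}
labels = {"token_embedding": "train", "other": "freeze"}

tx = optax.multi_transform({"train": optax.adamw(1e-3), "freeze": zero_grads()}, labels)
opt_state = tx.init(params)

grads = jax.tree_util.tree_map(jnp.ones_like, params)
updates, opt_state = tx.update(grads, opt_state, params)
new_params = optax.apply_updates(params, updates)  # only "token_embedding" changes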
import argparse import os import torch from huggingface_hub import create_repo, upload_folder from safetensors.torch import load_file, save_file def convert_motion_module(original_state_dict): converted_state_dict = {} for k, v in original_state_dict.items(): if "pos_encoder" in k: continue else: converted_state_dict[ k.replace(".norms.0", ".norm1") .replace(".norms.1", ".norm2") .replace(".ff_norm", ".norm3") .replace(".attention_blocks.0", ".attn1") .replace(".attention_blocks.1", ".attn2") .replace(".temporal_transformer", "") ] = v return converted_state_dict def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, required=True, help="Path to checkpoint") parser.add_argument("--output_path", type=str, required=True, help="Path to output directory") parser.add_argument( "--push_to_hub", action="store_true", default=False, help="Whether to push the converted model to the HF or not", ) return parser.parse_args() if __name__ == "__main__": args = get_args() if args.ckpt_path.endswith(".safetensors"): state_dict = load_file(args.ckpt_path) else: state_dict = torch.load(args.ckpt_path, map_location="cpu") if "state_dict" in state_dict.keys(): state_dict = state_dict["state_dict"] conv_state_dict = convert_motion_module(state_dict) # convert to new format output_dict = {} for module_name, params in conv_state_dict.items(): if type(params) is not torch.Tensor: continue output_dict.update({f"unet.{module_name}": params}) os.makedirs(args.output_path, exist_ok=True) filepath = os.path.join(args.output_path, "diffusion_pytorch_model.safetensors") save_file(output_dict, filepath) if args.push_to_hub: repo_id = create_repo(args.output_path, exist_ok=True).repo_id upload_folder(repo_id=repo_id, folder_path=args.output_path, repo_type="model")
diffusers/scripts/convert_animatediff_motion_lora_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_animatediff_motion_lora_to_diffusers.py", "repo_id": "diffusers", "token_count": 931 }
147
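To make the renaming in convert_motion_module concrete, here is the same chain of replacements applied to a couple of representative, made-up AnimateDiff motion-module keys: pos_encoder entries are dropped and the temporal_transformer level disappears from the key path.

import torch

dummy_state_dict = {
    "down_blocks.0.motion_modules.0.temporal_transformer.transformer_blocks.0.attention_blocks.0.to_q.weight": torch.zeros(1),
    "down_blocks.0.motion_modules.0.temporal_transformer.transformer_blocks.0.norms.0.weight": torch.zeros(1),
    "down_blocks.0.motion_modules.0.temporal_transformer.pos_encoder.pe": torch.zeros(1),  # gets dropped
}

converted = {}
for k, v in dummy_state_dict.items():
    if "pos_encoder" in k:
        continue
    converted[
        k.replace(".norms.0", ".norm1")
        .replace(".norms.1", ".norm2")
        .replace(".ff_norm", ".norm3")
        .replace(".attention_blocks.0", ".attn1")
        .replace(".attention_blocks.1", ".attn2")
        .replace(".temporal_transformer", "")
    ] = v

# -> "...transformer_blocks.0.attn1.to_q.weight" and "...transformer_blocks.0.norm1.weight"
print(list(converted))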
# Script for converting Hugging Face Diffusers trained SDXL LoRAs to Kohya format
# This means that you can input your diffusers-trained LoRAs and
# get the output to work with WebUIs such as AUTOMATIC1111, ComfyUI, SD.Next and others.

# To get started you can find some cool `diffusers` trained LoRAs such as this cute Corgy
# https://huggingface.co/ignasbud/corgy_dog_LoRA/, download its `pytorch_lora_weights.safetensors` file
# and run the script:
# python convert_diffusers_sdxl_lora_to_webui.py --input_lora pytorch_lora_weights.safetensors --output_lora corgy.safetensors
# now you can use corgy.safetensors in your WebUI of choice!

# To train your own, here are some diffusers training scripts and utils that you can use and then convert:
# LoRA Ease - no code SDXL Dreambooth LoRA trainer: https://huggingface.co/spaces/multimodalart/lora-ease
# Dreambooth Advanced Training Script - state of the art techniques such as pivotal tuning and prodigy optimizer:
# - Script: https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
# - Colab (only on Pro): https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb
# Canonical diffusers training scripts:
# - Script: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_sdxl.py
# - Colab (runs on free tier): https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_DreamBooth_LoRA_.ipynb

import argparse
import os

from safetensors.torch import load_file, save_file

from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya


def convert_and_save(input_lora, output_lora=None):
    if output_lora is None:
        base_name = os.path.splitext(input_lora)[0]
        output_lora = f"{base_name}_webui.safetensors"

    diffusers_state_dict = load_file(input_lora)
    peft_state_dict = convert_all_state_dict_to_peft(diffusers_state_dict)
    kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
    save_file(kohya_state_dict, output_lora)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert LoRA model to PEFT and then to Kohya format.")
    parser.add_argument(
        "--input_lora",
        type=str,
        required=True,
        help="Path to the input LoRA model file in the diffusers format.",
    )
    parser.add_argument(
        "--output_lora",
        type=str,
        required=False,
        help="Path for the converted LoRA (safetensors format for AUTOMATIC1111, ComfyUI, etc.). Optional, defaults to input name with a _webui suffix.",
    )
    args = parser.parse_args()
    convert_and_save(args.input_lora, args.output_lora)
diffusers/scripts/convert_diffusers_sdxl_lora_to_webui.py/0
{ "file_path": "diffusers/scripts/convert_diffusers_sdxl_lora_to_webui.py", "repo_id": "diffusers", "token_count": 1027 }
148
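Besides the command line shown in the script's header comments, the same two diffusers utilities can be called directly from Python; the input path below is a hypothetical local file.

from safetensors.torch import load_file, save_file

from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya

diffusers_state_dict = load_file("pytorch_lora_weights.safetensors")  # hypothetical local file
kohya_state_dict = convert_state_dict_to_kohya(convert_all_state_dict_to_peft(diffusers_state_dict))
save_file(kohya_state_dict, "pytorch_lora_weights_webui.safetensors")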
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def convert_ldm_checkpoint(checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"] new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"] new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"] new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["num_res_blocks"] + 1) layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1) resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in checkpoint: 
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[ f"input_blocks.{i}.0.op.weight" ] new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[ f"input_blocks.{i}.0.op.bias" ] continue paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"input_blocks.{i}.1.qkv.bias": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"input_blocks.{i}.1.qkv.weight": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config) attentions_paths = renew_attention_paths(attentions) to_split = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config ) for i in range(num_output_blocks): block_id = i // (config["num_res_blocks"] + 1) layer_in_block_id = i % (config["num_res_blocks"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config) if ["conv.weight", "conv.bias"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.weight", "conv.bias"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[ f"output_blocks.{i}.{index}.conv.weight" ] 
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"output_blocks.{i}.1.qkv.bias": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"output_blocks.{i}.1.qkv.weight": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() checkpoint = torch.load(args.checkpoint_path) with open(args.config_file) as f: config = json.loads(f.read()) converted_checkpoint = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] model = UNet2DModel(**config) model.load_state_dict(converted_checkpoint) try: scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py", "repo_id": "diffusers", "token_count": 6854 }
149
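A standalone demonstration of the local renaming step performed by renew_resnet_paths and shave_segments in the conversion script above, applied to one made-up resnet key.

def shave_segments(path, n_shave_prefix_segments=1):
    # Same helper as in the script: drop leading (or trailing) dot-separated segments.
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    return ".".join(path.split(".")[:n_shave_prefix_segments])


old_key = "input_blocks.1.0.in_layers.2.weight"
new_key = (
    old_key.replace("in_layers.0", "norm1")
    .replace("in_layers.2", "conv1")
    .replace("out_layers.0", "norm2")
    .replace("out_layers.3", "conv2")
)
print(new_key)                     # input_blocks.1.0.conv1.weight
print(shave_segments(new_key, 3))  # conv1.weight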
import argparse import os import torch from transformers import T5EncoderModel, T5Tokenizer from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, PixArtAlphaPipeline, Transformer2DModel ckpt_id = "PixArt-alpha/PixArt-alpha" # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/scripts/inference.py#L125 interpolation_scale = {256: 0.5, 512: 1, 1024: 2} def main(args): all_state_dict = torch.load(args.orig_ckpt_path, map_location="cpu") state_dict = all_state_dict.pop("state_dict") converted_state_dict = {} # Patch embeddings. converted_state_dict["pos_embed.proj.weight"] = state_dict.pop("x_embedder.proj.weight") converted_state_dict["pos_embed.proj.bias"] = state_dict.pop("x_embedder.proj.bias") # Caption projection. converted_state_dict["caption_projection.linear_1.weight"] = state_dict.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = state_dict.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = state_dict.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = state_dict.pop("y_embedder.y_proj.fc2.bias") # AdaLN-single LN converted_state_dict["adaln_single.emb.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.timestep_embedder.linear_1.bias"] = state_dict.pop("t_embedder.mlp.0.bias") converted_state_dict["adaln_single.emb.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.timestep_embedder.linear_2.bias"] = state_dict.pop("t_embedder.mlp.2.bias") if args.image_size == 1024: # Resolution. converted_state_dict["adaln_single.emb.resolution_embedder.linear_1.weight"] = state_dict.pop( "csize_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_1.bias"] = state_dict.pop( "csize_embedder.mlp.0.bias" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_2.weight"] = state_dict.pop( "csize_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_2.bias"] = state_dict.pop( "csize_embedder.mlp.2.bias" ) # Aspect ratio. converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_1.weight"] = state_dict.pop( "ar_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_1.bias"] = state_dict.pop( "ar_embedder.mlp.0.bias" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_2.weight"] = state_dict.pop( "ar_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_2.bias"] = state_dict.pop( "ar_embedder.mlp.2.bias" ) # Shared norm. converted_state_dict["adaln_single.linear.weight"] = state_dict.pop("t_block.1.weight") converted_state_dict["adaln_single.linear.bias"] = state_dict.pop("t_block.1.bias") for depth in range(28): # Transformer blocks. converted_state_dict[f"transformer_blocks.{depth}.scale_shift_table"] = state_dict.pop( f"blocks.{depth}.scale_shift_table" ) # Attention is all you need 🤘 # Self attention. 
q, k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.weight"), 3, dim=0) q_bias, k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.bias"), 3, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.bias"] = v_bias # Projection. converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.attn.proj.bias" ) # Feed-forward. converted_state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.weight"] = state_dict.pop( f"blocks.{depth}.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.bias"] = state_dict.pop( f"blocks.{depth}.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.2.weight"] = state_dict.pop( f"blocks.{depth}.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.2.bias"] = state_dict.pop( f"blocks.{depth}.mlp.fc2.bias" ) # Cross-attention. q = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.weight") q_bias = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.bias") k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.weight"), 2, dim=0) k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.bias"), 2, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.bias"] = v_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.bias" ) # Final block. 
converted_state_dict["proj_out.weight"] = state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = state_dict.pop("final_layer.linear.bias") converted_state_dict["scale_shift_table"] = state_dict.pop("final_layer.scale_shift_table") # DiT XL/2 transformer = Transformer2DModel( sample_size=args.image_size // 8, num_layers=28, attention_head_dim=72, in_channels=4, out_channels=8, patch_size=2, attention_bias=True, num_attention_heads=16, cross_attention_dim=1152, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, caption_channels=4096, ) transformer.load_state_dict(converted_state_dict, strict=True) assert transformer.pos_embed.pos_embed is not None state_dict.pop("pos_embed") state_dict.pop("y_embedder.y_embedding") assert len(state_dict) == 0, f"State dict is not empty, {state_dict.keys()}" num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") if args.only_transformer: transformer.save_pretrained(os.path.join(args.dump_path, "transformer")) else: scheduler = DPMSolverMultistepScheduler() vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="sd-vae-ft-ema") tokenizer = T5Tokenizer.from_pretrained(ckpt_id, subfolder="t5-v1_1-xxl") text_encoder = T5EncoderModel.from_pretrained(ckpt_id, subfolder="t5-v1_1-xxl") pipeline = PixArtAlphaPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler ) pipeline.save_pretrained(args.dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--orig_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[256, 512, 1024], required=False, help="Image size of pretrained model, either 512 or 1024.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--only_transformer", default=True, type=bool, required=True) args = parser.parse_args() main(args)
diffusers/scripts/convert_pixart_alpha_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_pixart_alpha_to_diffusers.py", "repo_id": "diffusers", "token_count": 4082 }
150
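The per-block attention conversion above hinges on splitting fused qkv projections into separate q, k and v tensors with torch.chunk; this small sketch shows the operation on stand-in shapes.

import torch

hidden_dim = 1152  # matches 16 heads x 72 head dim in the PixArt config above, but any size works here

qkv_weight = torch.randn(3 * hidden_dim, hidden_dim)
qkv_bias = torch.randn(3 * hidden_dim)

q, k, v = torch.chunk(qkv_weight, 3, dim=0)
q_bias, k_bias, v_bias = torch.chunk(qkv_bias, 3, dim=0)

assert q.shape == (hidden_dim, hidden_dim)
assert q_bias.shape == (hidden_dim,)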
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--txt2img_unclip", default="kakaobrain/karlo-v1-alpha", type=str, required=False, help="The pretrained txt2img unclip.", ) args = parser.parse_args() txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip) feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") img2img = UnCLIPImageVariationPipeline( decoder=txt2img.decoder, text_encoder=txt2img.text_encoder, tokenizer=txt2img.tokenizer, text_proj=txt2img.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txt2img.super_res_first, super_res_last=txt2img.super_res_last, decoder_scheduler=txt2img.decoder_scheduler, super_res_scheduler=txt2img.super_res_scheduler, ) img2img.save_pretrained(args.dump_path)
diffusers/scripts/convert_unclip_txt2img_to_image_variation.py/0
{ "file_path": "diffusers/scripts/convert_unclip_txt2img_to_image_variation.py", "repo_id": "diffusers", "token_count": 554 }
151
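Once the image-variation pipeline has been saved, it can be loaded back and used roughly as follows. The local path, dtype, and device choice are assumptions for illustration and are not part of the conversion script itself.

import torch
from PIL import Image

from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variations", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

init_image = Image.open("input.png").convert("RGB")  # hypothetical input image
variation = pipe(image=init_image).images[0]
variation.save("variation.png")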
#!/usr/bin/env python # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from .custom_blocks import CustomBlocksCommand from .env import EnvironmentCommand from .fp16_safetensors import FP16SafetensorsCommand def main(): parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]") commands_parser = parser.add_subparsers(help="diffusers-cli command helpers") # Register commands EnvironmentCommand.register_subcommand(commands_parser) FP16SafetensorsCommand.register_subcommand(commands_parser) CustomBlocksCommand.register_subcommand(commands_parser) # Let's go args = parser.parse_args() if not hasattr(args, "func"): parser.print_help() exit(1) # Run service = args.func(args) service.run() if __name__ == "__main__": main()
diffusers/src/diffusers/commands/diffusers_cli.py/0
{ "file_path": "diffusers/src/diffusers/commands/diffusers_cli.py", "repo_id": "diffusers", "token_count": 439 }
152
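The console entry point above can also be driven from a Python session for quick checks. The snippet assumes the environment command registers itself under the subcommand name "env", which is not shown in this file.

import sys

from diffusers.commands.diffusers_cli import main

# Roughly equivalent to running `diffusers-cli env` from a shell (subcommand name assumed).
sys.argv = ["diffusers-cli", "env"]
main()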
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import torch from huggingface_hub.utils import validate_hf_hub_args from typing_extensions import Self from ..configuration_utils import ConfigMixin from ..utils import PushToHubMixin, get_logger if TYPE_CHECKING: from ..modular_pipelines.modular_pipeline import BlockState GUIDER_CONFIG_NAME = "guider_config.json" logger = get_logger(__name__) # pylint: disable=invalid-name class BaseGuidance(ConfigMixin, PushToHubMixin): r"""Base class providing the skeleton for implementing guidance techniques.""" config_name = GUIDER_CONFIG_NAME _input_predictions = None _identifier_key = "__guidance_identifier__" def __init__(self, start: float = 0.0, stop: float = 1.0): self._start = start self._stop = stop self._step: int = None self._num_inference_steps: int = None self._timestep: torch.LongTensor = None self._count_prepared = 0 self._input_fields: Dict[str, Union[str, Tuple[str, str]]] = None self._enabled = True if not (0.0 <= start < 1.0): raise ValueError(f"Expected `start` to be between 0.0 and 1.0, but got {start}.") if not (start <= stop <= 1.0): raise ValueError(f"Expected `stop` to be between {start} and 1.0, but got {stop}.") if self._input_predictions is None or not isinstance(self._input_predictions, list): raise ValueError( "`_input_predictions` must be a list of required prediction names for the guidance technique." ) def disable(self): self._enabled = False def enable(self): self._enabled = True def set_state(self, step: int, num_inference_steps: int, timestep: torch.LongTensor) -> None: self._step = step self._num_inference_steps = num_inference_steps self._timestep = timestep self._count_prepared = 0 def set_input_fields(self, **kwargs: Dict[str, Union[str, Tuple[str, str]]]) -> None: """ Set the input fields for the guidance technique. The input fields are used to specify the names of the returned attributes containing the prepared data after `prepare_inputs` is called. The prepared data is obtained from the values of the provided keyword arguments to this method. Args: **kwargs (`Dict[str, Union[str, Tuple[str, str]]]`): A dictionary where the keys are the names of the fields that will be used to store the data once it is prepared with `prepare_inputs`. The values can be either a string or a tuple of length 2, which is used to look up the required data provided for preparation. If a string is provided, it will be used as the conditional data (or unconditional if used with a guidance method that requires it). If a tuple of length 2 is provided, the first element must be the conditional data identifier and the second element must be the unconditional data identifier or None. 
Example: ``` data = {"prompt_embeds": <some tensor>, "negative_prompt_embeds": <some tensor>, "latents": <some tensor>} BaseGuidance.set_input_fields( latents="latents", prompt_embeds=("prompt_embeds", "negative_prompt_embeds"), ) ``` """ for key, value in kwargs.items(): is_string = isinstance(value, str) is_tuple_of_str_with_len_2 = ( isinstance(value, tuple) and len(value) == 2 and all(isinstance(v, str) for v in value) ) if not (is_string or is_tuple_of_str_with_len_2): raise ValueError( f"Expected `set_input_fields` to be called with a string or a tuple of string with length 2, but got {type(value)} for key {key}." ) self._input_fields = kwargs def prepare_models(self, denoiser: torch.nn.Module) -> None: """ Prepares the models for the guidance technique on a given batch of data. This method should be overridden in subclasses to implement specific model preparation logic. """ self._count_prepared += 1 def cleanup_models(self, denoiser: torch.nn.Module) -> None: """ Cleans up the models for the guidance technique after a given batch of data. This method should be overridden in subclasses to implement specific model cleanup logic. It is useful for removing any hooks or other stateful modifications made during `prepare_models`. """ pass def prepare_inputs(self, data: "BlockState") -> List["BlockState"]: raise NotImplementedError("BaseGuidance::prepare_inputs must be implemented in subclasses.") def __call__(self, data: List["BlockState"]) -> Any: if not all(hasattr(d, "noise_pred") for d in data): raise ValueError("Expected all data to have `noise_pred` attribute.") if len(data) != self.num_conditions: raise ValueError( f"Expected {self.num_conditions} data items, but got {len(data)}. Please check the input data." ) forward_inputs = {getattr(d, self._identifier_key): d.noise_pred for d in data} return self.forward(**forward_inputs) def forward(self, *args, **kwargs) -> Any: raise NotImplementedError("BaseGuidance::forward must be implemented in subclasses.") @property def is_conditional(self) -> bool: raise NotImplementedError("BaseGuidance::is_conditional must be implemented in subclasses.") @property def is_unconditional(self) -> bool: return not self.is_conditional @property def num_conditions(self) -> int: raise NotImplementedError("BaseGuidance::num_conditions must be implemented in subclasses.") @classmethod def _prepare_batch( cls, input_fields: Dict[str, Union[str, Tuple[str, str]]], data: "BlockState", tuple_index: int, identifier: str, ) -> "BlockState": """ Prepares a batch of data for the guidance technique. This method is used in the `prepare_inputs` method of the `BaseGuidance` class. It prepares the batch based on the provided tuple index. Args: input_fields (`Dict[str, Union[str, Tuple[str, str]]]`): A dictionary where the keys are the names of the fields that will be used to store the data once it is prepared with `prepare_inputs`. The values can be either a string or a tuple of length 2, which is used to look up the required data provided for preparation. If a string is provided, it will be used as the conditional data (or unconditional if used with a guidance method that requires it). If a tuple of length 2 is provided, the first element must be the conditional data identifier and the second element must be the unconditional data identifier or None. data (`BlockState`): The input data to be prepared. tuple_index (`int`): The index to use when accessing input fields that are tuples. Returns: `BlockState`: The prepared batch of data. 
""" from ..modular_pipelines.modular_pipeline import BlockState if input_fields is None: raise ValueError( "Input fields cannot be None. Please pass `input_fields` to `prepare_inputs` or call `set_input_fields` before preparing inputs." ) data_batch = {} for key, value in input_fields.items(): try: if isinstance(value, str): data_batch[key] = getattr(data, value) elif isinstance(value, tuple): data_batch[key] = getattr(data, value[tuple_index]) else: # We've already checked that value is a string or a tuple of strings with length 2 pass except AttributeError: logger.debug(f"`data` does not have attribute(s) {value}, skipping.") data_batch[cls._identifier_key] = identifier return BlockState(**data_batch) @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ) -> Self: r""" Instantiate a guider from a pre-defined JSON configuration file in a local directory or Hub repository. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the guider configuration saved with [`~BaseGuidance.save_pretrained`]. subfolder (`str`, *optional*): The subfolder location of a model file within a larger model repository on the Hub or locally. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether kwargs that are not consumed by the Python class should be returned or not. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf auth login`. You can also activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. 
</Tip> """ config, kwargs, commit_hash = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, ) return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a guider configuration object to a directory so that it can be reloaded using the [`~BaseGuidance.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg
diffusers/src/diffusers/guiders/guider_utils.py/0
{ "file_path": "diffusers/src/diffusers/guiders/guider_utils.py", "repo_id": "diffusers", "token_count": 5937 }
153
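As a usage note for `rescale_noise_cfg` from guider_utils.py above: it is applied after the usual classifier-free guidance combination. The sketch below repeats the same computation so it runs standalone; the tensors are random stand-ins for real noise predictions, and `guidance_scale`/`guidance_rescale` values are illustrative only.

```python
import torch


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Same computation as the function above, repeated so the sketch is self-contained.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg


noise_pred_uncond = torch.randn(2, 4, 64, 64)  # unconditional branch prediction
noise_pred_text = torch.randn(2, 4, 64, 64)    # text-conditioned branch prediction
guidance_scale = 7.5

# Standard classifier-free guidance combination.
noise_cfg = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Optional rescaling (Section 3.4 of https://arxiv.org/pdf/2305.08891.pdf) to reduce overexposure.
noise_cfg = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
print(noise_cfg.shape)
```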
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from ._common import _ALL_TRANSFORMER_BLOCK_IDENTIFIERS, _ATTENTION_CLASSES, _FEEDFORWARD_CLASSES


def _get_identifiable_transformer_blocks_in_module(module: torch.nn.Module):
    module_list_with_transformer_blocks = []
    for name, submodule in module.named_modules():
        name_endswith_identifier = any(name.endswith(identifier) for identifier in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS)
        is_modulelist = isinstance(submodule, torch.nn.ModuleList)
        if name_endswith_identifier and is_modulelist:
            module_list_with_transformer_blocks.append((name, submodule))
    return module_list_with_transformer_blocks


def _get_identifiable_attention_layers_in_module(module: torch.nn.Module):
    attention_layers = []
    for name, submodule in module.named_modules():
        if isinstance(submodule, _ATTENTION_CLASSES):
            attention_layers.append((name, submodule))
    return attention_layers


def _get_identifiable_feedforward_layers_in_module(module: torch.nn.Module):
    feedforward_layers = []
    for name, submodule in module.named_modules():
        if isinstance(submodule, _FEEDFORWARD_CLASSES):
            feedforward_layers.append((name, submodule))
    return feedforward_layers
diffusers/src/diffusers/hooks/utils.py/0
{ "file_path": "diffusers/src/diffusers/hooks/utils.py", "repo_id": "diffusers", "token_count": 617 }
154
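The helpers in hooks/utils.py above all follow the same discovery pattern: walk `named_modules()` and collect `(name, submodule)` pairs that match a class or name filter. Below is an illustrative, dependency-free sketch of that pattern; `ToyBlock`/`ToyModel` are made-up stand-ins, and the real helpers match diffusers' own attention and feed-forward classes rather than `nn.MultiheadAttention`.

```python
import torch
from torch import nn


class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim=32, num_heads=4, batch_first=True)
        self.ff = nn.Linear(32, 32)


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.transformer_blocks = nn.ModuleList([ToyBlock() for _ in range(2)])


def find_layers(module: nn.Module, classes):
    # Same traversal as the diffusers helpers: filter named_modules() by isinstance.
    found = []
    for name, submodule in module.named_modules():
        if isinstance(submodule, classes):
            found.append((name, submodule))
    return found


model = ToyModel()
print([name for name, _ in find_layers(model, nn.MultiheadAttention)])
# ['transformer_blocks.0.attn', 'transformer_blocks.1.attn']
```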
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict

import torch


class AttnProcsLayers(torch.nn.Module):
    def __init__(self, state_dict: Dict[str, torch.Tensor]):
        super().__init__()
        self.layers = torch.nn.ModuleList(state_dict.values())
        self.mapping = dict(enumerate(state_dict.keys()))
        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}

        # .processor for unet, .self_attn for text encoder
        self.split_keys = [".processor", ".self_attn"]

        # we add a hook to state_dict() and load_state_dict() so that the
        # naming fits with `unet.attn_processors`
        def map_to(module, state_dict, *args, **kwargs):
            new_state_dict = {}
            for key, value in state_dict.items():
                num = int(key.split(".")[1])  # 0 is always "layers"
                new_key = key.replace(f"layers.{num}", module.mapping[num])
                new_state_dict[new_key] = value

            return new_state_dict

        def remap_key(key, state_dict):
            for k in self.split_keys:
                if k in key:
                    return key.split(k)[0] + k

            raise ValueError(
                f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
            )

        def map_from(module, state_dict, *args, **kwargs):
            all_keys = list(state_dict.keys())
            for key in all_keys:
                replace_key = remap_key(key, state_dict)
                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
                state_dict[new_key] = state_dict[key]
                del state_dict[key]

        self._register_state_dict_hook(map_to)
        self._register_load_state_dict_pre_hook(map_from, with_module=True)
diffusers/src/diffusers/loaders/utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/utils.py", "repo_id": "diffusers", "token_count": 1031 }
155
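The two hooks registered by `AttnProcsLayers` above only rewrite state-dict keys between the internal `layers.<index>` naming and the original processor names. A hedged, dependency-free sketch of both remappings follows; the processor name is made up for illustration, while real names come from `unet.attn_processors`.

```python
mapping = {0: "down_blocks.0.attentions.0.processor"}  # index -> processor name
rev_mapping = {v: k for k, v in mapping.items()}       # processor name -> index
split_keys = [".processor", ".self_attn"]

# map_to: on save, "layers.<num>.*" becomes "<processor name>.*"
internal_key = "layers.0.to_k_lora.down.weight"
num = int(internal_key.split(".")[1])
saved_key = internal_key.replace(f"layers.{num}", mapping[num])
print(saved_key)  # down_blocks.0.attentions.0.processor.to_k_lora.down.weight


# map_from: on load, everything up to ".processor" is folded back into "layers.<index>"
def remap_key(key):
    for k in split_keys:
        if k in key:
            return key.split(k)[0] + k
    raise ValueError(f"{key} must contain one of {split_keys}")


prefix = remap_key(saved_key)
loaded_key = saved_key.replace(prefix, f"layers.{rev_mapping[prefix]}")
print(loaded_key)  # layers.0.to_k_lora.down.weight
```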
# Copyright 2025 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..utils import deprecate, logging
from .controlnets.controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SD3ControlNetOutput(SD3ControlNetOutput):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SD3ControlNetOutput` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetOutput`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3ControlNetOutput", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)


class SD3ControlNetModel(SD3ControlNetModel):
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: int = 2,
        in_channels: int = 16,
        num_layers: int = 18,
        attention_head_dim: int = 64,
        num_attention_heads: int = 18,
        joint_attention_dim: int = 4096,
        caption_projection_dim: int = 1152,
        pooled_projection_dim: int = 2048,
        out_channels: int = 16,
        pos_embed_max_size: int = 96,
        extra_conditioning_channels: int = 0,
    ):
        deprecation_message = "Importing `SD3ControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetModel`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3ControlNetModel", "0.34", deprecation_message)
        super().__init__(
            sample_size=sample_size,
            patch_size=patch_size,
            in_channels=in_channels,
            num_layers=num_layers,
            attention_head_dim=attention_head_dim,
            num_attention_heads=num_attention_heads,
            joint_attention_dim=joint_attention_dim,
            caption_projection_dim=caption_projection_dim,
            pooled_projection_dim=pooled_projection_dim,
            out_channels=out_channels,
            pos_embed_max_size=pos_embed_max_size,
            extra_conditioning_channels=extra_conditioning_channels,
        )


class SD3MultiControlNetModel(SD3MultiControlNetModel):
    def __init__(self, *args, **kwargs):
        deprecation_message = "Importing `SD3MultiControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3MultiControlNetModel`, instead."
        deprecate("diffusers.models.controlnet_sd3.SD3MultiControlNetModel", "0.34", deprecation_message)
        super().__init__(*args, **kwargs)
diffusers/src/diffusers/models/controlnet_sd3.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet_sd3.py", "repo_id": "diffusers", "token_count": 1263 }
156
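The controlnet_sd3.py module above is purely a backward-compatibility shim: each class subclasses the canonical implementation from the `controlnets` subpackage and calls `deprecate(...)` on instantiation. A short sketch of the two import paths follows; it assumes a diffusers release that still ships both modules.

```python
# Old location (still resolves, but its classes warn when instantiated):
from diffusers.models.controlnet_sd3 import SD3ControlNetModel as DeprecatedSD3ControlNetModel

# Preferred, canonical location:
from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetModel

# The shim class simply subclasses the canonical model, so both names share the same code.
print(issubclass(DeprecatedSD3ControlNetModel, SD3ControlNetModel))  # True
```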
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate from .activations import FP32SiLU, get_activation from .attention_processor import Attention def get_timestep_embedding( timesteps: torch.Tensor, embedding_dim: int, flip_sin_to_cos: bool = False, downscale_freq_shift: float = 1, scale: float = 1, max_period: int = 10000, ) -> torch.Tensor: """ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. Args timesteps (torch.Tensor): a 1-D Tensor of N indices, one per batch element. These may be fractional. embedding_dim (int): the dimension of the output. flip_sin_to_cos (bool): Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False) downscale_freq_shift (float): Controls the delta between frequencies between dimensions scale (float): Scaling factor applied to the embeddings. max_period (int): Controls the maximum frequency of the embeddings Returns torch.Tensor: an [N x dim] Tensor of positional embeddings. """ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" half_dim = embedding_dim // 2 exponent = -math.log(max_period) * torch.arange( start=0, end=half_dim, dtype=torch.float32, device=timesteps.device ) exponent = exponent / (half_dim - downscale_freq_shift) emb = torch.exp(exponent) emb = timesteps[:, None].float() * emb[None, :] # scale embeddings emb = scale * emb # concat sine and cosine embeddings emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # flip sine and cosine embeddings if flip_sin_to_cos: emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) # zero pad if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) return emb def get_3d_sincos_pos_embed( embed_dim: int, spatial_size: Union[int, Tuple[int, int]], temporal_size: int, spatial_interpolation_scale: float = 1.0, temporal_interpolation_scale: float = 1.0, device: Optional[torch.device] = None, output_type: str = "np", ) -> torch.Tensor: r""" Creates 3D sinusoidal positional embeddings. Args: embed_dim (`int`): The embedding dimension of inputs. It must be divisible by 16. spatial_size (`int` or `Tuple[int, int]`): The spatial dimension of positional embeddings. If an integer is provided, the same size is applied to both spatial dimensions (height and width). temporal_size (`int`): The temporal dimension of positional embeddings (number of frames). spatial_interpolation_scale (`float`, defaults to 1.0): Scale factor for spatial grid interpolation. temporal_interpolation_scale (`float`, defaults to 1.0): Scale factor for temporal grid interpolation. Returns: `torch.Tensor`: The 3D sinusoidal positional embeddings of shape `[temporal_size, spatial_size[0] * spatial_size[1], embed_dim]`. 
""" if output_type == "np": return _get_3d_sincos_pos_embed_np( embed_dim=embed_dim, spatial_size=spatial_size, temporal_size=temporal_size, spatial_interpolation_scale=spatial_interpolation_scale, temporal_interpolation_scale=temporal_interpolation_scale, ) if embed_dim % 4 != 0: raise ValueError("`embed_dim` must be divisible by 4") if isinstance(spatial_size, int): spatial_size = (spatial_size, spatial_size) embed_dim_spatial = 3 * embed_dim // 4 embed_dim_temporal = embed_dim // 4 # 1. Spatial grid_h = torch.arange(spatial_size[1], device=device, dtype=torch.float32) / spatial_interpolation_scale grid_w = torch.arange(spatial_size[0], device=device, dtype=torch.float32) / spatial_interpolation_scale grid = torch.meshgrid(grid_w, grid_h, indexing="xy") # here w goes first grid = torch.stack(grid, dim=0) grid = grid.reshape([2, 1, spatial_size[1], spatial_size[0]]) pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid, output_type="pt") # 2. Temporal grid_t = torch.arange(temporal_size, device=device, dtype=torch.float32) / temporal_interpolation_scale pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t, output_type="pt") # 3. Concat pos_embed_spatial = pos_embed_spatial[None, :, :] pos_embed_spatial = pos_embed_spatial.repeat_interleave( temporal_size, dim=0, output_size=pos_embed_spatial.shape[0] * temporal_size ) # [T, H*W, D // 4 * 3] pos_embed_temporal = pos_embed_temporal[:, None, :] pos_embed_temporal = pos_embed_temporal.repeat_interleave( spatial_size[0] * spatial_size[1], dim=1 ) # [T, H*W, D // 4] pos_embed = torch.concat([pos_embed_temporal, pos_embed_spatial], dim=-1) # [T, H*W, D] return pos_embed def _get_3d_sincos_pos_embed_np( embed_dim: int, spatial_size: Union[int, Tuple[int, int]], temporal_size: int, spatial_interpolation_scale: float = 1.0, temporal_interpolation_scale: float = 1.0, ) -> np.ndarray: r""" Creates 3D sinusoidal positional embeddings. Args: embed_dim (`int`): The embedding dimension of inputs. It must be divisible by 16. spatial_size (`int` or `Tuple[int, int]`): The spatial dimension of positional embeddings. If an integer is provided, the same size is applied to both spatial dimensions (height and width). temporal_size (`int`): The temporal dimension of positional embeddings (number of frames). spatial_interpolation_scale (`float`, defaults to 1.0): Scale factor for spatial grid interpolation. temporal_interpolation_scale (`float`, defaults to 1.0): Scale factor for temporal grid interpolation. Returns: `np.ndarray`: The 3D sinusoidal positional embeddings of shape `[temporal_size, spatial_size[0] * spatial_size[1], embed_dim]`. """ deprecation_message = ( "`get_3d_sincos_pos_embed` uses `torch` and supports `device`." " `from_numpy` is no longer required." " Pass `output_type='pt' to use the new version now." ) deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False) if embed_dim % 4 != 0: raise ValueError("`embed_dim` must be divisible by 4") if isinstance(spatial_size, int): spatial_size = (spatial_size, spatial_size) embed_dim_spatial = 3 * embed_dim // 4 embed_dim_temporal = embed_dim // 4 # 1. 
Spatial grid_h = np.arange(spatial_size[1], dtype=np.float32) / spatial_interpolation_scale grid_w = np.arange(spatial_size[0], dtype=np.float32) / spatial_interpolation_scale grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, spatial_size[1], spatial_size[0]]) pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid) # 2. Temporal grid_t = np.arange(temporal_size, dtype=np.float32) / temporal_interpolation_scale pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t) # 3. Concat pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :] pos_embed_spatial = np.repeat(pos_embed_spatial, temporal_size, axis=0) # [T, H*W, D // 4 * 3] pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :] pos_embed_temporal = np.repeat(pos_embed_temporal, spatial_size[0] * spatial_size[1], axis=1) # [T, H*W, D // 4] pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1) # [T, H*W, D] return pos_embed def get_2d_sincos_pos_embed( embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16, device: Optional[torch.device] = None, output_type: str = "np", ): """ Creates 2D sinusoidal positional embeddings. Args: embed_dim (`int`): The embedding dimension. grid_size (`int`): The size of the grid height and width. cls_token (`bool`, defaults to `False`): Whether or not to add a classification token. extra_tokens (`int`, defaults to `0`): The number of extra tokens to add. interpolation_scale (`float`, defaults to `1.0`): The scale of the interpolation. Returns: pos_embed (`torch.Tensor`): Shape is either `[grid_size * grid_size, embed_dim]` if not using cls_token, or `[1 + grid_size*grid_size, embed_dim]` if using cls_token """ if output_type == "np": deprecation_message = ( "`get_2d_sincos_pos_embed` uses `torch` and supports `device`." " `from_numpy` is no longer required." " Pass `output_type='pt' to use the new version now." ) deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False) return get_2d_sincos_pos_embed_np( embed_dim=embed_dim, grid_size=grid_size, cls_token=cls_token, extra_tokens=extra_tokens, interpolation_scale=interpolation_scale, base_size=base_size, ) if isinstance(grid_size, int): grid_size = (grid_size, grid_size) grid_h = ( torch.arange(grid_size[0], device=device, dtype=torch.float32) / (grid_size[0] / base_size) / interpolation_scale ) grid_w = ( torch.arange(grid_size[1], device=device, dtype=torch.float32) / (grid_size[1] / base_size) / interpolation_scale ) grid = torch.meshgrid(grid_w, grid_h, indexing="xy") # here w goes first grid = torch.stack(grid, dim=0) grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid, output_type=output_type) if cls_token and extra_tokens > 0: pos_embed = torch.concat([torch.zeros([extra_tokens, embed_dim]), pos_embed], dim=0) return pos_embed def get_2d_sincos_pos_embed_from_grid(embed_dim, grid, output_type="np"): r""" This function generates 2D sinusoidal positional embeddings from a grid. Args: embed_dim (`int`): The embedding dimension. grid (`torch.Tensor`): Grid of positions with shape `(H * W,)`. Returns: `torch.Tensor`: The 2D sinusoidal positional embeddings with shape `(H * W, embed_dim)` """ if output_type == "np": deprecation_message = ( "`get_2d_sincos_pos_embed_from_grid` uses `torch` and supports `device`." " `from_numpy` is no longer required." 
" Pass `output_type='pt' to use the new version now." ) deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False) return get_2d_sincos_pos_embed_from_grid_np( embed_dim=embed_dim, grid=grid, ) if embed_dim % 2 != 0: raise ValueError("embed_dim must be divisible by 2") # use half of dimensions to encode grid_h emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0], output_type=output_type) # (H*W, D/2) emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1], output_type=output_type) # (H*W, D/2) emb = torch.concat([emb_h, emb_w], dim=1) # (H*W, D) return emb def get_1d_sincos_pos_embed_from_grid(embed_dim, pos, output_type="np", flip_sin_to_cos=False): """ This function generates 1D positional embeddings from a grid. Args: embed_dim (`int`): The embedding dimension `D` pos (`torch.Tensor`): 1D tensor of positions with shape `(M,)` Returns: `torch.Tensor`: Sinusoidal positional embeddings of shape `(M, D)`. """ if output_type == "np": deprecation_message = ( "`get_1d_sincos_pos_embed_from_grid` uses `torch` and supports `device`." " `from_numpy` is no longer required." " Pass `output_type='pt' to use the new version now." ) deprecate("output_type=='np'", "0.34.0", deprecation_message, standard_warn=False) return get_1d_sincos_pos_embed_from_grid_np(embed_dim=embed_dim, pos=pos) if embed_dim % 2 != 0: raise ValueError("embed_dim must be divisible by 2") omega = torch.arange(embed_dim // 2, device=pos.device, dtype=torch.float64) omega /= embed_dim / 2.0 omega = 1.0 / 10000**omega # (D/2,) pos = pos.reshape(-1) # (M,) out = torch.outer(pos, omega) # (M, D/2), outer product emb_sin = torch.sin(out) # (M, D/2) emb_cos = torch.cos(out) # (M, D/2) emb = torch.concat([emb_sin, emb_cos], dim=1) # (M, D) # flip sine and cosine embeddings if flip_sin_to_cos: emb = torch.cat([emb[:, embed_dim // 2 :], emb[:, : embed_dim // 2]], dim=1) return emb def get_2d_sincos_pos_embed_np( embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16 ): """ Creates 2D sinusoidal positional embeddings. Args: embed_dim (`int`): The embedding dimension. grid_size (`int`): The size of the grid height and width. cls_token (`bool`, defaults to `False`): Whether or not to add a classification token. extra_tokens (`int`, defaults to `0`): The number of extra tokens to add. interpolation_scale (`float`, defaults to `1.0`): The scale of the interpolation. Returns: pos_embed (`np.ndarray`): Shape is either `[grid_size * grid_size, embed_dim]` if not using cls_token, or `[1 + grid_size*grid_size, embed_dim]` if using cls_token """ if isinstance(grid_size, int): grid_size = (grid_size, grid_size) grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) pos_embed = get_2d_sincos_pos_embed_from_grid_np(embed_dim, grid) if cls_token and extra_tokens > 0: pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) return pos_embed def get_2d_sincos_pos_embed_from_grid_np(embed_dim, grid): r""" This function generates 2D sinusoidal positional embeddings from a grid. Args: embed_dim (`int`): The embedding dimension. grid (`np.ndarray`): Grid of positions with shape `(H * W,)`. 
Returns: `np.ndarray`: The 2D sinusoidal positional embeddings with shape `(H * W, embed_dim)` """ if embed_dim % 2 != 0: raise ValueError("embed_dim must be divisible by 2") # use half of dimensions to encode grid_h emb_h = get_1d_sincos_pos_embed_from_grid_np(embed_dim // 2, grid[0]) # (H*W, D/2) emb_w = get_1d_sincos_pos_embed_from_grid_np(embed_dim // 2, grid[1]) # (H*W, D/2) emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) return emb def get_1d_sincos_pos_embed_from_grid_np(embed_dim, pos): """ This function generates 1D positional embeddings from a grid. Args: embed_dim (`int`): The embedding dimension `D` pos (`numpy.ndarray`): 1D tensor of positions with shape `(M,)` Returns: `numpy.ndarray`: Sinusoidal positional embeddings of shape `(M, D)`. """ if embed_dim % 2 != 0: raise ValueError("embed_dim must be divisible by 2") omega = np.arange(embed_dim // 2, dtype=np.float64) omega /= embed_dim / 2.0 omega = 1.0 / 10000**omega # (D/2,) pos = pos.reshape(-1) # (M,) out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product emb_sin = np.sin(out) # (M, D/2) emb_cos = np.cos(out) # (M, D/2) emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) return emb class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding with support for SD3 cropping. Args: height (`int`, defaults to `224`): The height of the image. width (`int`, defaults to `224`): The width of the image. patch_size (`int`, defaults to `16`): The size of the patches. in_channels (`int`, defaults to `3`): The number of input channels. embed_dim (`int`, defaults to `768`): The output dimension of the embedding. layer_norm (`bool`, defaults to `False`): Whether or not to use layer normalization. flatten (`bool`, defaults to `True`): Whether or not to flatten the output. bias (`bool`, defaults to `True`): Whether or not to use bias. interpolation_scale (`float`, defaults to `1`): The scale of the interpolation. pos_embed_type (`str`, defaults to `"sincos"`): The type of positional embedding. pos_embed_max_size (`int`, defaults to `None`): The maximum size of the positional embedding. 
""" def __init__( self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, layer_norm=False, flatten=True, bias=True, interpolation_scale=1, pos_embed_type="sincos", pos_embed_max_size=None, # For SD3 cropping ): super().__init__() num_patches = (height // patch_size) * (width // patch_size) self.flatten = flatten self.layer_norm = layer_norm self.pos_embed_max_size = pos_embed_max_size self.proj = nn.Conv2d( in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias ) if layer_norm: self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) else: self.norm = None self.patch_size = patch_size self.height, self.width = height // patch_size, width // patch_size self.base_size = height // patch_size self.interpolation_scale = interpolation_scale # Calculate positional embeddings based on max size or default if pos_embed_max_size: grid_size = pos_embed_max_size else: grid_size = int(num_patches**0.5) if pos_embed_type is None: self.pos_embed = None elif pos_embed_type == "sincos": pos_embed = get_2d_sincos_pos_embed( embed_dim, grid_size, base_size=self.base_size, interpolation_scale=self.interpolation_scale, output_type="pt", ) persistent = True if pos_embed_max_size else False self.register_buffer("pos_embed", pos_embed.float().unsqueeze(0), persistent=persistent) else: raise ValueError(f"Unsupported pos_embed_type: {pos_embed_type}") def cropped_pos_embed(self, height, width): """Crops positional embeddings for SD3 compatibility.""" if self.pos_embed_max_size is None: raise ValueError("`pos_embed_max_size` must be set for cropping.") height = height // self.patch_size width = width // self.patch_size if height > self.pos_embed_max_size: raise ValueError( f"Height ({height}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}." ) if width > self.pos_embed_max_size: raise ValueError( f"Width ({width}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}." ) top = (self.pos_embed_max_size - height) // 2 left = (self.pos_embed_max_size - width) // 2 spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1) spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :] spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1]) return spatial_pos_embed def forward(self, latent): if self.pos_embed_max_size is not None: height, width = latent.shape[-2:] else: height, width = latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size latent = self.proj(latent) if self.flatten: latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC if self.layer_norm: latent = self.norm(latent) if self.pos_embed is None: return latent.to(latent.dtype) # Interpolate or crop positional embeddings as needed if self.pos_embed_max_size: pos_embed = self.cropped_pos_embed(height, width) else: if self.height != height or self.width != width: pos_embed = get_2d_sincos_pos_embed( embed_dim=self.pos_embed.shape[-1], grid_size=(height, width), base_size=self.base_size, interpolation_scale=self.interpolation_scale, device=latent.device, output_type="pt", ) pos_embed = pos_embed.float().unsqueeze(0) else: pos_embed = self.pos_embed return (latent + pos_embed).to(latent.dtype) class LuminaPatchEmbed(nn.Module): """ 2D Image to Patch Embedding with support for Lumina-T2X Args: patch_size (`int`, defaults to `2`): The size of the patches. in_channels (`int`, defaults to `4`): The number of input channels. 
embed_dim (`int`, defaults to `768`): The output dimension of the embedding. bias (`bool`, defaults to `True`): Whether or not to use bias. """ def __init__(self, patch_size=2, in_channels=4, embed_dim=768, bias=True): super().__init__() self.patch_size = patch_size self.proj = nn.Linear( in_features=patch_size * patch_size * in_channels, out_features=embed_dim, bias=bias, ) def forward(self, x, freqs_cis): """ Patchifies and embeds the input tensor(s). Args: x (List[torch.Tensor] | torch.Tensor): The input tensor(s) to be patchified and embedded. Returns: Tuple[torch.Tensor, torch.Tensor, List[Tuple[int, int]], torch.Tensor]: A tuple containing the patchified and embedded tensor(s), the mask indicating the valid patches, the original image size(s), and the frequency tensor(s). """ freqs_cis = freqs_cis.to(x[0].device) patch_height = patch_width = self.patch_size batch_size, channel, height, width = x.size() height_tokens, width_tokens = height // patch_height, width // patch_width x = x.view(batch_size, channel, height_tokens, patch_height, width_tokens, patch_width).permute( 0, 2, 4, 1, 3, 5 ) x = x.flatten(3) x = self.proj(x) x = x.flatten(1, 2) mask = torch.ones(x.shape[0], x.shape[1], dtype=torch.int32, device=x.device) return ( x, mask, [(height, width)] * batch_size, freqs_cis[:height_tokens, :width_tokens].flatten(0, 1).unsqueeze(0), ) class CogVideoXPatchEmbed(nn.Module): def __init__( self, patch_size: int = 2, patch_size_t: Optional[int] = None, in_channels: int = 16, embed_dim: int = 1920, text_embed_dim: int = 4096, bias: bool = True, sample_width: int = 90, sample_height: int = 60, sample_frames: int = 49, temporal_compression_ratio: int = 4, max_text_seq_length: int = 226, spatial_interpolation_scale: float = 1.875, temporal_interpolation_scale: float = 1.0, use_positional_embeddings: bool = True, use_learned_positional_embeddings: bool = True, ) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.embed_dim = embed_dim self.sample_height = sample_height self.sample_width = sample_width self.sample_frames = sample_frames self.temporal_compression_ratio = temporal_compression_ratio self.max_text_seq_length = max_text_seq_length self.spatial_interpolation_scale = spatial_interpolation_scale self.temporal_interpolation_scale = temporal_interpolation_scale self.use_positional_embeddings = use_positional_embeddings self.use_learned_positional_embeddings = use_learned_positional_embeddings if patch_size_t is None: # CogVideoX 1.0 checkpoints self.proj = nn.Conv2d( in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias ) else: # CogVideoX 1.5 checkpoints self.proj = nn.Linear(in_channels * patch_size * patch_size * patch_size_t, embed_dim) self.text_proj = nn.Linear(text_embed_dim, embed_dim) if use_positional_embeddings or use_learned_positional_embeddings: persistent = use_learned_positional_embeddings pos_embedding = self._get_positional_embeddings(sample_height, sample_width, sample_frames) self.register_buffer("pos_embedding", pos_embedding, persistent=persistent) def _get_positional_embeddings( self, sample_height: int, sample_width: int, sample_frames: int, device: Optional[torch.device] = None ) -> torch.Tensor: post_patch_height = sample_height // self.patch_size post_patch_width = sample_width // self.patch_size post_time_compression_frames = (sample_frames - 1) // self.temporal_compression_ratio + 1 num_patches = post_patch_height * post_patch_width * post_time_compression_frames pos_embedding 
= get_3d_sincos_pos_embed( self.embed_dim, (post_patch_width, post_patch_height), post_time_compression_frames, self.spatial_interpolation_scale, self.temporal_interpolation_scale, device=device, output_type="pt", ) pos_embedding = pos_embedding.flatten(0, 1) joint_pos_embedding = pos_embedding.new_zeros( 1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False ) joint_pos_embedding.data[:, self.max_text_seq_length :].copy_(pos_embedding) return joint_pos_embedding def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): r""" Args: text_embeds (`torch.Tensor`): Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim). image_embeds (`torch.Tensor`): Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width). """ text_embeds = self.text_proj(text_embeds) batch_size, num_frames, channels, height, width = image_embeds.shape if self.patch_size_t is None: image_embeds = image_embeds.reshape(-1, channels, height, width) image_embeds = self.proj(image_embeds) image_embeds = image_embeds.view(batch_size, num_frames, *image_embeds.shape[1:]) image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels] image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels] else: p = self.patch_size p_t = self.patch_size_t image_embeds = image_embeds.permute(0, 1, 3, 4, 2) image_embeds = image_embeds.reshape( batch_size, num_frames // p_t, p_t, height // p, p, width // p, p, channels ) image_embeds = image_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(4, 7).flatten(1, 3) image_embeds = self.proj(image_embeds) embeds = torch.cat( [text_embeds, image_embeds], dim=1 ).contiguous() # [batch, seq_length + num_frames x height x width, channels] if self.use_positional_embeddings or self.use_learned_positional_embeddings: if self.use_learned_positional_embeddings and (self.sample_width != width or self.sample_height != height): raise ValueError( "It is currently not possible to generate videos at a different resolution that the defaults. This should only be the case with 'THUDM/CogVideoX-5b-I2V'." "If you think this is incorrect, please open an issue at https://github.com/huggingface/diffusers/issues." 
) pre_time_compression_frames = (num_frames - 1) * self.temporal_compression_ratio + 1 if ( self.sample_height != height or self.sample_width != width or self.sample_frames != pre_time_compression_frames ): pos_embedding = self._get_positional_embeddings( height, width, pre_time_compression_frames, device=embeds.device ) else: pos_embedding = self.pos_embedding pos_embedding = pos_embedding.to(dtype=embeds.dtype) embeds = embeds + pos_embedding return embeds class CogView3PlusPatchEmbed(nn.Module): def __init__( self, in_channels: int = 16, hidden_size: int = 2560, patch_size: int = 2, text_hidden_size: int = 4096, pos_embed_max_size: int = 128, ): super().__init__() self.in_channels = in_channels self.hidden_size = hidden_size self.patch_size = patch_size self.text_hidden_size = text_hidden_size self.pos_embed_max_size = pos_embed_max_size # Linear projection for image patches self.proj = nn.Linear(in_channels * patch_size**2, hidden_size) # Linear projection for text embeddings self.text_proj = nn.Linear(text_hidden_size, hidden_size) pos_embed = get_2d_sincos_pos_embed( hidden_size, pos_embed_max_size, base_size=pos_embed_max_size, output_type="pt" ) pos_embed = pos_embed.reshape(pos_embed_max_size, pos_embed_max_size, hidden_size) self.register_buffer("pos_embed", pos_embed.float(), persistent=False) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor) -> torch.Tensor: batch_size, channel, height, width = hidden_states.shape if height % self.patch_size != 0 or width % self.patch_size != 0: raise ValueError("Height and width must be divisible by patch size") height = height // self.patch_size width = width // self.patch_size hidden_states = hidden_states.view(batch_size, channel, height, self.patch_size, width, self.patch_size) hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5).contiguous() hidden_states = hidden_states.view(batch_size, height * width, channel * self.patch_size * self.patch_size) # Project the patches hidden_states = self.proj(hidden_states) encoder_hidden_states = self.text_proj(encoder_hidden_states) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) # Calculate text_length text_length = encoder_hidden_states.shape[1] image_pos_embed = self.pos_embed[:height, :width].reshape(height * width, -1) text_pos_embed = torch.zeros( (text_length, self.hidden_size), dtype=image_pos_embed.dtype, device=image_pos_embed.device ) pos_embed = torch.cat([text_pos_embed, image_pos_embed], dim=0)[None, ...] return (hidden_states + pos_embed).to(hidden_states.dtype) def get_3d_rotary_pos_embed( embed_dim, crops_coords, grid_size, temporal_size, theta: int = 10000, use_real: bool = True, grid_type: str = "linspace", max_size: Optional[Tuple[int, int]] = None, device: Optional[torch.device] = None, ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ RoPE for video tokens with 3D structure. Args: embed_dim: (`int`): The embedding dimension size, corresponding to hidden_size_head. crops_coords (`Tuple[int]`): The top-left and bottom-right coordinates of the crop. grid_size (`Tuple[int]`): The grid size of the spatial positional embedding (height, width). temporal_size (`int`): The size of the temporal dimension. theta (`float`): Scaling factor for frequency computation. grid_type (`str`): Whether to use "linspace" or "slice" to compute grids. Returns: `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`. 
""" if use_real is not True: raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed") if grid_type == "linspace": start, stop = crops_coords grid_size_h, grid_size_w = grid_size grid_h = torch.linspace( start[0], stop[0] * (grid_size_h - 1) / grid_size_h, grid_size_h, device=device, dtype=torch.float32 ) grid_w = torch.linspace( start[1], stop[1] * (grid_size_w - 1) / grid_size_w, grid_size_w, device=device, dtype=torch.float32 ) grid_t = torch.arange(temporal_size, device=device, dtype=torch.float32) grid_t = torch.linspace( 0, temporal_size * (temporal_size - 1) / temporal_size, temporal_size, device=device, dtype=torch.float32 ) elif grid_type == "slice": max_h, max_w = max_size grid_size_h, grid_size_w = grid_size grid_h = torch.arange(max_h, device=device, dtype=torch.float32) grid_w = torch.arange(max_w, device=device, dtype=torch.float32) grid_t = torch.arange(temporal_size, device=device, dtype=torch.float32) else: raise ValueError("Invalid value passed for `grid_type`.") # Compute dimensions for each axis dim_t = embed_dim // 4 dim_h = embed_dim // 8 * 3 dim_w = embed_dim // 8 * 3 # Temporal frequencies freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, theta=theta, use_real=True) # Spatial frequencies for height and width freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, theta=theta, use_real=True) freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, theta=theta, use_real=True) # BroadCast and concatenate temporal and spaial frequencie (height and width) into a 3d tensor def combine_time_height_width(freqs_t, freqs_h, freqs_w): freqs_t = freqs_t[:, None, None, :].expand( -1, grid_size_h, grid_size_w, -1 ) # temporal_size, grid_size_h, grid_size_w, dim_t freqs_h = freqs_h[None, :, None, :].expand( temporal_size, -1, grid_size_w, -1 ) # temporal_size, grid_size_h, grid_size_2, dim_h freqs_w = freqs_w[None, None, :, :].expand( temporal_size, grid_size_h, -1, -1 ) # temporal_size, grid_size_h, grid_size_2, dim_w freqs = torch.cat( [freqs_t, freqs_h, freqs_w], dim=-1 ) # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w) freqs = freqs.view( temporal_size * grid_size_h * grid_size_w, -1 ) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) return freqs t_cos, t_sin = freqs_t # both t_cos and t_sin has shape: temporal_size, dim_t h_cos, h_sin = freqs_h # both h_cos and h_sin has shape: grid_size_h, dim_h w_cos, w_sin = freqs_w # both w_cos and w_sin has shape: grid_size_w, dim_w if grid_type == "slice": t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size] h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h] w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w] cos = combine_time_height_width(t_cos, h_cos, w_cos) sin = combine_time_height_width(t_sin, h_sin, w_sin) return cos, sin def get_3d_rotary_pos_embed_allegro( embed_dim, crops_coords, grid_size, temporal_size, interpolation_scale: Tuple[float, float, float] = (1.0, 1.0, 1.0), theta: int = 10000, device: Optional[torch.device] = None, ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: # TODO(aryan): docs start, stop = crops_coords grid_size_h, grid_size_w = grid_size interpolation_scale_t, interpolation_scale_h, interpolation_scale_w = interpolation_scale grid_t = torch.linspace( 0, temporal_size * (temporal_size - 1) / temporal_size, temporal_size, device=device, dtype=torch.float32 ) grid_h = torch.linspace( start[0], stop[0] * (grid_size_h - 1) / grid_size_h, grid_size_h, device=device, dtype=torch.float32 ) grid_w = torch.linspace( start[1], 
stop[1] * (grid_size_w - 1) / grid_size_w, grid_size_w, device=device, dtype=torch.float32 ) # Compute dimensions for each axis dim_t = embed_dim // 3 dim_h = embed_dim // 3 dim_w = embed_dim // 3 # Temporal frequencies freqs_t = get_1d_rotary_pos_embed( dim_t, grid_t / interpolation_scale_t, theta=theta, use_real=True, repeat_interleave_real=False ) # Spatial frequencies for height and width freqs_h = get_1d_rotary_pos_embed( dim_h, grid_h / interpolation_scale_h, theta=theta, use_real=True, repeat_interleave_real=False ) freqs_w = get_1d_rotary_pos_embed( dim_w, grid_w / interpolation_scale_w, theta=theta, use_real=True, repeat_interleave_real=False ) return freqs_t, freqs_h, freqs_w, grid_t, grid_h, grid_w def get_2d_rotary_pos_embed( embed_dim, crops_coords, grid_size, use_real=True, device: Optional[torch.device] = None, output_type: str = "np" ): """ RoPE for image tokens with 2d structure. Args: embed_dim: (`int`): The embedding dimension size crops_coords (`Tuple[int]`) The top-left and bottom-right coordinates of the crop. grid_size (`Tuple[int]`): The grid size of the positional embedding. use_real (`bool`): If True, return real part and imaginary part separately. Otherwise, return complex numbers. device: (`torch.device`, **optional**): The device used to create tensors. Returns: `torch.Tensor`: positional embedding with shape `( grid_size * grid_size, embed_dim/2)`. """ if output_type == "np": deprecation_message = ( "`get_2d_sincos_pos_embed` uses `torch` and supports `device`." " `from_numpy` is no longer required." " Pass `output_type='pt' to use the new version now." ) deprecate("output_type=='np'", "0.33.0", deprecation_message, standard_warn=False) return _get_2d_rotary_pos_embed_np( embed_dim=embed_dim, crops_coords=crops_coords, grid_size=grid_size, use_real=use_real, ) start, stop = crops_coords # scale end by (steps−1)/steps matches np.linspace(..., endpoint=False) grid_h = torch.linspace( start[0], stop[0] * (grid_size[0] - 1) / grid_size[0], grid_size[0], device=device, dtype=torch.float32 ) grid_w = torch.linspace( start[1], stop[1] * (grid_size[1] - 1) / grid_size[1], grid_size[1], device=device, dtype=torch.float32 ) grid = torch.meshgrid(grid_w, grid_h, indexing="xy") grid = torch.stack(grid, dim=0) # [2, W, H] grid = grid.reshape([2, 1, *grid.shape[1:]]) pos_embed = get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=use_real) return pos_embed def _get_2d_rotary_pos_embed_np(embed_dim, crops_coords, grid_size, use_real=True): """ RoPE for image tokens with 2d structure. Args: embed_dim: (`int`): The embedding dimension size crops_coords (`Tuple[int]`) The top-left and bottom-right coordinates of the crop. grid_size (`Tuple[int]`): The grid size of the positional embedding. use_real (`bool`): If True, return real part and imaginary part separately. Otherwise, return complex numbers. Returns: `torch.Tensor`: positional embedding with shape `( grid_size * grid_size, embed_dim/2)`. """ start, stop = crops_coords grid_h = np.linspace(start[0], stop[0], grid_size[0], endpoint=False, dtype=np.float32) grid_w = np.linspace(start[1], stop[1], grid_size[1], endpoint=False, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) # [2, W, H] grid = grid.reshape([2, 1, *grid.shape[1:]]) pos_embed = get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=use_real) return pos_embed def get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=False): """ Get 2D RoPE from grid. 
Args: embed_dim: (`int`): The embedding dimension size, corresponding to hidden_size_head. grid (`np.ndarray`): The grid of the positional embedding. use_real (`bool`): If True, return real part and imaginary part separately. Otherwise, return complex numbers. Returns: `torch.Tensor`: positional embedding with shape `( grid_size * grid_size, embed_dim/2)`. """ assert embed_dim % 4 == 0 # use half of dimensions to encode grid_h emb_h = get_1d_rotary_pos_embed( embed_dim // 2, grid[0].reshape(-1), use_real=use_real ) # (H*W, D/2) if use_real else (H*W, D/4) emb_w = get_1d_rotary_pos_embed( embed_dim // 2, grid[1].reshape(-1), use_real=use_real ) # (H*W, D/2) if use_real else (H*W, D/4) if use_real: cos = torch.cat([emb_h[0], emb_w[0]], dim=1) # (H*W, D) sin = torch.cat([emb_h[1], emb_w[1]], dim=1) # (H*W, D) return cos, sin else: emb = torch.cat([emb_h, emb_w], dim=1) # (H*W, D/2) return emb def get_2d_rotary_pos_embed_lumina(embed_dim, len_h, len_w, linear_factor=1.0, ntk_factor=1.0): """ Get 2D RoPE from grid. Args: embed_dim: (`int`): The embedding dimension size, corresponding to hidden_size_head. grid (`np.ndarray`): The grid of the positional embedding. linear_factor (`float`): The linear factor of the positional embedding, which is used to scale the positional embedding in the linear layer. ntk_factor (`float`): The ntk factor of the positional embedding, which is used to scale the positional embedding in the ntk layer. Returns: `torch.Tensor`: positional embedding with shape `( grid_size * grid_size, embed_dim/2)`. """ assert embed_dim % 4 == 0 emb_h = get_1d_rotary_pos_embed( embed_dim // 2, len_h, linear_factor=linear_factor, ntk_factor=ntk_factor ) # (H, D/4) emb_w = get_1d_rotary_pos_embed( embed_dim // 2, len_w, linear_factor=linear_factor, ntk_factor=ntk_factor ) # (W, D/4) emb_h = emb_h.view(len_h, 1, embed_dim // 4, 1).repeat(1, len_w, 1, 1) # (H, W, D/4, 1) emb_w = emb_w.view(1, len_w, embed_dim // 4, 1).repeat(len_h, 1, 1, 1) # (H, W, D/4, 1) emb = torch.cat([emb_h, emb_w], dim=-1).flatten(2) # (H, W, D/2) return emb def get_1d_rotary_pos_embed( dim: int, pos: Union[np.ndarray, int], theta: float = 10000.0, use_real=False, linear_factor=1.0, ntk_factor=1.0, repeat_interleave_real=True, freqs_dtype=torch.float32, # torch.float32, torch.float64 (flux) ): """ Precompute the frequency tensor for complex exponentials (cis) with given dimensions. This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' and the end index 'end'. The 'theta' parameter scales the frequencies. The returned tensor contains complex values in complex64 data type. Args: dim (`int`): Dimension of the frequency tensor. pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar theta (`float`, *optional*, defaults to 10000.0): Scaling factor for frequency computation. Defaults to 10000.0. use_real (`bool`, *optional*): If True, return real part and imaginary part separately. Otherwise, return complex numbers. linear_factor (`float`, *optional*, defaults to 1.0): Scaling factor for the context extrapolation. Defaults to 1.0. ntk_factor (`float`, *optional*, defaults to 1.0): Scaling factor for the NTK-Aware RoPE. Defaults to 1.0. repeat_interleave_real (`bool`, *optional*, defaults to `True`): If `True` and `use_real`, real part and imaginary part are each interleaved with themselves to reach `dim`. Otherwise, they are concateanted with themselves. 
freqs_dtype (`torch.float32` or `torch.float64`, *optional*, defaults to `torch.float32`): the dtype of the frequency tensor. Returns: `torch.Tensor`: Precomputed frequency tensor with complex exponentials. [S, D/2] """ assert dim % 2 == 0 if isinstance(pos, int): pos = torch.arange(pos) if isinstance(pos, np.ndarray): pos = torch.from_numpy(pos) # type: ignore # [S] theta = theta * ntk_factor freqs = ( 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device) / dim)) / linear_factor ) # [D/2] freqs = torch.outer(pos, freqs) # type: ignore # [S, D/2] is_npu = freqs.device.type == "npu" if is_npu: freqs = freqs.float() if use_real and repeat_interleave_real: # flux, hunyuan-dit, cogvideox freqs_cos = freqs.cos().repeat_interleave(2, dim=1, output_size=freqs.shape[1] * 2).float() # [S, D] freqs_sin = freqs.sin().repeat_interleave(2, dim=1, output_size=freqs.shape[1] * 2).float() # [S, D] return freqs_cos, freqs_sin elif use_real: # stable audio, allegro freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float() # [S, D] freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float() # [S, D] return freqs_cos, freqs_sin else: # lumina freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 # [S, D/2] return freqs_cis def apply_rotary_emb( x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], use_real: bool = True, use_real_unbind_dim: int = -1, sequence_dim: int = 2, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are returned as real tensors. Args: x (`torch.Tensor`): Query or key tensor to apply rotary embeddings. [B, H, S, D] xk (torch.Tensor): Key tensor to apply freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) Returns: Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. 
""" if use_real: cos, sin = freqs_cis # [S, D] if sequence_dim == 2: cos = cos[None, None, :, :] sin = sin[None, None, :, :] elif sequence_dim == 1: cos = cos[None, :, None, :] sin = sin[None, :, None, :] else: raise ValueError(f"`sequence_dim={sequence_dim}` but should be 1 or 2.") cos, sin = cos.to(x.device), sin.to(x.device) if use_real_unbind_dim == -1: # Used for flux, cogvideox, hunyuan-dit x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, H, S, D//2] x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) elif use_real_unbind_dim == -2: # Used for Stable Audio, OmniGen, CogView4 and Cosmos x_real, x_imag = x.reshape(*x.shape[:-1], 2, -1).unbind(-2) # [B, H, S, D//2] x_rotated = torch.cat([-x_imag, x_real], dim=-1) else: raise ValueError(f"`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.") out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) return out else: # used for lumina x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) freqs_cis = freqs_cis.unsqueeze(2) x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3) return x_out.type_as(x) def apply_rotary_emb_allegro(x: torch.Tensor, freqs_cis, positions): # TODO(aryan): rewrite def apply_1d_rope(tokens, pos, cos, sin): cos = F.embedding(pos, cos)[:, None, :, :] sin = F.embedding(pos, sin)[:, None, :, :] x1, x2 = tokens[..., : tokens.shape[-1] // 2], tokens[..., tokens.shape[-1] // 2 :] tokens_rotated = torch.cat((-x2, x1), dim=-1) return (tokens.float() * cos + tokens_rotated.float() * sin).to(tokens.dtype) (t_cos, t_sin), (h_cos, h_sin), (w_cos, w_sin) = freqs_cis t, h, w = x.chunk(3, dim=-1) t = apply_1d_rope(t, positions[0], t_cos, t_sin) h = apply_1d_rope(h, positions[1], h_cos, h_sin) w = apply_1d_rope(w, positions[2], w_cos, w_sin) x = torch.cat([t, h, w], dim=-1) return x class TimestepEmbedding(nn.Module): def __init__( self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None, post_act_fn: Optional[str] = None, cond_proj_dim=None, sample_proj_bias=True, ): super().__init__() self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias) if cond_proj_dim is not None: self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) else: self.cond_proj = None self.act = get_activation(act_fn) if out_dim is not None: time_embed_dim_out = out_dim else: time_embed_dim_out = time_embed_dim self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out, sample_proj_bias) if post_act_fn is None: self.post_act = None else: self.post_act = get_activation(post_act_fn) def forward(self, sample, condition=None): if condition is not None: sample = sample + self.cond_proj(condition) sample = self.linear_1(sample) if self.act is not None: sample = self.act(sample) sample = self.linear_2(sample) if self.post_act is not None: sample = self.post_act(sample) return sample class Timesteps(nn.Module): def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1): super().__init__() self.num_channels = num_channels self.flip_sin_to_cos = flip_sin_to_cos self.downscale_freq_shift = downscale_freq_shift self.scale = scale def forward(self, timesteps: torch.Tensor) -> torch.Tensor: t_emb = get_timestep_embedding( timesteps, self.num_channels, flip_sin_to_cos=self.flip_sin_to_cos, downscale_freq_shift=self.downscale_freq_shift, scale=self.scale, ) return t_emb class GaussianFourierProjection(nn.Module): """Gaussian Fourier embeddings for noise levels.""" def __init__( self, embedding_size: 
int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False ): super().__init__() self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.log = log self.flip_sin_to_cos = flip_sin_to_cos if set_W_to_weight: # to delete later del self.weight self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.weight = self.W del self.W def forward(self, x): if self.log: x = torch.log(x) x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi if self.flip_sin_to_cos: out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1) else: out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) return out class SinusoidalPositionalEmbedding(nn.Module): """Apply positional information to a sequence of embeddings. Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to them Args: embed_dim: (int): Dimension of the positional embedding. max_seq_length: Maximum sequence length to apply positional embeddings """ def __init__(self, embed_dim: int, max_seq_length: int = 32): super().__init__() position = torch.arange(max_seq_length).unsqueeze(1) div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) pe = torch.zeros(1, max_seq_length, embed_dim) pe[0, :, 0::2] = torch.sin(position * div_term) pe[0, :, 1::2] = torch.cos(position * div_term) self.register_buffer("pe", pe) def forward(self, x): _, seq_length, _ = x.shape x = x + self.pe[:, :seq_length] return x class ImagePositionalEmbeddings(nn.Module): """ Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the height and width of the latent space. For more details, see figure 10 of the dall-e paper: https://huggingface.co/papers/2102.12092 For VQ-diffusion: Output vector embeddings are used as input for the transformer. Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE. Args: num_embed (`int`): Number of embeddings for the latent pixels embeddings. height (`int`): Height of the latent image i.e. the number of height embeddings. width (`int`): Width of the latent image i.e. the number of width embeddings. embed_dim (`int`): Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings. """ def __init__( self, num_embed: int, height: int, width: int, embed_dim: int, ): super().__init__() self.height = height self.width = width self.num_embed = num_embed self.embed_dim = embed_dim self.emb = nn.Embedding(self.num_embed, embed_dim) self.height_emb = nn.Embedding(self.height, embed_dim) self.width_emb = nn.Embedding(self.width, embed_dim) def forward(self, index): emb = self.emb(index) height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height)) # 1 x H x D -> 1 x H x 1 x D height_emb = height_emb.unsqueeze(2) width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width)) # 1 x W x D -> 1 x 1 x W x D width_emb = width_emb.unsqueeze(1) pos_emb = height_emb + width_emb # 1 x H x W x D -> 1 x L xD pos_emb = pos_emb.view(1, self.height * self.width, -1) emb = emb + pos_emb[:, : emb.shape[1], :] return emb class LabelEmbedding(nn.Module): """ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance. Args: num_classes (`int`): The number of classes. hidden_size (`int`): The size of the vector embeddings. 
dropout_prob (`float`): The probability of dropping a label. """ def __init__(self, num_classes, hidden_size, dropout_prob): super().__init__() use_cfg_embedding = dropout_prob > 0 self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size) self.num_classes = num_classes self.dropout_prob = dropout_prob def token_drop(self, labels, force_drop_ids=None): """ Drops labels to enable classifier-free guidance. """ if force_drop_ids is None: drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob else: drop_ids = torch.tensor(force_drop_ids == 1) labels = torch.where(drop_ids, self.num_classes, labels) return labels def forward(self, labels: torch.LongTensor, force_drop_ids=None): use_dropout = self.dropout_prob > 0 if (self.training and use_dropout) or (force_drop_ids is not None): labels = self.token_drop(labels, force_drop_ids) embeddings = self.embedding_table(labels) return embeddings class TextImageProjection(nn.Module): def __init__( self, text_embed_dim: int = 1024, image_embed_dim: int = 768, cross_attention_dim: int = 768, num_image_text_embeds: int = 10, ): super().__init__() self.num_image_text_embeds = num_image_text_embeds self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim) def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): batch_size = text_embeds.shape[0] # image image_text_embeds = self.image_embeds(image_embeds) image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1) # text text_embeds = self.text_proj(text_embeds) return torch.cat([image_text_embeds, text_embeds], dim=1) class ImageProjection(nn.Module): def __init__( self, image_embed_dim: int = 768, cross_attention_dim: int = 768, num_image_text_embeds: int = 32, ): super().__init__() self.num_image_text_embeds = num_image_text_embeds self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): batch_size = image_embeds.shape[0] # image image_embeds = self.image_embeds(image_embeds.to(self.image_embeds.weight.dtype)) image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1) image_embeds = self.norm(image_embeds) return image_embeds class IPAdapterFullImageProjection(nn.Module): def __init__(self, image_embed_dim=1024, cross_attention_dim=1024): super().__init__() from .attention import FeedForward self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn="gelu") self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): return self.norm(self.ff(image_embeds)) class IPAdapterFaceIDImageProjection(nn.Module): def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1): super().__init__() from .attention import FeedForward self.num_tokens = num_tokens self.cross_attention_dim = cross_attention_dim self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn="gelu") self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): x = self.ff(image_embeds) x = x.reshape(-1, self.num_tokens, self.cross_attention_dim) return self.norm(x) class CombinedTimestepLabelEmbeddings(nn.Module): def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1): super().__init__() self.time_proj = Timesteps(num_channels=256, 
flip_sin_to_cos=True, downscale_freq_shift=1) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob) def forward(self, timestep, class_labels, hidden_dtype=None): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) class_labels = self.class_embedder(class_labels) # (N, D) conditioning = timesteps_emb + class_labels # (N, D) return conditioning class CombinedTimestepTextProjEmbeddings(nn.Module): def __init__(self, embedding_dim, pooled_projection_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") def forward(self, timestep, pooled_projection): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) pooled_projections = self.text_embedder(pooled_projection) conditioning = timesteps_emb + pooled_projections return conditioning class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module): def __init__(self, embedding_dim, pooled_projection_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") def forward(self, timestep, guidance, pooled_projection): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) guidance_proj = self.time_proj(guidance) guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) time_guidance_emb = timesteps_emb + guidance_emb pooled_projections = self.text_embedder(pooled_projection) conditioning = time_guidance_emb + pooled_projections return conditioning class CogView3CombinedTimestepSizeEmbeddings(nn.Module): def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256): super().__init__() self.time_proj = Timesteps(num_channels=timesteps_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.condition_proj = Timesteps(num_channels=condition_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=timesteps_dim, time_embed_dim=embedding_dim) self.condition_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") def forward( self, timestep: torch.Tensor, original_size: torch.Tensor, target_size: torch.Tensor, crop_coords: torch.Tensor, hidden_dtype: torch.dtype, ) -> torch.Tensor: timesteps_proj = self.time_proj(timestep) original_size_proj = self.condition_proj(original_size.flatten()).view(original_size.size(0), -1) crop_coords_proj = self.condition_proj(crop_coords.flatten()).view(crop_coords.size(0), -1) target_size_proj = self.condition_proj(target_size.flatten()).view(target_size.size(0), -1) # (B, 3 * condition_dim) condition_proj = torch.cat([original_size_proj, crop_coords_proj, target_size_proj], dim=1) timesteps_emb = 
self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (B, embedding_dim) condition_emb = self.condition_embedder(condition_proj.to(dtype=hidden_dtype)) # (B, embedding_dim) conditioning = timesteps_emb + condition_emb return conditioning class HunyuanDiTAttentionPool(nn.Module): # Copied from https://github.com/Tencent/HunyuanDiT/blob/cb709308d92e6c7e8d59d0dff41b74d35088db6a/hydit/modules/poolers.py#L6 def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim + 1, embed_dim) / embed_dim**0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.permute(1, 0, 2) # NLC -> LNC x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (L+1)NC x = x + self.positional_embedding[:, None, :].to(x.dtype) # (L+1)NC x, _ = F.multi_head_attention_forward( query=x[:1], key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False, ) return x.squeeze(0) class HunyuanCombinedTimestepTextSizeStyleEmbedding(nn.Module): def __init__( self, embedding_dim, pooled_projection_dim=1024, seq_len=256, cross_attention_dim=2048, use_style_cond_and_image_meta_size=True, ): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.size_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.pooler = HunyuanDiTAttentionPool( seq_len, cross_attention_dim, num_heads=8, output_dim=pooled_projection_dim ) # Here we use a default learned embedder layer for future extension. 
self.use_style_cond_and_image_meta_size = use_style_cond_and_image_meta_size if use_style_cond_and_image_meta_size: self.style_embedder = nn.Embedding(1, embedding_dim) extra_in_dim = 256 * 6 + embedding_dim + pooled_projection_dim else: extra_in_dim = pooled_projection_dim self.extra_embedder = PixArtAlphaTextProjection( in_features=extra_in_dim, hidden_size=embedding_dim * 4, out_features=embedding_dim, act_fn="silu_fp32", ) def forward(self, timestep, encoder_hidden_states, image_meta_size, style, hidden_dtype=None): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, 256) # extra condition1: text pooled_projections = self.pooler(encoder_hidden_states) # (N, 1024) if self.use_style_cond_and_image_meta_size: # extra condition2: image meta size embedding image_meta_size = self.size_proj(image_meta_size.view(-1)) image_meta_size = image_meta_size.to(dtype=hidden_dtype) image_meta_size = image_meta_size.view(-1, 6 * 256) # (N, 1536) # extra condition3: style embedding style_embedding = self.style_embedder(style) # (N, embedding_dim) # Concatenate all extra vectors extra_cond = torch.cat([pooled_projections, image_meta_size, style_embedding], dim=1) else: extra_cond = torch.cat([pooled_projections], dim=1) conditioning = timesteps_emb + self.extra_embedder(extra_cond) # [B, D] return conditioning class LuminaCombinedTimestepCaptionEmbedding(nn.Module): def __init__(self, hidden_size=4096, cross_attention_dim=2048, frequency_embedding_size=256): super().__init__() self.time_proj = Timesteps( num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0.0 ) self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size) self.caption_embedder = nn.Sequential( nn.LayerNorm(cross_attention_dim), nn.Linear( cross_attention_dim, hidden_size, bias=True, ), ) def forward(self, timestep, caption_feat, caption_mask): # timestep embedding: time_freq = self.time_proj(timestep) time_embed = self.timestep_embedder(time_freq.to(dtype=caption_feat.dtype)) # caption condition embedding: caption_mask_float = caption_mask.float().unsqueeze(-1) caption_feats_pool = (caption_feat * caption_mask_float).sum(dim=1) / caption_mask_float.sum(dim=1) caption_feats_pool = caption_feats_pool.to(caption_feat) caption_embed = self.caption_embedder(caption_feats_pool) conditioning = time_embed + caption_embed return conditioning class MochiCombinedTimestepCaptionEmbedding(nn.Module): def __init__( self, embedding_dim: int, pooled_projection_dim: int, text_embed_dim: int, time_embed_dim: int = 256, num_attention_heads: int = 8, ) -> None: super().__init__() self.time_proj = Timesteps(num_channels=time_embed_dim, flip_sin_to_cos=True, downscale_freq_shift=0.0) self.timestep_embedder = TimestepEmbedding(in_channels=time_embed_dim, time_embed_dim=embedding_dim) self.pooler = MochiAttentionPool( num_attention_heads=num_attention_heads, embed_dim=text_embed_dim, output_dim=embedding_dim ) self.caption_proj = nn.Linear(text_embed_dim, pooled_projection_dim) def forward( self, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, hidden_dtype: Optional[torch.dtype] = None, ): time_proj = self.time_proj(timestep) time_emb = self.timestep_embedder(time_proj.to(dtype=hidden_dtype)) pooled_projections = self.pooler(encoder_hidden_states, encoder_attention_mask) caption_proj = self.caption_proj(encoder_hidden_states) conditioning = time_emb + 
pooled_projections return conditioning, caption_proj class TextTimeEmbedding(nn.Module): def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int = 64): super().__init__() self.norm1 = nn.LayerNorm(encoder_dim) self.pool = AttentionPooling(num_heads, encoder_dim) self.proj = nn.Linear(encoder_dim, time_embed_dim) self.norm2 = nn.LayerNorm(time_embed_dim) def forward(self, hidden_states): hidden_states = self.norm1(hidden_states) hidden_states = self.pool(hidden_states) hidden_states = self.proj(hidden_states) hidden_states = self.norm2(hidden_states) return hidden_states class TextImageTimeEmbedding(nn.Module): def __init__(self, text_embed_dim: int = 768, image_embed_dim: int = 768, time_embed_dim: int = 1536): super().__init__() self.text_proj = nn.Linear(text_embed_dim, time_embed_dim) self.text_norm = nn.LayerNorm(time_embed_dim) self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): # text time_text_embeds = self.text_proj(text_embeds) time_text_embeds = self.text_norm(time_text_embeds) # image time_image_embeds = self.image_proj(image_embeds) return time_image_embeds + time_text_embeds class ImageTimeEmbedding(nn.Module): def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536): super().__init__() self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) self.image_norm = nn.LayerNorm(time_embed_dim) def forward(self, image_embeds: torch.Tensor): # image time_image_embeds = self.image_proj(image_embeds) time_image_embeds = self.image_norm(time_image_embeds) return time_image_embeds class ImageHintTimeEmbedding(nn.Module): def __init__(self, image_embed_dim: int = 768, time_embed_dim: int = 1536): super().__init__() self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) self.image_norm = nn.LayerNorm(time_embed_dim) self.input_hint_block = nn.Sequential( nn.Conv2d(3, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 32, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(32, 32, 3, padding=1), nn.SiLU(), nn.Conv2d(32, 96, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(96, 96, 3, padding=1), nn.SiLU(), nn.Conv2d(96, 256, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(256, 4, 3, padding=1), ) def forward(self, image_embeds: torch.Tensor, hint: torch.Tensor): # image time_image_embeds = self.image_proj(image_embeds) time_image_embeds = self.image_norm(time_image_embeds) hint = self.input_hint_block(hint) return time_image_embeds, hint class AttentionPooling(nn.Module): # Copied from https://github.com/deep-floyd/IF/blob/2f91391f27dd3c468bf174be5805b4cc92980c0b/deepfloyd_if/model/nn.py#L54 def __init__(self, num_heads, embed_dim, dtype=None): super().__init__() self.dtype = dtype self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim**0.5) self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.num_heads = num_heads self.dim_per_head = embed_dim // self.num_heads def forward(self, x): bs, length, width = x.size() def shape(x): # (bs, length, width) --> (bs, length, n_heads, dim_per_head) x = x.view(bs, -1, self.num_heads, self.dim_per_head) # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) x = x.transpose(1, 2) # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) x = x.reshape(bs * self.num_heads, -1, self.dim_per_head) # 
(bs*n_heads, length, dim_per_head) --> (bs*n_heads, dim_per_head, length) x = x.transpose(1, 2) return x class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype) x = torch.cat([class_token, x], dim=1) # (bs, length+1, width) # (bs*n_heads, class_token_length, dim_per_head) q = shape(self.q_proj(class_token)) # (bs*n_heads, length+class_token_length, dim_per_head) k = shape(self.k_proj(x)) v = shape(self.v_proj(x)) # (bs*n_heads, class_token_length, length+class_token_length): scale = 1 / math.sqrt(math.sqrt(self.dim_per_head)) weight = torch.einsum("bct,bcs->bts", q * scale, k * scale) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) # (bs*n_heads, dim_per_head, class_token_length) a = torch.einsum("bts,bcs->bct", weight, v) # (bs, length+1, width) a = a.reshape(bs, -1, 1).transpose(1, 2) return a[:, 0, :] # cls_token class MochiAttentionPool(nn.Module): def __init__( self, num_attention_heads: int, embed_dim: int, output_dim: Optional[int] = None, ) -> None: super().__init__() self.output_dim = output_dim or embed_dim self.num_attention_heads = num_attention_heads self.to_kv = nn.Linear(embed_dim, 2 * embed_dim) self.to_q = nn.Linear(embed_dim, embed_dim) self.to_out = nn.Linear(embed_dim, self.output_dim) @staticmethod def pool_tokens(x: torch.Tensor, mask: torch.Tensor, *, keepdim=False) -> torch.Tensor: """ Pool tokens in x using mask. NOTE: We assume x does not require gradients. Args: x: (B, L, D) tensor of tokens. mask: (B, L) boolean tensor indicating which tokens are not padding. Returns: pooled: (B, D) tensor of pooled tokens. """ assert x.size(1) == mask.size(1) # Expected mask to have same length as tokens. assert x.size(0) == mask.size(0) # Expected mask to have same batch size as tokens. mask = mask[:, :, None].to(dtype=x.dtype) mask = mask / mask.sum(dim=1, keepdim=True).clamp(min=1) pooled = (x * mask).sum(dim=1, keepdim=keepdim) return pooled def forward(self, x: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor: r""" Args: x (`torch.Tensor`): Tensor of shape `(B, S, D)` of input tokens. mask (`torch.Tensor`): Boolean ensor of shape `(B, S)` indicating which tokens are not padding. Returns: `torch.Tensor`: `(B, D)` tensor of pooled tokens. """ D = x.size(2) # Construct attention mask, shape: (B, 1, num_queries=1, num_keys=1+L). attn_mask = mask[:, None, None, :].bool() # (B, 1, 1, L). attn_mask = F.pad(attn_mask, (1, 0), value=True) # (B, 1, 1, 1+L). # Average non-padding token features. These will be used as the query. x_pool = self.pool_tokens(x, mask, keepdim=True) # (B, 1, D) # Concat pooled features to input sequence. x = torch.cat([x_pool, x], dim=1) # (B, L+1, D) # Compute queries, keys, values. Only the mean token is used to create a query. kv = self.to_kv(x) # (B, L+1, 2 * D) q = self.to_q(x[:, 0]) # (B, D) # Extract heads. head_dim = D // self.num_attention_heads kv = kv.unflatten(2, (2, self.num_attention_heads, head_dim)) # (B, 1+L, 2, H, head_dim) kv = kv.transpose(1, 3) # (B, H, 2, 1+L, head_dim) k, v = kv.unbind(2) # (B, H, 1+L, head_dim) q = q.unflatten(1, (self.num_attention_heads, head_dim)) # (B, H, head_dim) q = q.unsqueeze(2) # (B, H, 1, head_dim) # Compute attention. x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=0.0) # (B, H, 1, head_dim) # Concatenate heads and run output. 
x = x.squeeze(2).flatten(1, 2) # (B, D = H * head_dim) x = self.to_out(x) return x def get_fourier_embeds_from_boundingbox(embed_dim, box): """ Args: embed_dim: int box: a 3-D tensor [B x N x 4] representing the bounding boxes for GLIGEN pipeline Returns: [B x N x embed_dim] tensor of positional embeddings """ batch_size, num_boxes = box.shape[:2] emb = 100 ** (torch.arange(embed_dim) / embed_dim) emb = emb[None, None, None].to(device=box.device, dtype=box.dtype) emb = emb * box.unsqueeze(-1) emb = torch.stack((emb.sin(), emb.cos()), dim=-1) emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4) return emb class GLIGENTextBoundingboxProjection(nn.Module): def __init__(self, positive_len, out_dim, feature_type="text-only", fourier_freqs=8): super().__init__() self.positive_len = positive_len self.out_dim = out_dim self.fourier_embedder_dim = fourier_freqs self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy if isinstance(out_dim, tuple): out_dim = out_dim[0] if feature_type == "text-only": self.linears = nn.Sequential( nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim), ) self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) elif feature_type == "text-image": self.linears_text = nn.Sequential( nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim), ) self.linears_image = nn.Sequential( nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim), ) self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim])) def forward( self, boxes, masks, positive_embeddings=None, phrases_masks=None, image_masks=None, phrases_embeddings=None, image_embeddings=None, ): masks = masks.unsqueeze(-1) # embedding position (it may includes padding as placeholder) xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes) # B*N*4 -> B*N*C # learnable null embedding xyxy_null = self.null_position_feature.view(1, 1, -1) # replace padding with learnable null embedding xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null # positionet with text only information if positive_embeddings is not None: # learnable null embedding positive_null = self.null_positive_feature.view(1, 1, -1) # replace padding with learnable null embedding positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1)) # positionet with text and image information else: phrases_masks = phrases_masks.unsqueeze(-1) image_masks = image_masks.unsqueeze(-1) # learnable null embedding text_null = self.null_text_feature.view(1, 1, -1) image_null = self.null_image_feature.view(1, 1, -1) # replace padding with learnable null embedding phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1)) objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1)) objs = torch.cat([objs_text, objs_image], dim=1) return objs class 
PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module): """ For PixArt-Alpha. Reference: https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L164C9-L168C29 """ def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool = False): super().__init__() self.outdim = size_emb_dim self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.use_additional_conditions = use_additional_conditions if use_additional_conditions: self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) if self.use_additional_conditions: resolution_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype) resolution_emb = self.resolution_embedder(resolution_emb).reshape(batch_size, -1) aspect_ratio_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype) aspect_ratio_emb = self.aspect_ratio_embedder(aspect_ratio_emb).reshape(batch_size, -1) conditioning = timesteps_emb + torch.cat([resolution_emb, aspect_ratio_emb], dim=1) else: conditioning = timesteps_emb return conditioning class PixArtAlphaTextProjection(nn.Module): """ Projects caption embeddings. Also handles dropout for classifier-free guidance. Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py """ def __init__(self, in_features, hidden_size, out_features=None, act_fn="gelu_tanh"): super().__init__() if out_features is None: out_features = hidden_size self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True) if act_fn == "gelu_tanh": self.act_1 = nn.GELU(approximate="tanh") elif act_fn == "silu": self.act_1 = nn.SiLU() elif act_fn == "silu_fp32": self.act_1 = FP32SiLU() else: raise ValueError(f"Unknown activation function: {act_fn}") self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True) def forward(self, caption): hidden_states = self.linear_1(caption) hidden_states = self.act_1(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class IPAdapterPlusImageProjectionBlock(nn.Module): def __init__( self, embed_dims: int = 768, dim_head: int = 64, heads: int = 16, ffn_ratio: float = 4, ) -> None: super().__init__() from .attention import FeedForward self.ln0 = nn.LayerNorm(embed_dims) self.ln1 = nn.LayerNorm(embed_dims) self.attn = Attention( query_dim=embed_dims, dim_head=dim_head, heads=heads, out_bias=False, ) self.ff = nn.Sequential( nn.LayerNorm(embed_dims), FeedForward(embed_dims, embed_dims, activation_fn="gelu", mult=ffn_ratio, bias=False), ) def forward(self, x, latents, residual): encoder_hidden_states = self.ln0(x) latents = self.ln1(latents) encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2) latents = self.attn(latents, encoder_hidden_states) + residual latents = self.ff(latents) + latents return latents class IPAdapterPlusImageProjection(nn.Module): """Resampler of IP-Adapter Plus. 
Args: embed_dims (int): The feature dimension. Defaults to 768. output_dims (int): The number of output channels, that is the same number of the channels in the `unet.config.cross_attention_dim`. Defaults to 1024. hidden_dims (int): The number of hidden channels. Defaults to 1280. depth (int): The number of blocks. Defaults to 8. dim_head (int): The number of head channels. Defaults to 64. heads (int): Parallel attention heads. Defaults to 16. num_queries (int): The number of queries. Defaults to 8. ffn_ratio (float): The expansion ratio of feedforward network hidden layer channels. Defaults to 4. """ def __init__( self, embed_dims: int = 768, output_dims: int = 1024, hidden_dims: int = 1280, depth: int = 4, dim_head: int = 64, heads: int = 16, num_queries: int = 8, ffn_ratio: float = 4, ) -> None: super().__init__() self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims**0.5) self.proj_in = nn.Linear(embed_dims, hidden_dims) self.proj_out = nn.Linear(hidden_dims, output_dims) self.norm_out = nn.LayerNorm(output_dims) self.layers = nn.ModuleList( [IPAdapterPlusImageProjectionBlock(hidden_dims, dim_head, heads, ffn_ratio) for _ in range(depth)] ) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x (torch.Tensor): Input Tensor. Returns: torch.Tensor: Output Tensor. """ latents = self.latents.repeat(x.size(0), 1, 1) x = self.proj_in(x) for block in self.layers: residual = latents latents = block(x, latents, residual) latents = self.proj_out(latents) return self.norm_out(latents) class IPAdapterFaceIDPlusImageProjection(nn.Module): """FacePerceiverResampler of IP-Adapter Plus. Args: embed_dims (int): The feature dimension. Defaults to 768. output_dims (int): The number of output channels, that is the same number of the channels in the `unet.config.cross_attention_dim`. Defaults to 1024. hidden_dims (int): The number of hidden channels. Defaults to 1280. depth (int): The number of blocks. Defaults to 8. dim_head (int): The number of head channels. Defaults to 64. heads (int): Parallel attention heads. Defaults to 16. num_tokens (int): Number of tokens num_queries (int): The number of queries. Defaults to 8. ffn_ratio (float): The expansion ratio of feedforward network hidden layer channels. Defaults to 4. ffproj_ratio (float): The expansion ratio of feedforward network hidden layer channels (for ID embeddings). Defaults to 4. """ def __init__( self, embed_dims: int = 768, output_dims: int = 768, hidden_dims: int = 1280, id_embeddings_dim: int = 512, depth: int = 4, dim_head: int = 64, heads: int = 16, num_tokens: int = 4, num_queries: int = 8, ffn_ratio: float = 4, ffproj_ratio: int = 2, ) -> None: super().__init__() from .attention import FeedForward self.num_tokens = num_tokens self.embed_dim = embed_dims self.clip_embeds = None self.shortcut = False self.shortcut_scale = 1.0 self.proj = FeedForward(id_embeddings_dim, embed_dims * num_tokens, activation_fn="gelu", mult=ffproj_ratio) self.norm = nn.LayerNorm(embed_dims) self.proj_in = nn.Linear(hidden_dims, embed_dims) self.proj_out = nn.Linear(embed_dims, output_dims) self.norm_out = nn.LayerNorm(output_dims) self.layers = nn.ModuleList( [IPAdapterPlusImageProjectionBlock(embed_dims, dim_head, heads, ffn_ratio) for _ in range(depth)] ) def forward(self, id_embeds: torch.Tensor) -> torch.Tensor: """Forward pass. Args: id_embeds (torch.Tensor): Input Tensor (ID embeds). Returns: torch.Tensor: Output Tensor. 
""" id_embeds = id_embeds.to(self.clip_embeds.dtype) id_embeds = self.proj(id_embeds) id_embeds = id_embeds.reshape(-1, self.num_tokens, self.embed_dim) id_embeds = self.norm(id_embeds) latents = id_embeds clip_embeds = self.proj_in(self.clip_embeds) x = clip_embeds.reshape(-1, clip_embeds.shape[2], clip_embeds.shape[3]) for block in self.layers: residual = latents latents = block(x, latents, residual) latents = self.proj_out(latents) out = self.norm_out(latents) if self.shortcut: out = id_embeds + self.shortcut_scale * out return out class IPAdapterTimeImageProjectionBlock(nn.Module): """Block for IPAdapterTimeImageProjection. Args: hidden_dim (`int`, defaults to 1280): The number of hidden channels. dim_head (`int`, defaults to 64): The number of head channels. heads (`int`, defaults to 20): Parallel attention heads. ffn_ratio (`int`, defaults to 4): The expansion ratio of feedforward network hidden layer channels. """ def __init__( self, hidden_dim: int = 1280, dim_head: int = 64, heads: int = 20, ffn_ratio: int = 4, ) -> None: super().__init__() from .attention import FeedForward self.ln0 = nn.LayerNorm(hidden_dim) self.ln1 = nn.LayerNorm(hidden_dim) self.attn = Attention( query_dim=hidden_dim, cross_attention_dim=hidden_dim, dim_head=dim_head, heads=heads, bias=False, out_bias=False, ) self.ff = FeedForward(hidden_dim, hidden_dim, activation_fn="gelu", mult=ffn_ratio, bias=False) # AdaLayerNorm self.adaln_silu = nn.SiLU() self.adaln_proj = nn.Linear(hidden_dim, 4 * hidden_dim) self.adaln_norm = nn.LayerNorm(hidden_dim) # Set attention scale and fuse KV self.attn.scale = 1 / math.sqrt(math.sqrt(dim_head)) self.attn.fuse_projections() self.attn.to_k = None self.attn.to_v = None def forward(self, x: torch.Tensor, latents: torch.Tensor, timestep_emb: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x (`torch.Tensor`): Image features. latents (`torch.Tensor`): Latent features. timestep_emb (`torch.Tensor`): Timestep embedding. Returns: `torch.Tensor`: Output latent features. """ # Shift and scale for AdaLayerNorm emb = self.adaln_proj(self.adaln_silu(timestep_emb)) shift_msa, scale_msa, shift_mlp, scale_mlp = emb.chunk(4, dim=1) # Fused Attention residual = latents x = self.ln0(x) latents = self.ln1(latents) * (1 + scale_msa[:, None]) + shift_msa[:, None] batch_size = latents.shape[0] query = self.attn.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) key, value = self.attn.to_kv(kv_input).chunk(2, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // self.attn.heads query = query.view(batch_size, -1, self.attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, self.attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, self.attn.heads, head_dim).transpose(1, 2) weight = (query * self.attn.scale) @ (key * self.attn.scale).transpose(-2, -1) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) latents = weight @ value latents = latents.transpose(1, 2).reshape(batch_size, -1, self.attn.heads * head_dim) latents = self.attn.to_out[0](latents) latents = self.attn.to_out[1](latents) latents = latents + residual ## FeedForward residual = latents latents = self.adaln_norm(latents) * (1 + scale_mlp[:, None]) + shift_mlp[:, None] return self.ff(latents) + residual # Modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py class IPAdapterTimeImageProjection(nn.Module): """Resampler of SD3 IP-Adapter with timestep embedding. Args: embed_dim (`int`, defaults to 1152): The feature dimension. 
output_dim (`int`, defaults to 2432): The number of output channels. hidden_dim (`int`, defaults to 1280): The number of hidden channels. depth (`int`, defaults to 4): The number of blocks. dim_head (`int`, defaults to 64): The number of head channels. heads (`int`, defaults to 20): Parallel attention heads. num_queries (`int`, defaults to 64): The number of queries. ffn_ratio (`int`, defaults to 4): The expansion ratio of feedforward network hidden layer channels. timestep_in_dim (`int`, defaults to 320): The number of input channels for timestep embedding. timestep_flip_sin_to_cos (`bool`, defaults to True): Flip the timestep embedding order to `cos, sin` (if True) or `sin, cos` (if False). timestep_freq_shift (`int`, defaults to 0): Controls the timestep delta between frequencies between dimensions. """ def __init__( self, embed_dim: int = 1152, output_dim: int = 2432, hidden_dim: int = 1280, depth: int = 4, dim_head: int = 64, heads: int = 20, num_queries: int = 64, ffn_ratio: int = 4, timestep_in_dim: int = 320, timestep_flip_sin_to_cos: bool = True, timestep_freq_shift: int = 0, ) -> None: super().__init__() self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dim) / hidden_dim**0.5) self.proj_in = nn.Linear(embed_dim, hidden_dim) self.proj_out = nn.Linear(hidden_dim, output_dim) self.norm_out = nn.LayerNorm(output_dim) self.layers = nn.ModuleList( [IPAdapterTimeImageProjectionBlock(hidden_dim, dim_head, heads, ffn_ratio) for _ in range(depth)] ) self.time_proj = Timesteps(timestep_in_dim, timestep_flip_sin_to_cos, timestep_freq_shift) self.time_embedding = TimestepEmbedding(timestep_in_dim, hidden_dim, act_fn="silu") def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """Forward pass. Args: x (`torch.Tensor`): Image features. timestep (`torch.Tensor`): Timestep in denoising process. Returns: `Tuple`[`torch.Tensor`, `torch.Tensor`]: The pair (latents, timestep_emb). """ timestep_emb = self.time_proj(timestep).to(dtype=x.dtype) timestep_emb = self.time_embedding(timestep_emb) latents = self.latents.repeat(x.size(0), 1, 1) x = self.proj_in(x) x = x + timestep_emb[:, None] for block in self.layers: latents = block(x, latents, timestep_emb) latents = self.proj_out(latents) latents = self.norm_out(latents) return latents, timestep_emb class MultiIPAdapterImageProjection(nn.Module): def __init__(self, IPAdapterImageProjectionLayers: Union[List[nn.Module], Tuple[nn.Module]]): super().__init__() self.image_projection_layers = nn.ModuleList(IPAdapterImageProjectionLayers) @property def num_ip_adapters(self) -> int: """Number of IP-Adapters loaded.""" return len(self.image_projection_layers) def forward(self, image_embeds: List[torch.Tensor]): projected_image_embeds = [] # currently, we accept `image_embeds` as # 1. a tensor (deprecated) with shape [batch_size, embed_dim] or [batch_size, sequence_length, embed_dim] # 2. list of `n` tensors where `n` is number of ip-adapters, each tensor can hae shape [batch_size, num_images, embed_dim] or [batch_size, num_images, sequence_length, embed_dim] if not isinstance(image_embeds, list): deprecation_message = ( "You have passed a tensor as `image_embeds`.This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `image_embeds` as a list of tensors to suppress this warning." 
) deprecate("image_embeds not a list", "1.0.0", deprecation_message, standard_warn=False) image_embeds = [image_embeds.unsqueeze(1)] if len(image_embeds) != len(self.image_projection_layers): raise ValueError( f"image_embeds must have the same length as image_projection_layers, got {len(image_embeds)} and {len(self.image_projection_layers)}" ) for image_embed, image_projection_layer in zip(image_embeds, self.image_projection_layers): batch_size, num_images = image_embed.shape[0], image_embed.shape[1] image_embed = image_embed.reshape((batch_size * num_images,) + image_embed.shape[2:]) image_embed = image_projection_layer(image_embed) image_embed = image_embed.reshape((batch_size, num_images) + image_embed.shape[1:]) projected_image_embeds.append(image_embed) return projected_image_embeds class FluxPosEmbed(nn.Module): def __new__(cls, *args, **kwargs): deprecation_message = "Importing and using `FluxPosEmbed` from `diffusers.models.embeddings` is deprecated. Please import it from `diffusers.models.transformers.transformer_flux`." deprecate("FluxPosEmbed", "1.0.0", deprecation_message) from .transformers.transformer_flux import FluxPosEmbed return FluxPosEmbed(*args, **kwargs)
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ..attention import BasicTransformerBlock from ..embeddings import PatchEmbed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DiTTransformer2DModel(ModelMixin, ConfigMixin): r""" A 2D Transformer model as introduced in DiT (https://huggingface.co/papers/2212.09748). Parameters: num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (int, optional, defaults to 72): The number of channels in each head. in_channels (int, defaults to 4): The number of channels in the input. out_channels (int, optional): The number of channels in the output. Specify this parameter if the output channel number differs from the input. num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use. dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks. norm_num_groups (int, optional, defaults to 32): Number of groups for group normalization within Transformer blocks. attention_bias (bool, optional, defaults to True): Configure if the Transformer blocks' attention should contain a bias parameter. sample_size (int, defaults to 32): The width of the latent images. This parameter is fixed during training. patch_size (int, defaults to 2): Size of the patches the model processes, relevant for architectures working on non-sequential data. activation_fn (str, optional, defaults to "gelu-approximate"): Activation function to use in feed-forward networks within Transformer blocks. num_embeds_ada_norm (int, optional, defaults to 1000): Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during inference. upcast_attention (bool, optional, defaults to False): If true, upcasts the attention mechanism dimensions for potentially improved performance. norm_type (str, optional, defaults to "ada_norm_zero"): Specifies the type of normalization used, can be 'ada_norm_zero'. norm_elementwise_affine (bool, optional, defaults to False): If true, enables element-wise affine parameters in the normalization layers. norm_eps (float, optional, defaults to 1e-5): A small constant added to the denominator in normalization layers to prevent division by zero. 
""" _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _supports_gradient_checkpointing = True _supports_group_offloading = False @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 72, in_channels: int = 4, out_channels: Optional[int] = None, num_layers: int = 28, dropout: float = 0.0, norm_num_groups: int = 32, attention_bias: bool = True, sample_size: int = 32, patch_size: int = 2, activation_fn: str = "gelu-approximate", num_embeds_ada_norm: Optional[int] = 1000, upcast_attention: bool = False, norm_type: str = "ada_norm_zero", norm_elementwise_affine: bool = False, norm_eps: float = 1e-5, ): super().__init__() # Validate inputs. if norm_type != "ada_norm_zero": raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type == "ada_norm_zero" and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." ) # Set some common variables used across the board. self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.out_channels = in_channels if out_channels is None else out_channels self.gradient_checkpointing = False # 2. Initialize the position embedding and transformer blocks. self.height = self.config.sample_size self.width = self.config.sample_size self.patch_size = self.config.patch_size self.pos_embed = PatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, ) for _ in range(self.config.num_layers) ] ) # 3. Output blocks. self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) self.proj_out_2 = nn.Linear( self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels ) def forward( self, hidden_states: torch.Tensor, timestep: Optional[torch.LongTensor] = None, class_labels: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, return_dict: bool = True, ): """ The [`DiTTransformer2DModel`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): Input `hidden_states`. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. 
cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. Input height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size hidden_states = self.pos_embed(hidden_states) # 2. Blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, None, None, None, timestep, cross_attention_kwargs, class_labels, ) else: hidden_states = block( hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output conditioning = self.transformer_blocks[0].norm1.emb(timestep, class_labels, hidden_dtype=hidden_states.dtype) shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) # unpatchify height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
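# Illustrative usage sketch (not part of the upstream module): a minimal, hedged example
# of a class-conditional forward pass through DiTTransformer2DModel. The tiny
# configuration below is an illustrative assumption chosen only to keep the sketch cheap
# to run; it is not a recommended or pretrained setup.
if __name__ == "__main__":
    model = DiTTransformer2DModel(
        num_attention_heads=2,
        attention_head_dim=8,
        in_channels=4,
        num_layers=2,
        sample_size=8,           # latent height/width in this toy setup
        patch_size=2,
        num_embeds_ada_norm=10,  # number of class labels available to AdaLayerNormZero
    )
    latents = torch.randn(1, 4, 8, 8)                   # (batch, in_channels, height, width)
    timestep = torch.tensor([999], dtype=torch.long)    # current denoising step
    class_labels = torch.tensor([3], dtype=torch.long)  # class-conditioning label
    sample = model(latents, timestep=timestep, class_labels=class_labels).sample
    assert sample.shape == (1, 4, 8, 8)  # unpatchified output matches the input latent shape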
# Copyright 2025 The NVIDIA Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils import is_torchvision_available from ..attention import FeedForward from ..attention_processor import Attention from ..embeddings import Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import RMSNorm if is_torchvision_available(): from torchvision import transforms class CosmosPatchEmbed(nn.Module): def __init__( self, in_channels: int, out_channels: int, patch_size: Tuple[int, int, int], bias: bool = True ) -> None: super().__init__() self.patch_size = patch_size self.proj = nn.Linear(in_channels * patch_size[0] * patch_size[1] * patch_size[2], out_channels, bias=bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.patch_size hidden_states = hidden_states.reshape( batch_size, num_channels, num_frames // p_t, p_t, height // p_h, p_h, width // p_w, p_w ) hidden_states = hidden_states.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7) hidden_states = self.proj(hidden_states) return hidden_states class CosmosTimestepEmbedding(nn.Module): def __init__(self, in_features: int, out_features: int) -> None: super().__init__() self.linear_1 = nn.Linear(in_features, out_features, bias=False) self.activation = nn.SiLU() self.linear_2 = nn.Linear(out_features, 3 * out_features, bias=False) def forward(self, timesteps: torch.Tensor) -> torch.Tensor: emb = self.linear_1(timesteps) emb = self.activation(emb) emb = self.linear_2(emb) return emb class CosmosEmbedding(nn.Module): def __init__(self, embedding_dim: int, condition_dim: int) -> None: super().__init__() self.time_proj = Timesteps(embedding_dim, flip_sin_to_cos=True, downscale_freq_shift=0.0) self.t_embedder = CosmosTimestepEmbedding(embedding_dim, condition_dim) self.norm = RMSNorm(embedding_dim, eps=1e-6, elementwise_affine=True) def forward(self, hidden_states: torch.Tensor, timestep: torch.LongTensor) -> torch.Tensor: timesteps_proj = self.time_proj(timestep).type_as(hidden_states) temb = self.t_embedder(timesteps_proj) embedded_timestep = self.norm(timesteps_proj) return temb, embedded_timestep class CosmosAdaLayerNorm(nn.Module): def __init__(self, in_features: int, hidden_features: int) -> None: super().__init__() self.embedding_dim = in_features self.activation = nn.SiLU() self.norm = nn.LayerNorm(in_features, elementwise_affine=False, eps=1e-6) self.linear_1 = nn.Linear(in_features, hidden_features, bias=False) self.linear_2 = nn.Linear(hidden_features, 2 * in_features, bias=False) def forward( self, hidden_states: torch.Tensor, embedded_timestep: torch.Tensor, temb: Optional[torch.Tensor] = None ) -> 
torch.Tensor: embedded_timestep = self.activation(embedded_timestep) embedded_timestep = self.linear_1(embedded_timestep) embedded_timestep = self.linear_2(embedded_timestep) if temb is not None: embedded_timestep = embedded_timestep + temb[..., : 2 * self.embedding_dim] shift, scale = embedded_timestep.chunk(2, dim=-1) hidden_states = self.norm(hidden_states) if embedded_timestep.ndim == 2: shift, scale = (x.unsqueeze(1) for x in (shift, scale)) hidden_states = hidden_states * (1 + scale) + shift return hidden_states class CosmosAdaLayerNormZero(nn.Module): def __init__(self, in_features: int, hidden_features: Optional[int] = None) -> None: super().__init__() self.norm = nn.LayerNorm(in_features, elementwise_affine=False, eps=1e-6) self.activation = nn.SiLU() if hidden_features is None: self.linear_1 = nn.Identity() else: self.linear_1 = nn.Linear(in_features, hidden_features, bias=False) self.linear_2 = nn.Linear(hidden_features, 3 * in_features, bias=False) def forward( self, hidden_states: torch.Tensor, embedded_timestep: torch.Tensor, temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: embedded_timestep = self.activation(embedded_timestep) embedded_timestep = self.linear_1(embedded_timestep) embedded_timestep = self.linear_2(embedded_timestep) if temb is not None: embedded_timestep = embedded_timestep + temb shift, scale, gate = embedded_timestep.chunk(3, dim=-1) hidden_states = self.norm(hidden_states) if embedded_timestep.ndim == 2: shift, scale, gate = (x.unsqueeze(1) for x in (shift, scale, gate)) hidden_states = hidden_states * (1 + scale) + shift return hidden_states, gate class CosmosAttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CosmosAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: # 1. QKV projections if encoder_hidden_states is None: encoder_hidden_states = hidden_states query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) # 2. QK normalization query = attn.norm_q(query) key = attn.norm_k(key) # 3. Apply RoPE if image_rotary_emb is not None: from ..embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb, use_real=True, use_real_unbind_dim=-2) key = apply_rotary_emb(key, image_rotary_emb, use_real=True, use_real_unbind_dim=-2) # 4. Prepare for GQA if torch.onnx.is_in_onnx_export(): query_idx = torch.tensor(query.size(3), device=query.device) key_idx = torch.tensor(key.size(3), device=key.device) value_idx = torch.tensor(value.size(3), device=value.device) else: query_idx = query.size(3) key_idx = key.size(3) value_idx = value.size(3) key = key.repeat_interleave(query_idx // key_idx, dim=3) value = value.repeat_interleave(query_idx // value_idx, dim=3) # 5. Attention hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).flatten(2, 3).type_as(query) # 6. 
Output projection hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class CosmosTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: int, mlp_ratio: float = 4.0, adaln_lora_dim: int = 256, qk_norm: str = "rms_norm", out_bias: bool = False, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim) self.attn1 = Attention( query_dim=hidden_size, cross_attention_dim=None, heads=num_attention_heads, dim_head=attention_head_dim, qk_norm=qk_norm, elementwise_affine=True, out_bias=out_bias, processor=CosmosAttnProcessor2_0(), ) self.norm2 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim) self.attn2 = Attention( query_dim=hidden_size, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, qk_norm=qk_norm, elementwise_affine=True, out_bias=out_bias, processor=CosmosAttnProcessor2_0(), ) self.norm3 = CosmosAdaLayerNormZero(in_features=hidden_size, hidden_features=adaln_lora_dim) self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu", bias=out_bias) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, embedded_timestep: torch.Tensor, temb: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, extra_pos_emb: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if extra_pos_emb is not None: hidden_states = hidden_states + extra_pos_emb # 1. Self Attention norm_hidden_states, gate = self.norm1(hidden_states, embedded_timestep, temb) attn_output = self.attn1(norm_hidden_states, image_rotary_emb=image_rotary_emb) hidden_states = hidden_states + gate * attn_output # 2. Cross Attention norm_hidden_states, gate = self.norm2(hidden_states, embedded_timestep, temb) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask ) hidden_states = hidden_states + gate * attn_output # 3. 
Feed Forward norm_hidden_states, gate = self.norm3(hidden_states, embedded_timestep, temb) ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + gate * ff_output return hidden_states class CosmosRotaryPosEmbed(nn.Module): def __init__( self, hidden_size: int, max_size: Tuple[int, int, int] = (128, 240, 240), patch_size: Tuple[int, int, int] = (1, 2, 2), base_fps: int = 24, rope_scale: Tuple[float, float, float] = (2.0, 1.0, 1.0), ) -> None: super().__init__() self.max_size = [size // patch for size, patch in zip(max_size, patch_size)] self.patch_size = patch_size self.base_fps = base_fps self.dim_h = hidden_size // 6 * 2 self.dim_w = hidden_size // 6 * 2 self.dim_t = hidden_size - self.dim_h - self.dim_w self.h_ntk_factor = rope_scale[1] ** (self.dim_h / (self.dim_h - 2)) self.w_ntk_factor = rope_scale[2] ** (self.dim_w / (self.dim_w - 2)) self.t_ntk_factor = rope_scale[0] ** (self.dim_t / (self.dim_t - 2)) def forward(self, hidden_states: torch.Tensor, fps: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, num_channels, num_frames, height, width = hidden_states.shape pe_size = [num_frames // self.patch_size[0], height // self.patch_size[1], width // self.patch_size[2]] device = hidden_states.device h_theta = 10000.0 * self.h_ntk_factor w_theta = 10000.0 * self.w_ntk_factor t_theta = 10000.0 * self.t_ntk_factor seq = torch.arange(max(self.max_size), device=device, dtype=torch.float32) dim_h_range = ( torch.arange(0, self.dim_h, 2, device=device, dtype=torch.float32)[: (self.dim_h // 2)] / self.dim_h ) dim_w_range = ( torch.arange(0, self.dim_w, 2, device=device, dtype=torch.float32)[: (self.dim_w // 2)] / self.dim_w ) dim_t_range = ( torch.arange(0, self.dim_t, 2, device=device, dtype=torch.float32)[: (self.dim_t // 2)] / self.dim_t ) h_spatial_freqs = 1.0 / (h_theta**dim_h_range) w_spatial_freqs = 1.0 / (w_theta**dim_w_range) temporal_freqs = 1.0 / (t_theta**dim_t_range) emb_h = torch.outer(seq[: pe_size[1]], h_spatial_freqs)[None, :, None, :].repeat(pe_size[0], 1, pe_size[2], 1) emb_w = torch.outer(seq[: pe_size[2]], w_spatial_freqs)[None, None, :, :].repeat(pe_size[0], pe_size[1], 1, 1) # Apply sequence scaling in temporal dimension if fps is None: # Images emb_t = torch.outer(seq[: pe_size[0]], temporal_freqs) else: # Videos emb_t = torch.outer(seq[: pe_size[0]] / fps * self.base_fps, temporal_freqs) emb_t = emb_t[:, None, None, :].repeat(1, pe_size[1], pe_size[2], 1) freqs = torch.cat([emb_t, emb_h, emb_w] * 2, dim=-1).flatten(0, 2).float() cos = torch.cos(freqs) sin = torch.sin(freqs) return cos, sin class CosmosLearnablePositionalEmbed(nn.Module): def __init__( self, hidden_size: int, max_size: Tuple[int, int, int], patch_size: Tuple[int, int, int], eps: float = 1e-6, ) -> None: super().__init__() self.max_size = [size // patch for size, patch in zip(max_size, patch_size)] self.patch_size = patch_size self.eps = eps self.pos_emb_t = nn.Parameter(torch.zeros(self.max_size[0], hidden_size)) self.pos_emb_h = nn.Parameter(torch.zeros(self.max_size[1], hidden_size)) self.pos_emb_w = nn.Parameter(torch.zeros(self.max_size[2], hidden_size)) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape pe_size = [num_frames // self.patch_size[0], height // self.patch_size[1], width // self.patch_size[2]] emb_t = self.pos_emb_t[: pe_size[0]][None, :, None, None, :].repeat(batch_size, 1, pe_size[1], pe_size[2], 1) emb_h = self.pos_emb_h[: pe_size[1]][None, None, :, None, 
:].repeat(batch_size, pe_size[0], 1, pe_size[2], 1) emb_w = self.pos_emb_w[: pe_size[2]][None, None, None, :, :].repeat(batch_size, pe_size[0], pe_size[1], 1, 1) emb = emb_t + emb_h + emb_w emb = emb.flatten(1, 3) norm = torch.linalg.vector_norm(emb, dim=-1, keepdim=True, dtype=torch.float32) norm = torch.add(self.eps, norm, alpha=np.sqrt(norm.numel() / emb.numel())) return (emb / norm).type_as(hidden_states) class CosmosTransformer3DModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): r""" A Transformer model for video-like data used in [Cosmos](https://github.com/NVIDIA/Cosmos). Args: in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, defaults to `16`): The number of channels in the output. num_attention_heads (`int`, defaults to `32`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `128`): The number of channels in each attention head. num_layers (`int`, defaults to `28`): The number of layers of transformer blocks to use. mlp_ratio (`float`, defaults to `4.0`): The ratio of the hidden layer size to the input size in the feedforward network. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. adaln_lora_dim (`int`, defaults to `256`): The hidden dimension of the Adaptive LayerNorm LoRA layer. max_size (`Tuple[int, int, int]`, defaults to `(128, 240, 240)`): The maximum size of the input latent tensors in the temporal, height, and width dimensions. patch_size (`Tuple[int, int, int]`, defaults to `(1, 2, 2)`): The patch size to use for patchifying the input latent tensors in the temporal, height, and width dimensions. rope_scale (`Tuple[float, float, float]`, defaults to `(2.0, 1.0, 1.0)`): The scaling factor to use for RoPE in the temporal, height, and width dimensions. concat_padding_mask (`bool`, defaults to `True`): Whether to concatenate the padding mask to the input latent tensors. extra_pos_embed_type (`str`, *optional*, defaults to `learnable`): The type of extra positional embeddings to use. Can be one of `None` or `learnable`. """ _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["patch_embed", "final_layer", "norm"] _no_split_modules = ["CosmosTransformerBlock"] _keep_in_fp32_modules = ["learnable_pos_embed"] @register_to_config def __init__( self, in_channels: int = 16, out_channels: int = 16, num_attention_heads: int = 32, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, max_size: Tuple[int, int, int] = (128, 240, 240), patch_size: Tuple[int, int, int] = (1, 2, 2), rope_scale: Tuple[float, float, float] = (2.0, 1.0, 1.0), concat_padding_mask: bool = True, extra_pos_embed_type: Optional[str] = "learnable", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim # 1. Patch Embedding patch_embed_in_channels = in_channels + 1 if concat_padding_mask else in_channels self.patch_embed = CosmosPatchEmbed(patch_embed_in_channels, hidden_size, patch_size, bias=False) # 2. Positional Embedding self.rope = CosmosRotaryPosEmbed( hidden_size=attention_head_dim, max_size=max_size, patch_size=patch_size, rope_scale=rope_scale ) self.learnable_pos_embed = None if extra_pos_embed_type == "learnable": self.learnable_pos_embed = CosmosLearnablePositionalEmbed( hidden_size=hidden_size, max_size=max_size, patch_size=patch_size, ) # 3. Time Embedding self.time_embed = CosmosEmbedding(hidden_size, hidden_size) # 4. 
Transformer Blocks self.transformer_blocks = nn.ModuleList( [ CosmosTransformerBlock( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, cross_attention_dim=text_embed_dim, mlp_ratio=mlp_ratio, adaln_lora_dim=adaln_lora_dim, qk_norm="rms_norm", out_bias=False, ) for _ in range(num_layers) ] ) # 5. Output norm & projection self.norm_out = CosmosAdaLayerNorm(hidden_size, adaln_lora_dim) self.proj_out = nn.Linear( hidden_size, patch_size[0] * patch_size[1] * patch_size[2] * out_channels, bias=False ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, fps: Optional[int] = None, condition_mask: Optional[torch.Tensor] = None, padding_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape # 1. Concatenate padding mask if needed & prepare attention mask if condition_mask is not None: hidden_states = torch.cat([hidden_states, condition_mask], dim=1) if self.config.concat_padding_mask: padding_mask = transforms.functional.resize( padding_mask, list(hidden_states.shape[-2:]), interpolation=transforms.InterpolationMode.NEAREST ) hidden_states = torch.cat( [hidden_states, padding_mask.unsqueeze(2).repeat(batch_size, 1, num_frames, 1, 1)], dim=1 ) if attention_mask is not None: attention_mask = attention_mask.unsqueeze(1).unsqueeze(1) # [B, 1, 1, S] # 2. Generate positional embeddings image_rotary_emb = self.rope(hidden_states, fps=fps) extra_pos_emb = self.learnable_pos_embed(hidden_states) if self.config.extra_pos_embed_type else None # 3. Patchify input p_t, p_h, p_w = self.config.patch_size post_patch_num_frames = num_frames // p_t post_patch_height = height // p_h post_patch_width = width // p_w hidden_states = self.patch_embed(hidden_states) hidden_states = hidden_states.flatten(1, 3) # [B, T, H, W, C] -> [B, THW, C] # 4. Timestep embeddings if timestep.ndim == 1: temb, embedded_timestep = self.time_embed(hidden_states, timestep) elif timestep.ndim == 5: assert timestep.shape == (batch_size, 1, num_frames, 1, 1), ( f"Expected timestep to have shape [B, 1, T, 1, 1], but got {timestep.shape}" ) timestep = timestep.flatten() temb, embedded_timestep = self.time_embed(hidden_states, timestep) # We can do this because num_frames == post_patch_num_frames, as p_t is 1 temb, embedded_timestep = ( x.view(batch_size, post_patch_num_frames, 1, 1, -1) .expand(-1, -1, post_patch_height, post_patch_width, -1) .flatten(1, 3) for x in (temb, embedded_timestep) ) # [BT, C] -> [B, T, 1, 1, C] -> [B, T, H, W, C] -> [B, THW, C] else: assert False # 5. Transformer blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, embedded_timestep, temb, image_rotary_emb, extra_pos_emb, attention_mask, ) else: hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, embedded_timestep=embedded_timestep, temb=temb, image_rotary_emb=image_rotary_emb, extra_pos_emb=extra_pos_emb, attention_mask=attention_mask, ) # 6. 
Output norm & projection & unpatchify hidden_states = self.norm_out(hidden_states, embedded_timestep, temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.unflatten(2, (p_h, p_w, p_t, -1)) hidden_states = hidden_states.unflatten(1, (post_patch_num_frames, post_patch_height, post_patch_width)) # NOTE: The permutation order here is not the inverse of the patchify permutation, as one might expect. # It may confuse the reader, but it is correct hidden_states = hidden_states.permute(0, 7, 1, 6, 2, 4, 3, 5) hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states)
diffusers/src/diffusers/models/transformers/transformer_cosmos.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_cosmos.py", "repo_id": "diffusers", "token_count": 11273 }
159
from ...utils import is_flax_available, is_torch_available if is_torch_available(): from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel from .unet_3d_condition import UNet3DConditionModel from .unet_i2vgen_xl import I2VGenXLUNet from .unet_kandinsky3 import Kandinsky3UNet from .unet_motion_model import MotionAdapter, UNetMotionModel from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel from .unet_stable_cascade import StableCascadeUNet from .uvit_2d import UVit2DModel if is_flax_available(): from .unet_2d_condition_flax import FlaxUNet2DConditionModel
diffusers/src/diffusers/models/unets/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/__init__.py", "repo_id": "diffusers", "token_count": 265 }
160
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate from ..utils.import_utils import is_torch_version from .normalization import RMSNorm class Upsample1D(nn.Module): """A 1D upsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. use_conv_transpose (`bool`, default `False`): option to use a convolution transpose. out_channels (`int`, optional): number of output channels. Defaults to `channels`. name (`str`, default `conv`): name of the upsampling 1D layer. """ def __init__( self, channels: int, use_conv: bool = False, use_conv_transpose: bool = False, out_channels: Optional[int] = None, name: str = "conv", ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name self.conv = None if use_conv_transpose: self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) elif use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) def forward(self, inputs: torch.Tensor) -> torch.Tensor: assert inputs.shape[1] == self.channels if self.use_conv_transpose: return self.conv(inputs) outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest") if self.use_conv: outputs = self.conv(outputs) return outputs class Upsample2D(nn.Module): """A 2D upsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. use_conv_transpose (`bool`, default `False`): option to use a convolution transpose. out_channels (`int`, optional): number of output channels. Defaults to `channels`. name (`str`, default `conv`): name of the upsampling 2D layer. 
""" def __init__( self, channels: int, use_conv: bool = False, use_conv_transpose: bool = False, out_channels: Optional[int] = None, name: str = "conv", kernel_size: Optional[int] = None, padding=1, norm_type=None, eps=None, elementwise_affine=None, bias=True, interpolate=True, ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name self.interpolate = interpolate if norm_type == "ln_norm": self.norm = nn.LayerNorm(channels, eps, elementwise_affine) elif norm_type == "rms_norm": self.norm = RMSNorm(channels, eps, elementwise_affine) elif norm_type is None: self.norm = None else: raise ValueError(f"unknown norm_type: {norm_type}") conv = None if use_conv_transpose: if kernel_size is None: kernel_size = 4 conv = nn.ConvTranspose2d( channels, self.out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=bias ) elif use_conv: if kernel_size is None: kernel_size = 3 conv = nn.Conv2d(self.channels, self.out_channels, kernel_size=kernel_size, padding=padding, bias=bias) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if name == "conv": self.conv = conv else: self.Conv2d_0 = conv def forward(self, hidden_states: torch.Tensor, output_size: Optional[int] = None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) assert hidden_states.shape[1] == self.channels if self.norm is not None: hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) if self.use_conv_transpose: return self.conv(hidden_states) # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 until PyTorch 2.1 # https://github.com/pytorch/pytorch/issues/86679#issuecomment-1783978767 dtype = hidden_states.dtype if dtype == torch.bfloat16 and is_torch_version("<", "2.1"): hidden_states = hidden_states.to(torch.float32) # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984 if hidden_states.shape[0] >= 64: hidden_states = hidden_states.contiguous() # if `output_size` is passed we force the interpolation output # size and do not make use of `scale_factor=2` if self.interpolate: # upsample_nearest_nhwc also fails when the number of output elements is large # https://github.com/pytorch/pytorch/issues/141831 scale_factor = ( 2 if output_size is None else max([f / s for f, s in zip(output_size, hidden_states.shape[-2:])]) ) if hidden_states.numel() * scale_factor > pow(2, 31): hidden_states = hidden_states.contiguous() if output_size is None: hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") else: hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest") # Cast back to original dtype if dtype == torch.bfloat16 and is_torch_version("<", "2.1"): hidden_states = hidden_states.to(dtype) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if self.use_conv: if self.name == "conv": hidden_states = self.conv(hidden_states) else: hidden_states = self.Conv2d_0(hidden_states) return hidden_states class FirUpsample2D(nn.Module): """A 2D FIR upsampling layer with an optional convolution. Parameters: channels (`int`, optional): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. out_channels (`int`, optional): number of output channels. Defaults to `channels`. fir_kernel (`tuple`, default `(1, 3, 3, 1)`): kernel for the FIR filter. """ def __init__( self, channels: Optional[int] = None, out_channels: Optional[int] = None, use_conv: bool = False, fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1), ): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.use_conv = use_conv self.fir_kernel = fir_kernel self.out_channels = out_channels def _upsample_2d( self, hidden_states: torch.Tensor, weight: Optional[torch.Tensor] = None, kernel: Optional[torch.Tensor] = None, factor: int = 2, gain: float = 1, ) -> torch.Tensor: """Fused `upsample_2d()` followed by `Conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: hidden_states (`torch.Tensor`): Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. weight (`torch.Tensor`, *optional*): Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. kernel (`torch.Tensor`, *optional*): FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor (`int`, *optional*): Integer upsampling factor (default: 2). gain (`float`, *optional*): Scaling factor for signal magnitude (default: 1.0). Returns: output (`torch.Tensor`): Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `hidden_states`. """ assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. 
if kernel is None: kernel = [1] * factor # setup kernel kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * (factor**2)) if self.use_conv: convH = weight.shape[2] convW = weight.shape[3] inC = weight.shape[1] pad_value = (kernel.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. output_shape = ( (hidden_states.shape[2] - 1) * factor + convH, (hidden_states.shape[3] - 1) * factor + convW, ) output_padding = ( output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = hidden_states.shape[1] // inC # Transpose weights. weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) inverse_conv = F.conv_transpose2d( hidden_states, weight, stride=stride, output_padding=output_padding, padding=0, ) output = upfirdn2d_native( inverse_conv, torch.tensor(kernel, device=inverse_conv.device), pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1), ) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, torch.tensor(kernel, device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), ) return output def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.use_conv: height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel) height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) return height class KUpsample2D(nn.Module): r"""A 2D K-upsampling layer. Parameters: pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. """ def __init__(self, pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) weight = inputs.new_zeros( [ inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1], ] ) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) class CogVideoXUpsample3D(nn.Module): r""" A 3D Upsample layer using in CogVideoX by Tsinghua University & ZhipuAI # Todo: Wait for paper release. Args: in_channels (`int`): Number of channels in the input image. out_channels (`int`): Number of channels produced by the convolution. kernel_size (`int`, defaults to `3`): Size of the convolving kernel. stride (`int`, defaults to `1`): Stride of the convolution. padding (`int`, defaults to `1`): Padding added to all four sides of the input. compress_time (`bool`, defaults to `False`): Whether or not to compress the time dimension. 
""" def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, padding: int = 1, compress_time: bool = False, ) -> None: super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.compress_time = compress_time def forward(self, inputs: torch.Tensor) -> torch.Tensor: if self.compress_time: if inputs.shape[2] > 1 and inputs.shape[2] % 2 == 1: # split first frame x_first, x_rest = inputs[:, :, 0], inputs[:, :, 1:] x_first = F.interpolate(x_first, scale_factor=2.0) x_rest = F.interpolate(x_rest, scale_factor=2.0) x_first = x_first[:, :, None, :, :] inputs = torch.cat([x_first, x_rest], dim=2) elif inputs.shape[2] > 1: inputs = F.interpolate(inputs, scale_factor=2.0) else: inputs = inputs.squeeze(2) inputs = F.interpolate(inputs, scale_factor=2.0) inputs = inputs[:, :, None, :, :] else: # only interpolate 2D b, c, t, h, w = inputs.shape inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) inputs = F.interpolate(inputs, scale_factor=2.0) inputs = inputs.reshape(b, t, c, *inputs.shape[2:]).permute(0, 2, 1, 3, 4) b, c, t, h, w = inputs.shape inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) inputs = self.conv(inputs) inputs = inputs.reshape(b, t, *inputs.shape[1:]).permute(0, 2, 1, 3, 4) return inputs def upfirdn2d_native( tensor: torch.Tensor, kernel: torch.Tensor, up: int = 1, down: int = 1, pad: Tuple[int, int] = (0, 0), ) -> torch.Tensor: up_x = up_y = up down_x = down_y = down pad_x0 = pad_y0 = pad[0] pad_x1 = pad_y1 = pad[1] _, channel, in_h, in_w = tensor.shape tensor = tensor.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = tensor.shape kernel_h, kernel_w = kernel.shape out = tensor.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out.to(tensor.device) # Move back to mps if necessary out = out[ :, max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), :, ] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape( -1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, ) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upsample_2d( hidden_states: torch.Tensor, kernel: Optional[torch.Tensor] = None, factor: int = 2, gain: float = 1, ) -> torch.Tensor: r"""Upsample2D a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a: multiple of the upsampling factor. Args: hidden_states (`torch.Tensor`): Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. kernel (`torch.Tensor`, *optional*): FIR filter of the shape `[firH, firW]` or `[firN]` (separable). 
The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor (`int`, *optional*, defaults to `2`): Integer upsampling factor. gain (`float`, *optional*, defaults to `1.0`): Scaling factor for signal magnitude (default: 1.0). Returns: output (`torch.Tensor`): Tensor of the shape `[N, C, H * factor, W * factor]` """ assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * (factor**2)) pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, kernel.to(device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), ) return output
diffusers/src/diffusers/models/upsampling.py/0
{ "file_path": "diffusers/src/diffusers/models/upsampling.py", "repo_id": "diffusers", "token_count": 9105 }
161
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, List, Optional, Tuple, Union import PIL import torch from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, ControlNetModel, ControlNetUnionModel, UNet2DConditionModel from ...pipelines.controlnet.multicontrolnet import MultiControlNetModel from ...schedulers import EulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor, unwrap_module from ..modular_pipeline import ( ModularPipelineBlocks, PipelineState, ) from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam from .modular_pipeline import StableDiffusionXLModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name # TODO(yiyi, aryan): We need another step before text encoder to set the `num_inference_steps` attribute for guider so that # things like when to do guidance and how many conditions to be prepared can be determined. Currently, this is done by # always assuming you want to do guidance in the Guiders. So, negative embeddings are prepared regardless of what the # configuration of guider is. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def prepare_latents_img2img( vae, scheduler, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}") image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: latents_mean = latents_std = None if hasattr(vae.config, "latents_mean") and vae.config.latents_mean is not None: latents_mean = torch.tensor(vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(vae.config, "latents_std") and vae.config.latents_std is not None: latents_std = torch.tensor(vae.config.latents_std).view(1, 4, 1, 1) # make sure the VAE is in float32 mode, as it overflows in float16 if vae.config.force_upcast: image = image.float() vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(vae.encode(image), generator=generator) if vae.config.force_upcast: vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * vae.config.scaling_factor / latents_std else: init_latents = vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents class StableDiffusionXLInputStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return ( "Input processing step that:\n" " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n" " 2. Adjusts input tensor shapes based on `batch_size` (number of prompts) and `num_images_per_prompt`\n\n" "All input tensors are expected to have either batch_size=1 or match the batch_size\n" "of prompt_embeds. The tensors will be duplicated across the batch dimension to\n" "have a final batch_size of batch_size * num_images_per_prompt." ) @property def inputs(self) -> List[InputParam]: return [ InputParam("num_images_per_prompt", default=1), InputParam( "prompt_embeds", required=True, type_hint=torch.Tensor, description="Pre-generated text embeddings. Can be generated from text_encoder step.", ), InputParam( "negative_prompt_embeds", type_hint=torch.Tensor, description="Pre-generated negative text embeddings. Can be generated from text_encoder step.", ), InputParam( "pooled_prompt_embeds", required=True, type_hint=torch.Tensor, description="Pre-generated pooled text embeddings. Can be generated from text_encoder step.", ), InputParam( "negative_pooled_prompt_embeds", description="Pre-generated negative pooled text embeddings. Can be generated from text_encoder step.", ), InputParam( "ip_adapter_embeds", type_hint=List[torch.Tensor], description="Pre-generated image embeddings for IP-Adapter. Can be generated from ip_adapter step.", ), InputParam( "negative_ip_adapter_embeds", type_hint=List[torch.Tensor], description="Pre-generated negative image embeddings for IP-Adapter. 
Can be generated from ip_adapter step.", ), ] @property def intermediate_outputs(self) -> List[str]: return [ OutputParam( "batch_size", type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt", ), OutputParam( "dtype", type_hint=torch.dtype, description="Data type of model tensor inputs (determined by `prompt_embeds`)", ), OutputParam( "prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="text embeddings used to guide the image generation", ), OutputParam( "negative_prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="negative text embeddings used to guide the image generation", ), OutputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="pooled text embeddings used to guide the image generation", ), OutputParam( "negative_pooled_prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="negative pooled text embeddings used to guide the image generation", ), OutputParam( "ip_adapter_embeds", type_hint=List[torch.Tensor], kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="image embeddings for IP-Adapter", ), OutputParam( "negative_ip_adapter_embeds", type_hint=List[torch.Tensor], kwargs_type="guider_input_fields", # already in intermedites state but declare here again for guider_input_fields description="negative image embeddings for IP-Adapter", ), ] def check_inputs(self, components, block_state): if block_state.prompt_embeds is not None and block_state.negative_prompt_embeds is not None: if block_state.prompt_embeds.shape != block_state.negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {block_state.prompt_embeds.shape} != `negative_prompt_embeds`" f" {block_state.negative_prompt_embeds.shape}." ) if block_state.prompt_embeds is not None and block_state.pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if block_state.negative_prompt_embeds is not None and block_state.negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
) if block_state.ip_adapter_embeds is not None and not isinstance(block_state.ip_adapter_embeds, list): raise ValueError("`ip_adapter_embeds` must be a list") if block_state.negative_ip_adapter_embeds is not None and not isinstance( block_state.negative_ip_adapter_embeds, list ): raise ValueError("`negative_ip_adapter_embeds` must be a list") if block_state.ip_adapter_embeds is not None and block_state.negative_ip_adapter_embeds is not None: for i, ip_adapter_embed in enumerate(block_state.ip_adapter_embeds): if ip_adapter_embed.shape != block_state.negative_ip_adapter_embeds[i].shape: raise ValueError( "`ip_adapter_embeds` and `negative_ip_adapter_embeds` must have the same shape when passed directly, but" f" got: `ip_adapter_embeds` {ip_adapter_embed.shape} != `negative_ip_adapter_embeds`" f" {block_state.negative_ip_adapter_embeds[i].shape}." ) @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(components, block_state) block_state.batch_size = block_state.prompt_embeds.shape[0] block_state.dtype = block_state.prompt_embeds.dtype _, seq_len, _ = block_state.prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1) block_state.prompt_embeds = block_state.prompt_embeds.view( block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1 ) if block_state.negative_prompt_embeds is not None: _, seq_len, _ = block_state.negative_prompt_embeds.shape block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.repeat( 1, block_state.num_images_per_prompt, 1 ) block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.view( block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1 ) block_state.pooled_prompt_embeds = block_state.pooled_prompt_embeds.repeat( 1, block_state.num_images_per_prompt, 1 ) block_state.pooled_prompt_embeds = block_state.pooled_prompt_embeds.view( block_state.batch_size * block_state.num_images_per_prompt, -1 ) if block_state.negative_pooled_prompt_embeds is not None: block_state.negative_pooled_prompt_embeds = block_state.negative_pooled_prompt_embeds.repeat( 1, block_state.num_images_per_prompt, 1 ) block_state.negative_pooled_prompt_embeds = block_state.negative_pooled_prompt_embeds.view( block_state.batch_size * block_state.num_images_per_prompt, -1 ) if block_state.ip_adapter_embeds is not None: for i, ip_adapter_embed in enumerate(block_state.ip_adapter_embeds): block_state.ip_adapter_embeds[i] = torch.cat( [ip_adapter_embed] * block_state.num_images_per_prompt, dim=0 ) if block_state.negative_ip_adapter_embeds is not None: for i, negative_ip_adapter_embed in enumerate(block_state.negative_ip_adapter_embeds): block_state.negative_ip_adapter_embeds[i] = torch.cat( [negative_ip_adapter_embed] * block_state.num_images_per_prompt, dim=0 ) self.set_block_state(state, block_state) return components, state class StableDiffusionXLImg2ImgSetTimestepsStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return ( "Step that sets the timesteps for the scheduler and determines the initial noise level (latent_timestep) for image-to-image/inpainting generation.\n" + "The latent_timestep 
is calculated from the `strength` parameter - higher strength means starting from a noisier version of the input image." ) @property def inputs(self) -> List[InputParam]: return [ InputParam("num_inference_steps", default=50), InputParam("timesteps"), InputParam("sigmas"), InputParam("denoising_end"), InputParam("strength", default=0.3), InputParam("denoising_start"), # YiYi TODO: do we need num_images_per_prompt here? InputParam("num_images_per_prompt", default=1), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt", ), ] @property def intermediate_outputs(self) -> List[str]: return [ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"), OutputParam( "num_inference_steps", type_hint=int, description="The number of denoising steps to perform at inference time", ), OutputParam( "latent_timestep", type_hint=torch.Tensor, description="The timestep that represents the initial noise level for image-to-image generation", ), ] @staticmethod # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps with self->components def get_timesteps(components, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = components.scheduler.timesteps[t_start * components.scheduler.order :] if hasattr(components.scheduler, "set_begin_index"): components.scheduler.set_begin_index(t_start * components.scheduler.order) return timesteps, num_inference_steps - t_start else: # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. discrete_timestep_cutoff = int( round( components.scheduler.config.num_train_timesteps - (denoising_start * components.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (components.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if components.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. 
By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end t_start = len(components.scheduler.timesteps) - num_inference_steps timesteps = components.scheduler.timesteps[t_start:] if hasattr(components.scheduler, "set_begin_index"): components.scheduler.set_begin_index(t_start) return timesteps, num_inference_steps @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps( components.scheduler, block_state.num_inference_steps, block_state.device, block_state.timesteps, block_state.sigmas, ) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 block_state.timesteps, block_state.num_inference_steps = self.get_timesteps( components, block_state.num_inference_steps, block_state.strength, block_state.device, denoising_start=block_state.denoising_start if denoising_value_valid(block_state.denoising_start) else None, ) block_state.latent_timestep = block_state.timesteps[:1].repeat( block_state.batch_size * block_state.num_images_per_prompt ) if ( block_state.denoising_end is not None and isinstance(block_state.denoising_end, float) and block_state.denoising_end > 0 and block_state.denoising_end < 1 ): block_state.discrete_timestep_cutoff = int( round( components.scheduler.config.num_train_timesteps - (block_state.denoising_end * components.scheduler.config.num_train_timesteps) ) ) block_state.num_inference_steps = len( list(filter(lambda ts: ts >= block_state.discrete_timestep_cutoff, block_state.timesteps)) ) block_state.timesteps = block_state.timesteps[: block_state.num_inference_steps] self.set_block_state(state, block_state) return components, state class StableDiffusionXLSetTimestepsStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for inference" @property def inputs(self) -> List[InputParam]: return [ InputParam("num_inference_steps", default=50), InputParam("timesteps"), InputParam("sigmas"), InputParam("denoising_end"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"), OutputParam( "num_inference_steps", type_hint=int, description="The number of denoising steps to perform at inference time", ), ] @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps( components.scheduler, block_state.num_inference_steps, block_state.device, block_state.timesteps, block_state.sigmas, ) if ( block_state.denoising_end is not None and isinstance(block_state.denoising_end, float) and block_state.denoising_end > 0 and block_state.denoising_end < 1 ): block_state.discrete_timestep_cutoff = int( round( components.scheduler.config.num_train_timesteps - (block_state.denoising_end * 
components.scheduler.config.num_train_timesteps) ) ) block_state.num_inference_steps = len( list(filter(lambda ts: ts >= block_state.discrete_timestep_cutoff, block_state.timesteps)) ) block_state.timesteps = block_state.timesteps[: block_state.num_inference_steps] self.set_block_state(state, block_state) return components, state class StableDiffusionXLInpaintPrepareLatentsStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return "Step that prepares the latents for the inpainting process" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("latents"), InputParam("num_images_per_prompt", default=1), InputParam("denoising_start"), InputParam( "strength", default=0.9999, description="Conceptually, indicates how much to transform the reference `image` (the masked portion of image for inpainting). Must be between 0 and 1. `image` " "will be used as a starting point, adding more noise to it the larger the `strength`. The number of " "denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will " "be maximum and the denoising process will run for the full number of iterations specified in " "`num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of " "`denoising_start` being declared as an integer, the value of `strength` will be ignored.", ), InputParam("generator"), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), InputParam( "latent_timestep", required=True, type_hint=torch.Tensor, description="The timestep that represents the initial noise level for image-to-image/inpainting generation. Can be generated in set_timesteps step.", ), InputParam( "image_latents", required=True, type_hint=torch.Tensor, description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step.", ), InputParam( "mask", required=True, type_hint=torch.Tensor, description="The mask for the inpainting generation. Can be generated in vae_encode step.", ), InputParam( "masked_image_latents", type_hint=torch.Tensor, description="The masked image latents for the inpainting generation (only for inpainting-specific unet). 
Can be generated in vae_encode step.", ), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> List[str]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ), OutputParam( "noise", type_hint=torch.Tensor, description="The noise added to the image latents, used for inpainting generation", ), ] # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline._encode_vae_image with self->components # YiYi TODO: update the _encode_vae_image so that we can use #Coped from @staticmethod def _encode_vae_image(components, image: torch.Tensor, generator: torch.Generator): latents_mean = latents_std = None if hasattr(components.vae.config, "latents_mean") and components.vae.config.latents_mean is not None: latents_mean = torch.tensor(components.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(components.vae.config, "latents_std") and components.vae.config.latents_std is not None: latents_std = torch.tensor(components.vae.config.latents_std).view(1, 4, 1, 1) dtype = image.dtype if components.vae.config.force_upcast: image = image.float() components.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(components.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(components.vae.encode(image), generator=generator) if components.vae.config.force_upcast: components.vae.to(dtype) image_latents = image_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=image_latents.device, dtype=dtype) latents_std = latents_std.to(device=image_latents.device, dtype=dtype) image_latents = (image_latents - latents_mean) * components.vae.config.scaling_factor / latents_std else: image_latents = components.vae.config.scaling_factor * image_latents return image_latents # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_latents adding components as first argument def prepare_latents_inpaint( self, components, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, add_noise=True, ): shape = ( batch_size, num_channels_latents, int(height) // components.vae_scale_factor, int(width) // components.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif latents is None and not is_strength_max: image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(components, image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else components.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * components.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * components.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents, noise, image_latents) return outputs # modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_mask_latents # do not accept do_classifier_free_guidance def prepare_mask_latents( self, components, mask, masked_image, batch_size, height, width, dtype, device, generator ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // components.vae_scale_factor, width // components.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(components, masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype block_state.device = components._execution_device block_state.is_strength_max = block_state.strength == 1.0 # for non-inpainting specific unet, we do not need masked_image_latents if hasattr(components, "unet") and components.unet is not None: if components.unet.config.in_channels == 4: block_state.masked_image_latents = None block_state.add_noise = True if block_state.denoising_start is None else False block_state.height = block_state.image_latents.shape[-2] * components.vae_scale_factor block_state.width = block_state.image_latents.shape[-1] * components.vae_scale_factor block_state.latents, block_state.noise, block_state.image_latents = self.prepare_latents_inpaint( components, block_state.batch_size * block_state.num_images_per_prompt, components.num_channels_latents, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, block_state.latents, image=block_state.image_latents, timestep=block_state.latent_timestep, is_strength_max=block_state.is_strength_max, add_noise=block_state.add_noise, ) # 7. Prepare mask latent variables block_state.mask, block_state.masked_image_latents = self.prepare_mask_latents( components, block_state.mask, block_state.masked_image_latents, block_state.batch_size * block_state.num_images_per_prompt, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, ) self.set_block_state(state, block_state) return components, state class StableDiffusionXLImg2ImgPrepareLatentsStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec("scheduler", EulerDiscreteScheduler), ] @property def description(self) -> str: return "Step that prepares the latents for the image-to-image generation process" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("latents"), InputParam("num_images_per_prompt", default=1), InputParam("denoising_start"), InputParam("generator"), InputParam( "latent_timestep", required=True, type_hint=torch.Tensor, description="The timestep that represents the initial noise level for image-to-image/inpainting generation. Can be generated in set_timesteps step.", ), InputParam( "image_latents", required=True, type_hint=torch.Tensor, description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. 
Can be generated in input step.", ), InputParam("dtype", required=True, type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ) ] @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype block_state.device = components._execution_device block_state.add_noise = True if block_state.denoising_start is None else False if block_state.latents is None: block_state.latents = prepare_latents_img2img( components.vae, components.scheduler, block_state.image_latents, block_state.latent_timestep, block_state.batch_size, block_state.num_images_per_prompt, block_state.dtype, block_state.device, block_state.generator, block_state.add_noise, ) self.set_block_state(state, block_state) return components, state class StableDiffusionXLPrepareLatentsStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("scheduler", EulerDiscreteScheduler), ComponentSpec("vae", AutoencoderKL), ] @property def description(self) -> str: return "Prepare latents step that prepares the latents for the text-to-image generation process" @property def inputs(self) -> List[InputParam]: return [ InputParam("height"), InputParam("width"), InputParam("latents"), InputParam("num_images_per_prompt", default=1), InputParam("generator"), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ) ] @staticmethod def check_inputs(components, block_state): if ( block_state.height is not None and block_state.height % components.vae_scale_factor != 0 or block_state.width is not None and block_state.width % components.vae_scale_factor != 0 ): raise ValueError( f"`height` and `width` have to be divisible by {components.vae_scale_factor} but are {block_state.height} and {block_state.width}." ) @staticmethod # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with self->comp def prepare_latents(comp, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // comp.vae_scale_factor, int(width) // comp.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * comp.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) if block_state.dtype is None: block_state.dtype = components.vae.dtype block_state.device = components._execution_device self.check_inputs(components, block_state) block_state.height = block_state.height or components.default_sample_size * components.vae_scale_factor block_state.width = block_state.width or components.default_sample_size * components.vae_scale_factor block_state.num_channels_latents = components.num_channels_latents block_state.latents = self.prepare_latents( components, block_state.batch_size * block_state.num_images_per_prompt, block_state.num_channels_latents, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, block_state.latents, ) self.set_block_state(state, block_state) return components, state class StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_configs(self) -> List[ConfigSpec]: return [ ConfigSpec("requires_aesthetics_score", False), ] @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("unet", UNet2DConditionModel), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ] @property def description(self) -> str: return "Step that prepares the additional conditioning for the image-to-image/inpainting generation process" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("original_size"), InputParam("target_size"), InputParam("negative_original_size"), InputParam("negative_target_size"), InputParam("crops_coords_top_left", default=(0, 0)), InputParam("negative_crops_coords_top_left", default=(0, 0)), InputParam("num_images_per_prompt", default=1), InputParam("aesthetic_score", default=6.0), InputParam("negative_aesthetic_score", default=2.0), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), InputParam( "pooled_prompt_embeds", required=True, type_hint=torch.Tensor, description="The pooled prompt embeddings to use for the denoising process (used to determine shapes and dtypes for other additional conditioning inputs). Can be generated in text_encoder step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. 
Can be generated in input step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "add_time_ids", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="The time ids to condition the denoising process", ), OutputParam( "negative_add_time_ids", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="The negative time ids to condition the denoising process", ), OutputParam("timestep_cond", type_hint=torch.Tensor, description="The timestep cond to use for LCM"), ] @staticmethod # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids with self->components def _get_add_time_ids( components, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None, ): if components.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list( negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) ) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = ( components.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = components.unet.add_embedding.linear_1.in_features if ( expected_add_embed_dim > passed_add_embed_dim and (expected_add_embed_dim - passed_add_embed_dim) == components.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." ) elif ( expected_add_embed_dim < passed_add_embed_dim and (passed_add_embed_dim - expected_add_embed_dim) == components.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." ) elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return add_time_ids, add_neg_time_ids # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device block_state.vae_scale_factor = components.vae_scale_factor block_state.height, block_state.width = block_state.latents.shape[-2:] block_state.height = block_state.height * block_state.vae_scale_factor block_state.width = block_state.width * block_state.vae_scale_factor block_state.original_size = block_state.original_size or (block_state.height, block_state.width) block_state.target_size = block_state.target_size or (block_state.height, block_state.width) block_state.text_encoder_projection_dim = int(block_state.pooled_prompt_embeds.shape[-1]) if block_state.negative_original_size is None: block_state.negative_original_size = block_state.original_size if block_state.negative_target_size is None: block_state.negative_target_size = block_state.target_size block_state.add_time_ids, block_state.negative_add_time_ids = self._get_add_time_ids( components, block_state.original_size, block_state.crops_coords_top_left, block_state.target_size, block_state.aesthetic_score, block_state.negative_aesthetic_score, block_state.negative_original_size, block_state.negative_crops_coords_top_left, block_state.negative_target_size, dtype=block_state.pooled_prompt_embeds.dtype, text_encoder_projection_dim=block_state.text_encoder_projection_dim, ) block_state.add_time_ids = block_state.add_time_ids.repeat( block_state.batch_size * block_state.num_images_per_prompt, 1 ).to(device=block_state.device) block_state.negative_add_time_ids = block_state.negative_add_time_ids.repeat( block_state.batch_size * block_state.num_images_per_prompt, 1 ).to(device=block_state.device) # Optionally get Guidance Scale Embedding for LCM block_state.timestep_cond = None if ( hasattr(components, "unet") and components.unet is not None and components.unet.config.time_cond_proj_dim is not None ): # TODO(yiyi, aryan): Ideally, this should be `embedded_guidance_scale` instead of pulling from guider. Guider scales should be different from this! 
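            # `time_cond_proj_dim` is only set on guidance-distilled UNets (e.g. latent
            # consistency models). For those checkpoints the guidance scale is not applied
            # through classifier-free guidance at sampling time; it is embedded below and
            # handed to the UNet as `timestep_cond` instead.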
block_state.guidance_scale_tensor = torch.tensor(components.guider.guidance_scale - 1).repeat( block_state.batch_size * block_state.num_images_per_prompt ) block_state.timestep_cond = self.get_guidance_scale_embedding( block_state.guidance_scale_tensor, embedding_dim=components.unet.config.time_cond_proj_dim ).to(device=block_state.device, dtype=block_state.latents.dtype) self.set_block_state(state, block_state) return components, state class StableDiffusionXLPrepareAdditionalConditioningStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return "Step that prepares the additional conditioning for the text-to-image generation process" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("unet", UNet2DConditionModel), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ] @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("original_size"), InputParam("target_size"), InputParam("negative_original_size"), InputParam("negative_target_size"), InputParam("crops_coords_top_left", default=(0, 0)), InputParam("negative_crops_coords_top_left", default=(0, 0)), InputParam("num_images_per_prompt", default=1), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), InputParam( "pooled_prompt_embeds", required=True, type_hint=torch.Tensor, description="The pooled prompt embeddings to use for the denoising process (used to determine shapes and dtypes for other additional conditioning inputs). Can be generated in text_encoder step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "add_time_ids", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="The time ids to condition the denoising process", ), OutputParam( "negative_add_time_ids", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="The negative time ids to condition the denoising process", ), OutputParam("timestep_cond", type_hint=torch.Tensor, description="The timestep cond to use for LCM"), ] @staticmethod # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids with self->components def _get_add_time_ids( components, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( components.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = components.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device block_state.height, block_state.width = block_state.latents.shape[-2:] block_state.height = block_state.height * components.vae_scale_factor block_state.width = block_state.width * components.vae_scale_factor block_state.original_size = block_state.original_size or (block_state.height, block_state.width) block_state.target_size = block_state.target_size or (block_state.height, block_state.width) block_state.text_encoder_projection_dim = int(block_state.pooled_prompt_embeds.shape[-1]) block_state.add_time_ids = self._get_add_time_ids( components, block_state.original_size, block_state.crops_coords_top_left, block_state.target_size, block_state.pooled_prompt_embeds.dtype, text_encoder_projection_dim=block_state.text_encoder_projection_dim, ) if block_state.negative_original_size is not None and block_state.negative_target_size is not None: block_state.negative_add_time_ids = self._get_add_time_ids( components, block_state.negative_original_size, block_state.negative_crops_coords_top_left, block_state.negative_target_size, block_state.pooled_prompt_embeds.dtype, text_encoder_projection_dim=block_state.text_encoder_projection_dim, ) else: block_state.negative_add_time_ids = block_state.add_time_ids block_state.add_time_ids = block_state.add_time_ids.repeat( block_state.batch_size * block_state.num_images_per_prompt, 1 ).to(device=block_state.device) block_state.negative_add_time_ids = block_state.negative_add_time_ids.repeat( block_state.batch_size * block_state.num_images_per_prompt, 1 ).to(device=block_state.device) # Optionally get Guidance Scale Embedding for LCM block_state.timestep_cond = None if ( hasattr(components, "unet") and components.unet is not None and components.unet.config.time_cond_proj_dim is not None ): # TODO(yiyi, aryan): Ideally, this should be `embedded_guidance_scale` instead of pulling from guider. Guider scales should be different from this! 
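            # The embedded value follows the latent-consistency-model convention of
            # `guidance_scale - 1`, so a guidance scale of 1 maps to a zero embedding
            # (no guidance). `timestep_cond` stays `None` for regular SDXL UNets, whose
            # `time_cond_proj_dim` is unset.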
block_state.guidance_scale_tensor = torch.tensor(components.guider.guidance_scale - 1).repeat( block_state.batch_size * block_state.num_images_per_prompt ) block_state.timestep_cond = self.get_guidance_scale_embedding( block_state.guidance_scale_tensor, embedding_dim=components.unet.config.time_cond_proj_dim ).to(device=block_state.device, dtype=block_state.latents.dtype) self.set_block_state(state, block_state) return components, state class StableDiffusionXLControlNetInputStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("controlnet", ControlNetModel), ComponentSpec( "control_image_processor", VaeImageProcessor, config=FrozenDict({"do_convert_rgb": True, "do_normalize": False}), default_creation_method="from_config", ), ] @property def description(self) -> str: return "step that prepare inputs for controlnet" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("control_image", required=True), InputParam("control_guidance_start", default=0.0), InputParam("control_guidance_end", default=1.0), InputParam("controlnet_conditioning_scale", default=1.0), InputParam("guess_mode", default=False), InputParam("num_images_per_prompt", default=1), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( "crops_coords", type_hint=Optional[Tuple[int]], description="The crop coordinates to use for preprocess/postprocess the image and mask, for inpainting task only. Can be generated in vae_encode step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("controlnet_cond", type_hint=torch.Tensor, description="The processed control image"), OutputParam( "control_guidance_start", type_hint=List[float], description="The controlnet guidance start values" ), OutputParam( "control_guidance_end", type_hint=List[float], description="The controlnet guidance end values" ), OutputParam( "conditioning_scale", type_hint=List[float], description="The controlnet conditioning scale values" ), OutputParam("guess_mode", type_hint=bool, description="Whether guess mode is used"), OutputParam("controlnet_keep", type_hint=List[float], description="The controlnet keep values"), ] # Modified from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image # 1. return image without apply any guidance # 2. 
add crops_coords and resize_mode to preprocess() @staticmethod def prepare_control_image( components, image, width, height, batch_size, num_images_per_prompt, device, dtype, crops_coords=None, ): if crops_coords is not None: image = components.control_image_processor.preprocess( image, height=height, width=width, crops_coords=crops_coords, resize_mode="fill" ).to(dtype=torch.float32) else: image = components.control_image_processor.preprocess(image, height=height, width=width).to( dtype=torch.float32 ) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) return image @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) # (1) prepare controlnet inputs block_state.device = components._execution_device block_state.height, block_state.width = block_state.latents.shape[-2:] block_state.height = block_state.height * components.vae_scale_factor block_state.width = block_state.width * components.vae_scale_factor controlnet = unwrap_module(components.controlnet) # (1.1) # control_guidance_start/control_guidance_end (align format) if not isinstance(block_state.control_guidance_start, list) and isinstance( block_state.control_guidance_end, list ): block_state.control_guidance_start = len(block_state.control_guidance_end) * [ block_state.control_guidance_start ] elif not isinstance(block_state.control_guidance_end, list) and isinstance( block_state.control_guidance_start, list ): block_state.control_guidance_end = len(block_state.control_guidance_start) * [ block_state.control_guidance_end ] elif not isinstance(block_state.control_guidance_start, list) and not isinstance( block_state.control_guidance_end, list ): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 block_state.control_guidance_start, block_state.control_guidance_end = ( mult * [block_state.control_guidance_start], mult * [block_state.control_guidance_end], ) # (1.2) # controlnet_conditioning_scale (align format) if isinstance(controlnet, MultiControlNetModel) and isinstance( block_state.controlnet_conditioning_scale, float ): block_state.controlnet_conditioning_scale = [block_state.controlnet_conditioning_scale] * len( controlnet.nets ) # (1.3) # global_pool_conditions block_state.global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) # (1.4) # guess_mode block_state.guess_mode = block_state.guess_mode or block_state.global_pool_conditions # (1.5) # control_image if isinstance(controlnet, ControlNetModel): block_state.control_image = self.prepare_control_image( components, image=block_state.control_image, width=block_state.width, height=block_state.height, batch_size=block_state.batch_size * block_state.num_images_per_prompt, num_images_per_prompt=block_state.num_images_per_prompt, device=block_state.device, dtype=controlnet.dtype, crops_coords=block_state.crops_coords, ) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in block_state.control_image: control_image = self.prepare_control_image( components, image=control_image_, width=block_state.width, height=block_state.height, batch_size=block_state.batch_size * 
block_state.num_images_per_prompt, num_images_per_prompt=block_state.num_images_per_prompt, device=block_state.device, dtype=controlnet.dtype, crops_coords=block_state.crops_coords, ) control_images.append(control_image) block_state.control_image = control_images else: assert False # (1.6) # controlnet_keep block_state.controlnet_keep = [] for i in range(len(block_state.timesteps)): keeps = [ 1.0 - float(i / len(block_state.timesteps) < s or (i + 1) / len(block_state.timesteps) > e) for s, e in zip(block_state.control_guidance_start, block_state.control_guidance_end) ] block_state.controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) block_state.controlnet_cond = block_state.control_image block_state.conditioning_scale = block_state.controlnet_conditioning_scale self.set_block_state(state, block_state) return components, state class StableDiffusionXLControlNetUnionInputStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("controlnet", ControlNetUnionModel), ComponentSpec( "control_image_processor", VaeImageProcessor, config=FrozenDict({"do_convert_rgb": True, "do_normalize": False}), default_creation_method="from_config", ), ] @property def description(self) -> str: return "step that prepares inputs for the ControlNetUnion model" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("control_image", required=True), InputParam("control_mode", required=True), InputParam("control_guidance_start", default=0.0), InputParam("control_guidance_end", default=1.0), InputParam("controlnet_conditioning_scale", default=1.0), InputParam("guess_mode", default=False), InputParam("num_images_per_prompt", default=1), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Used to determine the shape of the control images. Can be generated in prepare_latent step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), InputParam( "dtype", required=True, type_hint=torch.dtype, description="The dtype of model tensor inputs. Can be generated in input step.", ), InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process. Needed to determine `controlnet_keep`. Can be generated in set_timesteps step.", ), InputParam( "crops_coords", type_hint=Optional[Tuple[int]], description="The crop coordinates to use for preprocess/postprocess the image and mask, for inpainting task only. 
Can be generated in vae_encode step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("controlnet_cond", type_hint=List[torch.Tensor], description="The processed control images"), OutputParam( "control_type_idx", type_hint=List[int], description="The control mode indices", kwargs_type="controlnet_kwargs", ), OutputParam( "control_type", type_hint=torch.Tensor, description="The control type tensor that specifies which control type is active", kwargs_type="controlnet_kwargs", ), OutputParam("control_guidance_start", type_hint=float, description="The controlnet guidance start value"), OutputParam("control_guidance_end", type_hint=float, description="The controlnet guidance end value"), OutputParam( "conditioning_scale", type_hint=List[float], description="The controlnet conditioning scale values" ), OutputParam("guess_mode", type_hint=bool, description="Whether guess mode is used"), OutputParam("controlnet_keep", type_hint=List[float], description="The controlnet keep values"), ] # Modified from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image # 1. return image without apply any guidance # 2. add crops_coords and resize_mode to preprocess() @staticmethod def prepare_control_image( components, image, width, height, batch_size, num_images_per_prompt, device, dtype, crops_coords=None, ): if crops_coords is not None: image = components.control_image_processor.preprocess( image, height=height, width=width, crops_coords=crops_coords, resize_mode="fill" ).to(dtype=torch.float32) else: image = components.control_image_processor.preprocess(image, height=height, width=width).to( dtype=torch.float32 ) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) return image @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) controlnet = unwrap_module(components.controlnet) device = components._execution_device dtype = block_state.dtype or components.controlnet.dtype block_state.height, block_state.width = block_state.latents.shape[-2:] block_state.height = block_state.height * components.vae_scale_factor block_state.width = block_state.width * components.vae_scale_factor # control_guidance_start/control_guidance_end (align format) if not isinstance(block_state.control_guidance_start, list) and isinstance( block_state.control_guidance_end, list ): block_state.control_guidance_start = len(block_state.control_guidance_end) * [ block_state.control_guidance_start ] elif not isinstance(block_state.control_guidance_end, list) and isinstance( block_state.control_guidance_start, list ): block_state.control_guidance_end = len(block_state.control_guidance_start) * [ block_state.control_guidance_end ] # guess_mode block_state.global_pool_conditions = controlnet.config.global_pool_conditions block_state.guess_mode = block_state.guess_mode or block_state.global_pool_conditions # control_image if not isinstance(block_state.control_image, list): block_state.control_image = [block_state.control_image] # control_mode if not isinstance(block_state.control_mode, list): block_state.control_mode = [block_state.control_mode] if len(block_state.control_image) != len(block_state.control_mode): raise 
ValueError("Expected len(control_image) == len(control_type)") # control_type block_state.num_control_type = controlnet.config.num_control_type block_state.control_type = [0 for _ in range(block_state.num_control_type)] for control_idx in block_state.control_mode: block_state.control_type[control_idx] = 1 block_state.control_type = torch.Tensor(block_state.control_type) block_state.control_type = block_state.control_type.reshape(1, -1).to(device, dtype=block_state.dtype) repeat_by = block_state.batch_size * block_state.num_images_per_prompt // block_state.control_type.shape[0] block_state.control_type = block_state.control_type.repeat_interleave(repeat_by, dim=0) # prepare control_image for idx, _ in enumerate(block_state.control_image): block_state.control_image[idx] = self.prepare_control_image( components, image=block_state.control_image[idx], width=block_state.width, height=block_state.height, batch_size=block_state.batch_size * block_state.num_images_per_prompt, num_images_per_prompt=block_state.num_images_per_prompt, device=device, dtype=dtype, crops_coords=block_state.crops_coords, ) block_state.height, block_state.width = block_state.control_image[idx].shape[-2:] # controlnet_keep block_state.controlnet_keep = [] for i in range(len(block_state.timesteps)): block_state.controlnet_keep.append( 1.0 - float( i / len(block_state.timesteps) < block_state.control_guidance_start or (i + 1) / len(block_state.timesteps) > block_state.control_guidance_end ) ) block_state.control_type_idx = block_state.control_mode block_state.controlnet_cond = block_state.control_image block_state.conditioning_scale = block_state.controlnet_conditioning_scale self.set_block_state(state, block_state) return components, state
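

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diffusers): the ControlNet input steps above
# turn `control_guidance_start` / `control_guidance_end` into a per-step
# `controlnet_keep` schedule. The standalone helper below re-derives that
# schedule for a single ControlNet so the behaviour can be inspected without a
# pipeline; the name `compute_controlnet_keep` is invented for this example.
def compute_controlnet_keep(num_steps, start=0.0, end=1.0):
    """Return 1.0 for denoising steps where the ControlNet is applied, else 0.0."""
    keep = []
    for i in range(num_steps):
        # A step is skipped when it starts before `start` or finishes after `end`,
        # mirroring the expression used in the blocks above.
        inactive = (i / num_steps) < start or ((i + 1) / num_steps) > end
        keep.append(1.0 - float(inactive))
    return keep


# Example: with 10 steps and guidance restricted to the middle of the schedule,
# the first two and the last two steps get a conditioning scale of 0.
# compute_controlnet_keep(10, start=0.2, end=0.8)
# -> [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]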
# Source file: diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py
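

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of diffusers): how the set-timesteps blocks
# above trim a full schedule. For image-to-image/inpainting, only the last
# `num_inference_steps * strength` timesteps are kept; `denoising_end` then
# drops every timestep below a discrete cutoff. The helper names and the plain
# Python lists below are assumptions made for this example; the real blocks
# operate on the scheduler's timestep tensor.
def slice_by_strength(timesteps, num_inference_steps, strength):
    # Keep only the tail of the schedule: the earlier (noisier) steps are skipped
    # because the reference image already provides the coarse structure.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start


def truncate_by_denoising_end(timesteps, num_train_timesteps, denoising_end):
    # Mirror the `discrete_timestep_cutoff` filter used in the blocks above.
    cutoff = int(round(num_train_timesteps - denoising_end * num_train_timesteps))
    return [t for t in timesteps if t >= cutoff]


# Example with a 10-step schedule over 1000 training timesteps:
# full = [999, 899, 799, 699, 599, 499, 399, 299, 199, 99]
# slice_by_strength(full, 10, 0.3)           -> ([299, 199, 99], 3)
# truncate_by_denoising_end(full, 1000, 0.8) -> [999, 899, 799, 699, 599, 499, 399, 299]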