Dataset columns: text (string, 7 to 1.24M chars) · id (string, 14 to 166 chars) · metadata (dict) · __index_level_0__ (int64, 0 to 519)
use candle::{DType, IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};

use super::image_encoder::ImageEncoderViT;
use super::mask_decoder::MaskDecoder;
use super::prompt_encoder::PromptEncoder;
use super::tiny_vit::{tiny_vit_5m, TinyViT};

const PROMPT_EMBED_DIM: usize = 256;
pub const IMAGE_SIZE: usize = 1024;
const VIT_PATCH_SIZE: usize = 16;
const PRED_IOU_THRESH: f32 = 0.88;
const STABILITY_SCORE_OFFSET: f32 = 1.0;
const STABILITY_SCORE_THRESHOLD: f32 = 0.95;
const MODEL_MASK_THRESHOLD: f32 = 0.0;
const CROP_NMS_THRESH: f32 = 0.7;

#[derive(Debug)]
enum ImageEncoder {
    Original(ImageEncoderViT),
    TinyViT(TinyViT),
}

impl Module for ImageEncoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::Original(vit) => vit.forward(xs),
            Self::TinyViT(vit) => vit.forward(xs),
        }
    }
}

#[derive(Debug)]
pub struct Sam {
    image_encoder: ImageEncoder,
    prompt_encoder: PromptEncoder,
    mask_decoder: MaskDecoder,
    pixel_mean: Tensor,
    pixel_std: Tensor,
}

impl Sam {
    pub fn new(
        encoder_embed_dim: usize,
        encoder_depth: usize,
        encoder_num_heads: usize,
        encoder_global_attn_indexes: &[usize],
        vb: VarBuilder,
    ) -> Result<Self> {
        let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
        let image_encoder = ImageEncoderViT::new(
            IMAGE_SIZE,
            VIT_PATCH_SIZE,
            3,
            encoder_embed_dim,
            encoder_depth,
            encoder_num_heads,
            PROMPT_EMBED_DIM,
            /* qkv_bias */ true,
            /* use_rel_pos */ true,
            /* use_abs_pos */ true,
            /* window_size */ 14,
            /* global_attn_indexes */ encoder_global_attn_indexes,
            vb.pp("image_encoder"),
        )?;
        let prompt_encoder = PromptEncoder::new(
            PROMPT_EMBED_DIM,
            (image_embedding_size, image_embedding_size),
            (IMAGE_SIZE, IMAGE_SIZE),
            16,
            vb.pp("prompt_encoder"),
        )?;
        let mask_decoder = MaskDecoder::new(
            PROMPT_EMBED_DIM,
            /* num_multitask_outputs */ 3,
            /* iou_head_depth */ 3,
            /* iou_head_hidden_dim */ 256,
            vb.pp("mask_decoder"),
        )?;
        let pixel_mean =
            Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
        let pixel_std =
            Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
        Ok(Self {
            image_encoder: ImageEncoder::Original(image_encoder),
            prompt_encoder,
            mask_decoder,
            pixel_std,
            pixel_mean,
        })
    }

    pub fn new_tiny(vb: VarBuilder) -> Result<Self> {
        let image_embedding_size = IMAGE_SIZE / VIT_PATCH_SIZE;
        let image_encoder = tiny_vit_5m(vb.pp("image_encoder"))?;
        let prompt_encoder = PromptEncoder::new(
            PROMPT_EMBED_DIM,
            (image_embedding_size, image_embedding_size),
            (IMAGE_SIZE, IMAGE_SIZE),
            16,
            vb.pp("prompt_encoder"),
        )?;
        let mask_decoder = MaskDecoder::new(
            PROMPT_EMBED_DIM,
            /* num_multitask_outputs */ 3,
            /* iou_head_depth */ 3,
            /* iou_head_hidden_dim */ 256,
            vb.pp("mask_decoder"),
        )?;
        let pixel_mean =
            Tensor::new(&[123.675f32, 116.28, 103.53], vb.device())?.reshape((3, 1, 1))?;
        let pixel_std =
            Tensor::new(&[58.395f32, 57.12, 57.375], vb.device())?.reshape((3, 1, 1))?;
        Ok(Self {
            image_encoder: ImageEncoder::TinyViT(image_encoder),
            prompt_encoder,
            mask_decoder,
            pixel_std,
            pixel_mean,
        })
    }

    pub fn embeddings(&self, img: &Tensor) -> Result<Tensor> {
        let img = self.preprocess(img)?.unsqueeze(0)?;
        self.image_encoder.forward(&img)
    }

    pub fn forward(
        &self,
        img: &Tensor,
        points: &[(f64, f64, bool)],
        multimask_output: bool,
    ) -> Result<(Tensor, Tensor)> {
        let (_c, original_h, original_w) = img.dims3()?;
        let img = self.preprocess(img)?.unsqueeze(0)?;
        let img_embeddings = self.image_encoder.forward(&img)?;
        let (low_res_mask, iou) = self.forward_for_embeddings(
            &img_embeddings,
            original_h,
            original_w,
            points,
            multimask_output,
        )?;
        let mask = low_res_mask
            .upsample_nearest2d(IMAGE_SIZE, IMAGE_SIZE)?
            .get(0)?
            .i((.., ..original_h, ..original_w))?;
        Ok((mask, iou))
    }

    /// Generate the mask and IOU predictions from some image embeddings and prompt.
    ///
    /// The prompt is specified as a list of points `(x, y, b)`. `x` and `y` are the point
    /// coordinates (between 0 and 1) and `b` is `true` for points that should be part of the mask
    /// and `false` for points that should be part of the background and so excluded from the mask.
    pub fn forward_for_embeddings(
        &self,
        img_embeddings: &Tensor,
        original_h: usize,
        original_w: usize,
        points: &[(f64, f64, bool)],
        multimask_output: bool,
    ) -> Result<(Tensor, Tensor)> {
        let image_pe = self.prompt_encoder.get_dense_pe()?;
        let points = if points.is_empty() {
            None
        } else {
            let n_points = points.len();
            let xys = points
                .iter()
                .flat_map(|(x, y, _b)| {
                    let x = (*x as f32) * (original_w as f32);
                    let y = (*y as f32) * (original_h as f32);
                    [x, y]
                })
                .collect::<Vec<_>>();
            let labels = points
                .iter()
                .map(|(_x, _y, b)| if *b { 1f32 } else { 0f32 })
                .collect::<Vec<_>>();
            let points = Tensor::from_vec(xys, (1, n_points, 2), img_embeddings.device())?;
            let labels = Tensor::from_vec(labels, (1, n_points), img_embeddings.device())?;
            Some((points, labels))
        };
        let points = points.as_ref().map(|xy| (&xy.0, &xy.1));
        let (sparse_prompt_embeddings, dense_prompt_embeddings) =
            self.prompt_encoder.forward(points, None, None)?;
        self.mask_decoder.forward(
            img_embeddings,
            &image_pe,
            &sparse_prompt_embeddings,
            &dense_prompt_embeddings,
            multimask_output,
        )
    }

    pub fn unpreprocess(&self, img: &Tensor) -> Result<Tensor> {
        let img = img
            .broadcast_mul(&self.pixel_std)?
            .broadcast_add(&self.pixel_mean)?;
        img.maximum(&img.zeros_like()?)?
            .minimum(&(img.ones_like()? * 255.)?)
    }

    pub fn preprocess(&self, img: &Tensor) -> Result<Tensor> {
        let (_c, h, w) = img.dims3()?;
        let img = img
            .to_dtype(DType::F32)?
            .broadcast_sub(&self.pixel_mean)?
            .broadcast_div(&self.pixel_std)?;
        if h > IMAGE_SIZE || w > IMAGE_SIZE {
            candle::bail!("image is too large ({w}, {h}), maximum size {IMAGE_SIZE}")
        }
        let img = img.pad_with_zeros(1, 0, IMAGE_SIZE - h)?;
        img.pad_with_zeros(2, 0, IMAGE_SIZE - w)
    }

    fn process_crop(
        &self,
        img: &Tensor,
        cb: CropBox,
        point_grids: &[(f64, f64)],
    ) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
        // Crop the image and calculate embeddings.
        let img = img.i((.., cb.y0..cb.y1, cb.x0..cb.x1))?;
        let img = self.preprocess(&img)?.unsqueeze(0)?;
        let img_embeddings = self.image_encoder.forward(&img)?;

        let crop_w = cb.x1 - cb.x0;
        let crop_h = cb.y1 - cb.y0;

        // Generate masks for this crop.
        let image_pe = self.prompt_encoder.get_dense_pe()?;
        let points = point_grids
            .iter()
            .map(|&(x, y)| vec![x as f32 * crop_w as f32, y as f32 * crop_h as f32])
            .collect::<Vec<_>>();

        let mut bboxes = Vec::new();
        for points in points.chunks(64) {
            // Run the model on this batch.
            let points_len = points.len();
            let in_points = Tensor::new(points.to_vec(), img.device())?.unsqueeze(1)?;
            let in_labels = Tensor::ones((points_len, 1), DType::F32, img.device())?;
            let (sparse_prompt_embeddings, dense_prompt_embeddings) =
                self.prompt_encoder
                    .forward(Some((&in_points, &in_labels)), None, None)?;

            let (low_res_mask, iou_predictions) = self.mask_decoder.forward(
                &img_embeddings,
                &image_pe,
                &sparse_prompt_embeddings,
                &dense_prompt_embeddings,
                /* multimask_output */ true,
            )?;
            let low_res_mask = low_res_mask.flatten(0, 1)?;
            let iou_predictions = iou_predictions.flatten(0, 1)?.to_vec1::<f32>()?;
            let dev = low_res_mask.device();

            for (i, iou) in iou_predictions.iter().enumerate() {
                // Filter by predicted IoU.
                if *iou < PRED_IOU_THRESH {
                    continue;
                }
                let low_res_mask = low_res_mask.get(i)?;

                // Calculate stability score.
                let bound = Tensor::new(MODEL_MASK_THRESHOLD + STABILITY_SCORE_OFFSET, dev)?
                    .broadcast_as(low_res_mask.shape())?;
                let intersections = low_res_mask
                    .ge(&bound)?
                    .to_dtype(DType::F32)?
                    .sum_all()?
                    .to_vec0::<f32>()?;
                let bound = Tensor::new(MODEL_MASK_THRESHOLD - STABILITY_SCORE_OFFSET, dev)?
                    .broadcast_as(low_res_mask.shape())?;
                let unions = low_res_mask
                    .ge(&bound)?
                    .to_dtype(DType::F32)?
                    .sum_all()?
                    .to_vec0::<f32>()?;
                let stability_score = intersections / unions;
                if stability_score < STABILITY_SCORE_THRESHOLD {
                    continue;
                }

                // Threshold masks and calculate boxes.
                let low_res_mask = low_res_mask
                    .ge(&Tensor::new(0f32, dev)?.broadcast_as(low_res_mask.shape())?)?
                    .to_dtype(DType::U32)?;
                let low_res_mask_per_x = low_res_mask.sum(0)?.to_vec1::<u32>()?;
                let low_res_mask_per_y = low_res_mask.sum(1)?.to_vec1::<u32>()?;
                let min_max_x = min_max_indexes(&low_res_mask_per_x);
                let min_max_y = min_max_indexes(&low_res_mask_per_y);
                if let Some(((x0, x1), (y0, y1))) = min_max_x.zip(min_max_y) {
                    let bbox = crate::object_detection::Bbox {
                        xmin: x0 as f32,
                        ymin: y0 as f32,
                        xmax: x1 as f32,
                        ymax: y1 as f32,
                        confidence: *iou,
                        data: low_res_mask,
                    };
                    bboxes.push(bbox);
                }
                // TODO:
                // Filter boxes that touch crop boundaries
                // Compress to RLE.
            }
        }

        let mut bboxes = vec![bboxes];
        // Remove duplicates within this crop.
        crate::object_detection::non_maximum_suppression(&mut bboxes, CROP_NMS_THRESH);

        // TODO: Return to the original image frame.
        Ok(bboxes.remove(0))
    }

    pub fn generate_masks(
        &self,
        img: &Tensor,
        points_per_side: usize,
        crop_n_layer: usize,
        crop_overlap_ratio: f64,
        crop_n_points_downscale_factor: usize,
    ) -> Result<Vec<crate::object_detection::Bbox<Tensor>>> {
        let (_c, h, w) = img.dims3()?;
        let point_grids = build_all_layer_point_grids(
            points_per_side,
            crop_n_layer,
            crop_n_points_downscale_factor,
        );
        let crop_boxes = generate_crop_boxes((h, w), crop_n_layer, crop_overlap_ratio);
        let mut bboxes = Vec::new();
        for crop_box in crop_boxes.into_iter() {
            let layer_idx = crop_box.layer_idx;
            let b = self.process_crop(img, crop_box, &point_grids[layer_idx])?;
            bboxes.extend(b)
        }
        // TODO: remove duplicates
        Ok(bboxes)
    }
}

// Return the first and last indexes i for which values[i] > 0
fn min_max_indexes(values: &[u32]) -> Option<(usize, usize)> {
    let (mut min_i, mut max_i) = (usize::MAX, usize::MIN);
    for (i, &s) in values.iter().enumerate() {
        if s == 0 {
            continue;
        }
        min_i = usize::min(i, min_i);
        max_i = usize::max(i, max_i);
    }
    if max_i < min_i {
        None
    } else {
        Some((min_i, max_i))
    }
}

#[derive(Debug)]
struct CropBox {
    x0: usize,
    y0: usize,
    x1: usize,
    y1: usize,
    layer_idx: usize,
}

impl CropBox {
    fn new(x0: usize, y0: usize, x1: usize, y1: usize, layer_idx: usize) -> Self {
        Self {
            x0,
            y0,
            x1,
            y1,
            layer_idx,
        }
    }
}

fn generate_crop_boxes(
    (im_h, im_w): (usize, usize),
    n_layers: usize,
    overlap_ratio: f64,
) -> Vec<CropBox> {
    fn crop_len(orig_len: usize, n_crops: usize, overlap: usize) -> usize {
        f64::ceil((overlap * (n_crops - 1) + orig_len) as f64 / n_crops as f64) as usize
    }

    let short_side = usize::min(im_h, im_w);

    let mut crop_boxes = Vec::new();

    // Original image.
    crop_boxes.push(CropBox::new(0, 0, im_w, im_h, 0));

    for layer_idx in 1..=n_layers {
        let n_crops_per_side = 1 << layer_idx;
        let overlap = (overlap_ratio * short_side as f64 * 2. / n_crops_per_side as f64) as usize;
        let crop_w = crop_len(im_w, n_crops_per_side, overlap);
        let crop_h = crop_len(im_h, n_crops_per_side, overlap);
        for i_x in 0..n_crops_per_side {
            let x0 = (crop_w - overlap) * i_x;
            for i_y in 0..n_crops_per_side {
                let y0 = (crop_h - overlap) * i_y;
                let x1 = usize::min(im_w, x0 + crop_w);
                let y1 = usize::min(im_h, y0 + crop_h);
                crop_boxes.push(CropBox::new(x0, y0, x1, y1, layer_idx));
            }
        }
    }

    crop_boxes
}

// Generates a 2D grid of points evenly spaced in [0,1]x[0,1].
fn build_point_grid(n_per_side: usize) -> Vec<(f64, f64)> {
    let offset = 1f64 / (2 * n_per_side) as f64;
    let mut points = Vec::with_capacity(n_per_side * n_per_side);
    for i_x in 0..n_per_side {
        let x = offset + i_x as f64 / n_per_side as f64;
        for i_y in 0..n_per_side {
            let y = offset + i_y as f64 / n_per_side as f64;
            points.push((x, y))
        }
    }
    points
}

fn build_all_layer_point_grids(
    n_per_side: usize,
    n_layers: usize,
    scale_per_layer: usize,
) -> Vec<Vec<(f64, f64)>> {
    let mut points_by_layer = Vec::with_capacity(n_layers + 1);
    for i in 0..=n_layers {
        let n_points = n_per_side / scale_per_layer.pow(i as u32);
        points_by_layer.push(build_point_grid(n_points))
    }
    points_by_layer
}
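// Usage sketch (comment only): prompting for a single mask with one foreground
// point at the image center. Point coordinates are normalized to [0, 1] as
// documented on `forward_for_embeddings`, and the boolean marks foreground
// (`true`) vs background (`false`). The `sam` model and `img` tensor of shape
// (c, h, w) are assumed to have been built elsewhere.
//
//     let (mask, iou) = sam.forward(&img, &[(0.5, 0.5, true)], /* multimask_output */ false)?;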
candle/candle-transformers/src/models/segment_anything/sam.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/sam.rs", "repo_id": "candle", "token_count": 8444 }
43
use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, LayerNorm, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;

// https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/configuration_stablelm.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub(crate) vocab_size: usize,
    pub(crate) intermediate_size: usize,
    pub(crate) hidden_size: usize,
    pub(crate) num_hidden_layers: usize,
    pub(crate) num_attention_heads: usize,
    pub(crate) num_key_value_heads: usize,
    pub(crate) hidden_act: Activation,
    pub(crate) partial_rotary_factor: f64,
    pub(crate) rope_theta: f64,
    pub(crate) max_position_embeddings: usize,
    pub(crate) layer_norm_eps: f64,
    pub(crate) use_cache: bool,
    #[serde(default)]
    pub(crate) use_qkv_bias: bool, // Used in StableLM-2
    #[serde(default)]
    pub(crate) use_flash_attn: bool, // Not in config.json
}

impl Config {
    pub fn stablelm_3b_4e1t(use_flash_attn: bool) -> Self {
        Self {
            vocab_size: 50304,
            intermediate_size: 6912,
            hidden_size: 2560,
            num_hidden_layers: 32,
            num_attention_heads: 32,
            num_key_value_heads: 32,
            hidden_act: Activation::Silu,
            partial_rotary_factor: 0.25,
            rope_theta: 10_000.,
            max_position_embeddings: 4096,
            layer_norm_eps: 1e-5,
            use_qkv_bias: false,
            use_cache: true,
            use_flash_attn,
        }
    }

    pub fn head_dim(&self) -> usize {
        self.hidden_size / self.num_attention_heads
    }

    pub fn rotary_ndims(&self) -> usize {
        (self.head_dim() as f64 * self.partial_rotary_factor) as usize
    }

    pub fn num_kv_groups(&self) -> usize {
        self.num_attention_heads / self.num_key_value_heads
    }

    pub fn set_use_flash_attn(&mut self, use_flash_attn: bool) {
        self.use_flash_attn = use_flash_attn
    }
}

#[derive(Debug)]
pub(crate) struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}

fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let xs = xs.chunk(2, D::Minus1)?;
    Tensor::cat(&[&xs[1].neg()?, &xs[0]], D::Minus1)
}

impl RotaryEmbedding {
    pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.rotary_ndims();
        let max_seq_len = cfg.max_position_embeddings;
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }

    pub(crate) fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
        let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
        Ok((q_embed, k_embed))
    }
}

#[derive(Debug)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
    gate_proj: Linear,
    up_proj: Linear,
    down_proj: Linear,
    act_fn: Activation,
    span: tracing::Span,
}

impl MLP {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let intermediate_sz = cfg.intermediate_size;
        let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
        let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
        let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
        Ok(Self {
            gate_proj,
            up_proj,
            down_proj,
            act_fn: cfg.hidden_act,
            span: tracing::span!(tracing::Level::TRACE, "mlp"),
        })
    }
}

impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
        let rhs = xs.apply(&self.up_proj)?;
        (lhs * rhs)?.apply(&self.down_proj)
    }
}

#[cfg(feature = "flash-attn")]
fn flash_attn(
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    softmax_scale: f32,
    causal: bool,
) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}

#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("compile with '--features flash-attn'")
}

#[derive(Debug)]
struct Attention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    num_heads: usize,
    num_kv_heads: usize,
    num_kv_groups: usize,
    head_dim: usize,
    hidden_size: usize,
    rotary_emb: Arc<RotaryEmbedding>,
    kv_cache: Option<(Tensor, Tensor)>,
    use_cache: bool,
    rotary_ndims: usize,
    use_flash_attn: bool,
    span: tracing::Span,
}

impl Attention {
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_sz = cfg.hidden_size;
        let head_dim = cfg.head_dim();
        let num_heads = cfg.num_attention_heads;
        let num_kv_heads = cfg.num_key_value_heads;
        let linear_layer = if cfg.use_qkv_bias {
            linear
        } else {
            linear_no_bias
        };
        let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
        let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
        let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
        let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            num_heads,
            num_kv_heads,
            num_kv_groups: cfg.num_kv_groups(),
            head_dim,
            hidden_size: hidden_sz,
            rotary_emb,
            kv_cache: None,
            use_cache: cfg.use_cache,
            rotary_ndims: cfg.rotary_ndims(),
            use_flash_attn: cfg.use_flash_attn,
            span: tracing::span!(tracing::Level::TRACE, "attn"),
        })
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_sz, q_len, _) = xs.dims3()?;

        let query_states = self.q_proj.forward(xs)?;
        let key_states = self.k_proj.forward(xs)?;
        let value_states = self.v_proj.forward(xs)?;

        let query_states = query_states
            .reshape((b_sz, q_len, self.num_heads, self.head_dim))?
            .transpose(1, 2)?;
        let key_states = key_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;
        let value_states = value_states
            .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
            .transpose(1, 2)?;

        let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims);
        let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?;
        let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
        let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?;
        let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?;
        let (query_rot, key_rot) =
            self.rotary_emb
                .apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?;
        let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?;
        let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?;

        let (key_states, value_states) = match &self.kv_cache {
            None => (key_states, value_states),
            Some((prev_k, prev_v)) => {
                let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
                let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
                (key_states, value_states)
            }
        };
        if self.use_cache {
            self.kv_cache = Some((key_states.clone(), value_states.clone()));
        }

        let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
        let value_states =
            crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;

        let attn_output = if self.use_flash_attn {
            // flash-attn expects (b_sz, seq_len, nheads, head_dim)
            let q = query_states.transpose(1, 2)?;
            let k = key_states.transpose(1, 2)?;
            let v = value_states.transpose(1, 2)?;
            let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
            flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
        } else {
            let scale = 1f64 / f64::sqrt(self.head_dim as f64);
            let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;

            let attn_weights = match attention_mask {
                None => attn_weights,
                Some(mask) => attn_weights.broadcast_add(mask)?,
            };
            let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
            attn_weights.matmul(&value_states)?
        };
        attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.hidden_size))?
            .apply(&self.o_proj)
    }
}

#[derive(Debug)]
struct DecoderLayer {
    self_attn: Attention,
    mlp: MLP,
    input_layernorm: LayerNorm,
    post_attention_layernorm: LayerNorm,
    span: tracing::Span,
}

impl DecoderLayer {
    fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        let input_layernorm = candle_nn::layer_norm(
            cfg.hidden_size,
            cfg.layer_norm_eps,
            vb.pp("input_layernorm"),
        )?;
        let post_attention_layernorm = candle_nn::layer_norm(
            cfg.hidden_size,
            cfg.layer_norm_eps,
            vb.pp("post_attention_layernorm"),
        )?;
        Ok(Self {
            self_attn,
            mlp,
            input_layernorm,
            post_attention_layernorm,
            span: tracing::span!(tracing::Level::TRACE, "layer"),
        })
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: Option<&Tensor>,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        let _enter = self.span.enter();
        let residual = xs;
        let xs = self.input_layernorm.forward(xs)?;
        let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
        residual + xs
    }
}

#[derive(Debug)]
pub struct Model {
    embed_tokens: candle_nn::Embedding,
    layers: Vec<DecoderLayer>,
    norm: LayerNorm,
    lm_head: Linear,
    device: Device,
    dtype: DType,
    span: tracing::Span,
}

impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("model");
        let embed_tokens =
            candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
        let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
        let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_l = vb_m.pp("layers");
        for layer_idx in 0..cfg.num_hidden_layers {
            let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
            layers.push(layer)
        }
        let norm = candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?;
        let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
        Ok(Self {
            embed_tokens,
            layers,
            norm,
            lm_head,
            device: vb.device().clone(),
            dtype: vb.dtype(),
            span: tracing::span!(tracing::Level::TRACE, "model"),
        })
    }

    fn prepare_decoder_attention_mask(
        &self,
        b_size: usize,
        tgt_len: usize,
        seqlen_offset: usize,
    ) -> Result<Tensor> {
        // Sliding window mask?
        let mask: Vec<_> = (0..tgt_len)
            .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
            .collect();
        let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
        let mask = if seqlen_offset > 0 {
            let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
            Tensor::cat(&[&mask0, &mask], D::Minus1)?
        } else {
            mask
        };
        mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
            .to_dtype(self.dtype)
    }

    pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b_size, seq_len) = input_ids.dims2()?;
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
            Some(mask)
        };
        let mut xs = self.embed_tokens.forward(input_ids)?;
        for layer in self.layers.iter_mut() {
            xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
        }
        xs.narrow(1, seq_len - 1, 1)?
            .apply(&self.norm)?
            .apply(&self.lm_head)
    }
}
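// Worked numbers for the `stablelm_3b_4e1t` config above: head_dim =
// hidden_size / num_attention_heads = 2560 / 32 = 80, and with
// partial_rotary_factor = 0.25 the rotary embedding covers rotary_ndims =
// 80 * 0.25 = 20 dimensions per head; the remaining 60 "pass" dimensions are
// concatenated back unrotated in `Attention::forward`.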
candle/candle-transformers/src/models/stable_lm.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_lm.rs", "repo_id": "candle", "token_count": 7540 }
44
use super::common::LayerNormNoWeights;
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;

#[derive(Debug)]
pub struct MixingResidualBlock {
    norm1: LayerNormNoWeights,
    depthwise_conv: candle_nn::Conv2d,
    norm2: LayerNormNoWeights,
    channelwise_lin1: candle_nn::Linear,
    channelwise_lin2: candle_nn::Linear,
    gammas: Vec<f32>,
}

impl MixingResidualBlock {
    pub fn new(inp: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
        let norm1 = LayerNormNoWeights::new(inp)?;
        let norm2 = LayerNormNoWeights::new(inp)?;
        let cfg = candle_nn::Conv2dConfig {
            groups: inp,
            ..Default::default()
        };
        let depthwise_conv = candle_nn::conv2d(inp, inp, 3, cfg, vb.pp("depthwise.1"))?;
        let channelwise_lin1 = candle_nn::linear(inp, embed_dim, vb.pp("channelwise.0"))?;
        let channelwise_lin2 = candle_nn::linear(embed_dim, inp, vb.pp("channelwise.2"))?;
        let gammas = vb.get(6, "gammas")?.to_vec1::<f32>()?;
        Ok(Self {
            norm1,
            depthwise_conv,
            norm2,
            channelwise_lin1,
            channelwise_lin2,
            gammas,
        })
    }
}

impl Module for MixingResidualBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mods = &self.gammas;
        let x_temp = xs
            .permute((0, 2, 3, 1))?
            .apply(&self.norm1)?
            .permute((0, 3, 1, 2))?
            .affine(1. + mods[0] as f64, mods[1] as f64)?;
        let x_temp = candle_nn::ops::replication_pad2d(&x_temp, 1)?;
        let xs = (xs + x_temp.apply(&self.depthwise_conv)? * mods[2] as f64)?;
        let x_temp = xs
            .permute((0, 2, 3, 1))?
            .apply(&self.norm2)?
            .permute((0, 3, 1, 2))?
            .affine(1. + mods[3] as f64, mods[4] as f64)?;
        let x_temp = x_temp
            .permute((0, 2, 3, 1))?
            .contiguous()?
            .apply(&self.channelwise_lin1)?
            .gelu()?
            .apply(&self.channelwise_lin2)?
            .permute((0, 3, 1, 2))?;
        xs + x_temp * mods[5] as f64
    }
}

#[derive(Debug)]
pub struct PaellaVQ {
    in_block_conv: candle_nn::Conv2d,
    out_block_conv: candle_nn::Conv2d,
    down_blocks: Vec<(Option<candle_nn::Conv2d>, MixingResidualBlock)>,
    down_blocks_conv: candle_nn::Conv2d,
    down_blocks_bn: candle_nn::BatchNorm,
    up_blocks_conv: candle_nn::Conv2d,
    up_blocks: Vec<(Vec<MixingResidualBlock>, Option<candle_nn::ConvTranspose2d>)>,
}

impl PaellaVQ {
    pub fn new(vb: VarBuilder) -> Result<Self> {
        const IN_CHANNELS: usize = 3;
        const OUT_CHANNELS: usize = 3;
        const LATENT_CHANNELS: usize = 4;
        const EMBED_DIM: usize = 384;
        const BOTTLENECK_BLOCKS: usize = 12;
        const C_LEVELS: [usize; 2] = [EMBED_DIM / 2, EMBED_DIM];

        let in_block_conv = candle_nn::conv2d(
            IN_CHANNELS * 4,
            C_LEVELS[0],
            1,
            Default::default(),
            vb.pp("in_block.1"),
        )?;
        let out_block_conv = candle_nn::conv2d(
            C_LEVELS[0],
            OUT_CHANNELS * 4,
            1,
            Default::default(),
            vb.pp("out_block.0"),
        )?;

        let mut down_blocks = Vec::new();
        let vb_d = vb.pp("down_blocks");
        let mut d_idx = 0;
        for (i, &c_level) in C_LEVELS.iter().enumerate() {
            let conv_block = if i > 0 {
                let cfg = candle_nn::Conv2dConfig {
                    padding: 1,
                    stride: 2,
                    ..Default::default()
                };
                let block = candle_nn::conv2d(C_LEVELS[i - 1], c_level, 4, cfg, vb_d.pp(d_idx))?;
                d_idx += 1;
                Some(block)
            } else {
                None
            };
            let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_d.pp(d_idx))?;
            d_idx += 1;
            down_blocks.push((conv_block, res_block))
        }
        let vb_d = vb_d.pp(d_idx);
        let down_blocks_conv = candle_nn::conv2d_no_bias(
            C_LEVELS[1],
            LATENT_CHANNELS,
            1,
            Default::default(),
            vb_d.pp(0),
        )?;
        let down_blocks_bn = candle_nn::batch_norm(LATENT_CHANNELS, 1e-5, vb_d.pp(1))?;

        let mut up_blocks = Vec::new();
        let vb_u = vb.pp("up_blocks");
        let mut u_idx = 0;
        let up_blocks_conv = candle_nn::conv2d(
            LATENT_CHANNELS,
            C_LEVELS[1],
            1,
            Default::default(),
            vb_u.pp(u_idx).pp(0),
        )?;
        u_idx += 1;
        for (i, &c_level) in C_LEVELS.iter().rev().enumerate() {
            let mut res_blocks = Vec::new();
            let n_bottleneck_blocks = if i == 0 { BOTTLENECK_BLOCKS } else { 1 };
            for _j in 0..n_bottleneck_blocks {
                let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_u.pp(u_idx))?;
                u_idx += 1;
                res_blocks.push(res_block)
            }
            let conv_block = if i < C_LEVELS.len() - 1 {
                let cfg = candle_nn::ConvTranspose2dConfig {
                    padding: 1,
                    stride: 2,
                    ..Default::default()
                };
                let block = candle_nn::conv_transpose2d(
                    c_level,
                    C_LEVELS[C_LEVELS.len() - i - 2],
                    4,
                    cfg,
                    vb_u.pp(u_idx),
                )?;
                u_idx += 1;
                Some(block)
            } else {
                None
            };
            up_blocks.push((res_blocks, conv_block))
        }
        Ok(Self {
            in_block_conv,
            down_blocks,
            down_blocks_conv,
            down_blocks_bn,
            up_blocks,
            up_blocks_conv,
            out_block_conv,
        })
    }

    pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = candle_nn::ops::pixel_unshuffle(xs, 2)?.apply(&self.in_block_conv)?;
        for down_block in self.down_blocks.iter() {
            if let Some(conv) = &down_block.0 {
                xs = xs.apply(conv)?
            }
            xs = xs.apply(&down_block.1)?
        }
        xs.apply(&self.down_blocks_conv)?
            .apply_t(&self.down_blocks_bn, false)
    }

    pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
        // TODO: quantizer if we want to support `force_not_quantize=False`.
        let mut xs = xs.apply(&self.up_blocks_conv)?;
        for up_block in self.up_blocks.iter() {
            for b in up_block.0.iter() {
                xs = xs.apply(b)?;
            }
            if let Some(conv) = &up_block.1 {
                xs = xs.apply(conv)?
            }
        }
        xs.apply(&self.out_block_conv)?
            .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, 2))
    }
}

impl Module for PaellaVQ {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        self.decode(&self.encode(xs)?)
    }
}
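// Shape note (worked from the constants above): `pixel_unshuffle(xs, 2)` folds
// each 2x2 spatial patch into channels, so a (3, H, W) image becomes
// (3 * 4, H / 2, W / 2). That is why `in_block_conv` takes IN_CHANNELS * 4
// input channels, and symmetrically why `out_block_conv` emits OUT_CHANNELS * 4
// channels before the final `pixel_shuffle(xs, 2)` expands them back.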
candle/candle-transformers/src/models/wuerstchen/paella_vq.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/paella_vq.rs", "repo_id": "candle", "token_count": 4078 }
45
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config};
use candle_wasm_example_bert::console_log;
use tokenizers::{PaddingParams, Tokenizer};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct Model {
    bert: BertModel,
    tokenizer: Tokenizer,
}

#[wasm_bindgen]
impl Model {
    #[wasm_bindgen(constructor)]
    pub fn load(weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>) -> Result<Model, JsError> {
        console_error_panic_hook::set_once();
        console_log!("loading model");
        let device = &Device::Cpu;
        let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?;
        let config: Config = serde_json::from_slice(&config)?;
        let tokenizer =
            Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?;
        let bert = BertModel::load(vb, &config)?;
        Ok(Self { bert, tokenizer })
    }

    pub fn get_embeddings(&mut self, input: JsValue) -> Result<JsValue, JsError> {
        let input: Params =
            serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?;
        let sentences = input.sentences;
        let normalize_embeddings = input.normalize_embeddings;
        let device = &Device::Cpu;
        if let Some(pp) = self.tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::BatchLongest
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::BatchLongest,
                ..Default::default()
            };
            self.tokenizer.with_padding(Some(pp));
        }
        let tokens = self
            .tokenizer
            .encode_batch(sentences.to_vec(), true)
            .map_err(|m| JsError::new(&m.to_string()))?;
        let token_ids: Vec<Tensor> = tokens
            .iter()
            .map(|tokens| {
                let tokens = tokens.get_ids().to_vec();
                Tensor::new(tokens.as_slice(), device)
            })
            .collect::<Result<Vec<_>, _>>()?;
        let attention_mask: Vec<Tensor> = tokens
            .iter()
            .map(|tokens| {
                let tokens = tokens.get_attention_mask().to_vec();
                Tensor::new(tokens.as_slice(), device)
            })
            .collect::<Result<Vec<_>, _>>()?;
        let token_ids = Tensor::stack(&token_ids, 0)?;
        let attention_mask = Tensor::stack(&attention_mask, 0)?;
        let token_type_ids = token_ids.zeros_like()?;
        console_log!("running inference on batch {:?}", token_ids.shape());
        let embeddings = self
            .bert
            .forward(&token_ids, &token_type_ids, Some(&attention_mask))?;
        console_log!("generated embeddings {:?}", embeddings.shape());
        // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding)
        let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
        let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
        let embeddings = if normalize_embeddings {
            embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)?
        } else {
            embeddings
        };
        let embeddings_data = embeddings.to_vec2()?;
        Ok(serde_wasm_bindgen::to_value(&Embeddings {
            data: embeddings_data,
        })?)
    }
}

#[derive(serde::Serialize, serde::Deserialize)]
struct Embeddings {
    data: Vec<Vec<f32>>,
}

#[derive(serde::Serialize, serde::Deserialize)]
pub struct Params {
    sentences: Vec<String>,
    normalize_embeddings: bool,
}

fn main() {
    console_error_panic_hook::set_once();
}
candle/candle-wasm-examples/bert/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/bert/src/bin/m.rs", "repo_id": "candle", "token_count": 1752 }
46
import init, { Model } from "./build/m.js";

async function fetchArrayBuffer(url) {
  const cacheName = "llama2c-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Llama2C {
  static instance = {};

  static async getInstance(weightsURL, modelID, tokenizerURL) {
    // load individual modelID only once
    if (!this.instance[modelID]) {
      await init();

      self.postMessage({ status: "loading", message: "Loading Model" });

      const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([
        fetchArrayBuffer(weightsURL),
        fetchArrayBuffer(tokenizerURL),
      ]);

      this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8);
    }
    return this.instance[modelID];
  }
}

let controller = null;
self.addEventListener("message", (event) => {
  if (event.data.command === "start") {
    controller = new AbortController();
    generate(event.data);
  } else if (event.data.command === "abort") {
    controller.abort();
  }
});

async function generate(data) {
  const {
    weightsURL,
    modelID,
    tokenizerURL,
    prompt,
    temp,
    top_p,
    repeatPenalty,
    seed,
    maxSeqLen,
  } = data;
  try {
    self.postMessage({ status: "loading", message: "Starting llama2.c" });
    const model = await Llama2C.getInstance(weightsURL, modelID, tokenizerURL);

    self.postMessage({ status: "loading", message: "Initializing model" });
    const firstToken = model.init_with_prompt(
      prompt,
      temp,
      top_p,
      repeatPenalty,
      seed
    );
    const seq_len = model.get_seq_len();

    let sentence = firstToken;
    let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1;
    let startTime = performance.now();
    let tokensCount = 0;

    while (tokensCount < maxTokens) {
      await new Promise(async (resolve) => {
        if (controller && controller.signal.aborted) {
          self.postMessage({
            status: "aborted",
            message: "Aborted",
            output: prompt + sentence,
          });
          return;
        }
        const token = await model.next_token();
        const tokensSec =
          ((tokensCount + 1) / (performance.now() - startTime)) * 1000;

        sentence += token;
        self.postMessage({
          status: "generating",
          message: "Generating token",
          token: token,
          sentence: sentence,
          totalTime: performance.now() - startTime,
          tokensSec,
          prompt: prompt,
        });
        setTimeout(resolve, 0);
      });
      tokensCount++;
    }
    self.postMessage({
      status: "complete",
      message: "complete",
      output: prompt + sentence,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
}
candle/candle-wasm-examples/llama2-c/llama2cWorker.js/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/llama2cWorker.js", "repo_id": "candle", "token_count": 1223 }
47
// Load the Candle T5 wasm module (regular or quantized build, chosen per model).
let init, ModelConditionalGeneration;

async function fetchArrayBuffer(url) {
  const cacheName = "t5-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class ConditionalGeneration {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (modelID.includes("quantized")) {
      ({ default: init, ModelConditionalGeneration } = await import(
        "./build/m-quantized.js"
      ));
    } else {
      ({ default: init, ModelConditionalGeneration } = await import(
        "./build/m.js"
      ));
    }
    if (!this.instance[modelID]) {
      await init();
      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
        await Promise.all([
          fetchArrayBuffer(weightsURL),
          fetchArrayBuffer(tokenizerURL),
          fetchArrayBuffer(configURL),
        ]);

      this.instance[modelID] = new ModelConditionalGeneration(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } =
    event.data;
  let {
    temperature = 0.0,
    seed = 299792458,
    repeat_penalty = 1.1,
    repeat_last_n = 64,
    top_p = 1,
  } = { ...params };
  try {
    self.postMessage({
      status: "ready",
      message: "Starting T5 Conditional Generation",
    });
    const model = await ConditionalGeneration.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "decoding",
      message: "Decoding Prompt",
    });
    const output = model.decode({
      prompt,
      temperature,
      seed,
      top_p,
      repeat_penalty,
      repeat_last_n,
    });
    self.postMessage({
      status: "complete",
      message: "complete",
      output: output,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js", "repo_id": "candle", "token_count": 980 }
48
fn main() {
    wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
    yew::Renderer::<candle_wasm_example_whisper::App>::new().render();
}
candle/candle-wasm-examples/whisper/src/bin/app.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/app.rs", "repo_id": "candle", "token_count": 67 }
49
- local: index
  title: 🤗 Chat UI
- title: Installation
  sections:
    - local: installation/local
      title: Local
    - local: installation/spaces
      title: Spaces
    - local: installation/docker
      title: Docker
    - local: installation/helm
      title: Helm
- title: Configuration
  sections:
    - local: configuration/overview
      title: Overview
    - local: configuration/theming
      title: Theming
    - local: configuration/open-id
      title: OpenID
    - local: configuration/web-search
      title: Web Search
    - local: configuration/metrics
      title: Metrics
    - local: configuration/embeddings
      title: Text Embedding Models
    - title: Models
      sections:
        - local: configuration/models/overview
          title: Overview
        - local: configuration/models/multimodal
          title: Multimodal
        - local: configuration/models/tools
          title: Tools
        - title: Providers
          sections:
            - local: configuration/models/providers/anthropic
              title: Anthropic
            - local: configuration/models/providers/aws
              title: AWS
            - local: configuration/models/providers/cloudflare
              title: Cloudflare
            - local: configuration/models/providers/cohere
              title: Cohere
            - local: configuration/models/providers/google
              title: Google
            - local: configuration/models/providers/langserve
              title: Langserve
            - local: configuration/models/providers/llamacpp
              title: Llama.cpp
            - local: configuration/models/providers/ollama
              title: Ollama
            - local: configuration/models/providers/openai
              title: OpenAI
            - local: configuration/models/providers/tgi
              title: TGI
    - local: configuration/common-issues
      title: Common Issues
- title: Developing
  sections:
    - local: developing/architecture
      title: Architecture
    - local: developing/copy-huggingchat
      title: Copy HuggingChat
chat-ui/docs/source/_toctree.yml/0
{ "file_path": "chat-ui/docs/source/_toctree.yml", "repo_id": "chat-ui", "token_count": 879 }
50
# Tools

Tool calling instructs the model to generate an output matching a user-defined schema, which may be parsed for invoking external tools. The model simply chooses the tools and their parameters. Currently, only `TGI` and `Cohere` with `Command R+` are supported.

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-light.png" height="auto"/>
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-dark.png" height="auto"/>
</div>

## TGI Configuration

A custom tokenizer is required for prompting the model for generating tool calls, as well as prompting with the results. The expected format for these tools and the resulting tool calls is hard-coded for TGI, so it's likely that only the following configuration will work:

```ini
MODELS=`[
  {
    "name" : "CohereForAI/c4ai-command-r-plus",
    "displayName": "Command R+",
    "description": "Command R+ is Cohere's latest LLM and is the first open weight model to beat GPT4 in the Chatbot Arena!",
    "tools": true,
    "tokenizer": "Xenova/c4ai-command-r-v01-tokenizer",
    "modelUrl": "https://huggingface.co/CohereForAI/c4ai-command-r-plus",
    "websiteUrl": "https://docs.cohere.com/docs/command-r-plus",
    "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png",
    "parameters": {
      "stop": ["<|END_OF_TURN_TOKEN|>"],
      "truncate" : 28672,
      "max_new_tokens" : 4096,
      "temperature" : 0.3
    }
  }
]`
```

## Cohere Configuration

The Cohere provider supports the endpoint-native method of tool calling. Refer to `endpoints/cohere` for implementation details.

```ini
MODELS=`[
  {
    "name": "command-r-plus",
    "displayName": "Command R+",
    "description": "Command R+ is Cohere's latest LLM and is the first open weight model to beat GPT4 in the Chatbot Arena!",
    "tools": true,
    "websiteUrl": "https://docs.cohere.com/docs/command-r-plus",
    "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png",
    "endpoints": [{
      "type": "cohere",
      "apiKey": "YOUR_API_KEY"
    }]
  }
]`
```

## Adding Tools

Tool implementations are placed in `src/lib/server/tools`, with helpers available for easy integration with HuggingFace Zero GPU spaces. In the future, there may be an OpenAPI interface for adding tools. A sketch of what a tool record can look like is shown at the end of this page.

## Adding Support for Additional Models

The TGI implementation uses a custom tokenizer and a hard-coded schema for supporting tools. The Cohere implementation, on the other hand, uses the native support in the SDK to emit tool calls; this is the recommended way to add support for more models. Please see the `endpoints/cohere` section of the code for implementation details.
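To make the "Adding Tools" section concrete, here is a rough, hypothetical sketch of a community tool record. The field names mirror what the project's own seeding script (`scripts/populate.ts`) writes into the `tools` collection; every value below (`displayName`, the `/predict` endpoint, the sample space `baseUrl`, ...) is invented for illustration and is not a real tool definition:

```ts
import { ObjectId } from "mongodb";
import { generateSearchTokens } from "$lib/utils/searchTokens";
import type { CommunityToolDB } from "$lib/types/Tool";

// Hypothetical sample tool; the shape follows the records built by the
// seeding script, while all concrete values are made up.
const displayName = "Image Captioner";
const captionTool = {
  type: "community" as const,
  _id: new ObjectId(),
  createdById: new ObjectId(), // normally the author's user id
  createdByName: "example-user",
  displayName,
  name: displayName.toLowerCase().replace(" ", "_"),
  endpoint: "/predict", // hypothetical endpoint
  description: "Generates a caption for an input image.",
  color: "blue",
  icon: "camera",
  baseUrl: "gokaygokay/SD3-Long-Captioner", // a space id, as in the seed data
  inputs: [],
  outputPath: null,
  outputType: "str" as const,
  showOutput: true,
  useCount: 0,
  last24HoursUseCount: 0,
  createdAt: new Date(),
  updatedAt: new Date(),
  searchTokens: generateSearchTokens(displayName),
  featured: false,
} satisfies CommunityToolDB;
```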
chat-ui/docs/source/configuration/models/tools.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/tools.md", "repo_id": "chat-ui", "token_count": 948 }
51
import readline from "readline";
import minimist from "minimist";
// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { env } from "$env/dynamic/private";
import { faker } from "@faker-js/faker";
import { ObjectId } from "mongodb";
// @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them
import { collections } from "$lib/server/database";
import { models } from "../src/lib/server/models.ts";
import type { User } from "../src/lib/types/User";
import type { Assistant } from "../src/lib/types/Assistant";
import type { Conversation } from "../src/lib/types/Conversation";
import type { Settings } from "../src/lib/types/Settings";
import type { CommunityToolDB, ToolLogoColor, ToolLogoIcon } from "../src/lib/types/Tool";
import { defaultEmbeddingModel } from "../src/lib/server/embeddingModels.ts";
import { Message } from "../src/lib/types/Message.ts";
import { addChildren } from "../src/lib/utils/tree/addChildren.ts";
import { generateSearchTokens } from "../src/lib/utils/searchTokens.ts";

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});

rl.on("close", function () {
  process.exit(0);
});

const possibleFlags = ["reset", "all", "users", "settings", "assistants", "conversations", "tools"];
const argv = minimist(process.argv.slice(2));
const flags = argv["_"].filter((flag) => possibleFlags.includes(flag));

async function generateMessages(preprompt?: string): Promise<Message[]> {
  const isLinear = faker.datatype.boolean(0.5);
  const isInterrupted = faker.datatype.boolean(0.05);

  const messages: Message[] = [];

  messages.push({
    id: crypto.randomUUID(),
    from: "system",
    content: preprompt ?? "",
    createdAt: faker.date.recent({ days: 30 }),
    updatedAt: faker.date.recent({ days: 30 }),
  });

  let isUser = true;
  let lastId = messages[0].id;
  if (isLinear) {
    const convLength = faker.number.int({ min: 1, max: 25 }) * 2; // must always be even
    for (let i = 0; i < convLength; i++) {
      lastId = addChildren(
        {
          messages,
          rootMessageId: messages[0].id,
        },
        {
          from: isUser ? "user" : "assistant",
          content: faker.lorem.sentence({
            min: 10,
            max: isUser ? 50 : 200,
          }),
          createdAt: faker.date.recent({ days: 30 }),
          updatedAt: faker.date.recent({ days: 30 }),
          interrupted: i === convLength - 1 && isInterrupted,
        },
        lastId
      );
      isUser = !isUser;
    }
  } else {
    const convLength = faker.number.int({ min: 2, max: 200 });
    for (let i = 0; i < convLength; i++) {
      addChildren(
        {
          messages,
          rootMessageId: messages[0].id,
        },
        {
          from: isUser ? "user" : "assistant",
          content: faker.lorem.sentence({
            min: 10,
            max: isUser ? 50 : 200,
          }),
          createdAt: faker.date.recent({ days: 30 }),
          updatedAt: faker.date.recent({ days: 30 }),
          interrupted: i === convLength - 1 && isInterrupted,
        },
        faker.helpers.arrayElement([
          messages[0].id,
          ...messages.filter((m) => m.from === (isUser ? "assistant" : "user")).map((m) => m.id),
        ])
      );
      isUser = !isUser;
    }
  }

  return messages;
}

async function seed() {
  console.log("Seeding...");
  const modelIds = models.map((model) => model.id);

  if (flags.includes("reset")) {
    console.log("Starting reset of DB");
    await collections.users.deleteMany({});
    await collections.settings.deleteMany({});
    await collections.assistants.deleteMany({});
    await collections.conversations.deleteMany({});
    await collections.tools.deleteMany({});
    console.log("Reset done");
  }

  if (flags.includes("users") || flags.includes("all")) {
    console.log("Creating 100 new users");
    const newUsers: User[] = Array.from({ length: 100 }, () => ({
      _id: new ObjectId(),
      createdAt: faker.date.recent({ days: 30 }),
      updatedAt: faker.date.recent({ days: 30 }),
      username: faker.internet.userName(),
      name: faker.person.fullName(),
      hfUserId: faker.string.alphanumeric(24),
      avatarUrl: faker.image.avatar(),
    }));

    await collections.users.insertMany(newUsers);
    console.log("Done creating users.");
  }

  const users = await collections.users.find().toArray();

  if (flags.includes("settings") || flags.includes("all")) {
    console.log("Updating settings for all users");
    users.forEach(async (user) => {
      const settings: Settings = {
        userId: user._id,
        shareConversationsWithModelAuthors: faker.datatype.boolean(0.25),
        hideEmojiOnSidebar: faker.datatype.boolean(0.25),
        ethicsModalAcceptedAt: faker.date.recent({ days: 30 }),
        activeModel: faker.helpers.arrayElement(modelIds),
        createdAt: faker.date.recent({ days: 30 }),
        updatedAt: faker.date.recent({ days: 30 }),
        customPrompts: {},
        assistants: [],
      };
      await collections.settings.updateOne(
        { userId: user._id },
        { $set: { ...settings } },
        { upsert: true }
      );
    });
    console.log("Done updating settings.");
  }

  if (flags.includes("assistants") || flags.includes("all")) {
    console.log("Creating assistants for all users");
    await Promise.all(
      users.map(async (user) => {
        const name = faker.animal.insect();
        const assistants = faker.helpers.multiple<Assistant>(
          () => ({
            _id: new ObjectId(),
            name,
            createdById: user._id,
            createdByName: user.username,
            createdAt: faker.date.recent({ days: 30 }),
            updatedAt: faker.date.recent({ days: 30 }),
            userCount: faker.number.int({ min: 1, max: 100000 }),
            featured: faker.datatype.boolean(0.25),
            modelId: faker.helpers.arrayElement(modelIds),
            description: faker.lorem.sentence(),
            preprompt: faker.hacker.phrase(),
            exampleInputs: faker.helpers.multiple(() => faker.lorem.sentence(), {
              count: faker.number.int({ min: 0, max: 4 }),
            }),
            searchTokens: generateSearchTokens(name),
            last24HoursCount: faker.number.int({ min: 0, max: 1000 }),
          }),
          { count: faker.number.int({ min: 3, max: 10 }) }
        );
        await collections.assistants.insertMany(assistants);
        await collections.settings.updateOne(
          { userId: user._id },
          { $set: { assistants: assistants.map((a) => a._id.toString()) } },
          { upsert: true }
        );
      })
    );
    console.log("Done creating assistants.");
  }

  if (flags.includes("conversations") || flags.includes("all")) {
    console.log("Creating conversations for all users");
    await Promise.all(
      users.map(async (user) => {
        const conversations = faker.helpers.multiple(
          async () => {
            const settings = await collections.settings.findOne<Settings>({ userId: user._id });

            const assistantId =
              settings?.assistants && settings.assistants.length > 0 && faker.datatype.boolean(0.1)
                ? faker.helpers.arrayElement<ObjectId>(settings.assistants)
                : undefined;

            const preprompt =
              (assistantId
                ? await collections.assistants
                    .findOne({ _id: assistantId })
                    .then((assistant: Assistant) => assistant?.preprompt ?? "")
                : faker.helpers.maybe(() => faker.hacker.phrase(), { probability: 0.5 })) ?? "";

            const messages = await generateMessages(preprompt);

            const conv = {
              _id: new ObjectId(),
              userId: user._id,
              assistantId,
              preprompt,
              createdAt: faker.date.recent({ days: 145 }),
              updatedAt: faker.date.recent({ days: 145 }),
              model: faker.helpers.arrayElement(modelIds),
              title: faker.internet.emoji() + " " + faker.hacker.phrase(),
              embeddingModel: defaultEmbeddingModel.id,
              messages,
              rootMessageId: messages[0].id,
            } satisfies Conversation;

            return conv;
          },
          { count: faker.number.int({ min: 10, max: 200 }) }
        );

        await collections.conversations.insertMany(await Promise.all(conversations));
      })
    );
    console.log("Done creating conversations.");
  }

  // generate Community Tools
  if (flags.includes("tools") || flags.includes("all")) {
    const tools = await Promise.all(
      faker.helpers.multiple(
        () => {
          const _id = new ObjectId();
          const displayName = faker.company.catchPhrase();
          const description = faker.company.catchPhrase();
          const color = faker.helpers.arrayElement([
            "purple",
            "blue",
            "green",
            "yellow",
            "red",
          ]) satisfies ToolLogoColor;
          const icon = faker.helpers.arrayElement([
            "wikis",
            "tools",
            "camera",
            "code",
            "email",
            "cloud",
            "terminal",
            "game",
            "chat",
            "speaker",
            "video",
          ]) satisfies ToolLogoIcon;
          const baseUrl = faker.helpers.arrayElement([
            "stabilityai/stable-diffusion-3-medium",
            "multimodalart/cosxl",
            "gokaygokay/SD3-Long-Captioner",
            "xichenhku/MimicBrush",
          ]);

          // keep empty for populate for now
          const user: User = faker.helpers.arrayElement(users);

          const createdById = user._id;
          const createdByName = user.username ?? user.name;

          return {
            type: "community" as const,
            _id,
            createdById,
            createdByName,
            displayName,
            name: displayName.toLowerCase().replace(" ", "_"),
            endpoint: "/test",
            description,
            color,
            icon,
            baseUrl,
            inputs: [],
            outputPath: null,
            outputType: "str" as const,
            showOutput: false,
            useCount: faker.number.int({ min: 0, max: 100000 }),
            last24HoursUseCount: faker.number.int({ min: 0, max: 1000 }),
            createdAt: faker.date.recent({ days: 30 }),
            updatedAt: faker.date.recent({ days: 30 }),
            searchTokens: generateSearchTokens(displayName),
            featured: faker.datatype.boolean(),
          };
        },
        { count: faker.number.int({ min: 10, max: 200 }) }
      )
    );

    await collections.tools.insertMany(tools satisfies CommunityToolDB[]);
  }
}

// run seed
(async () => {
  try {
    rl.question(
      "You're about to run a seeding script on the following MONGODB_URL: \x1b[31m" +
        env.MONGODB_URL +
        "\x1b[0m\n\n With the following flags: \x1b[31m" +
        flags.join("\x1b[0m , \x1b[31m") +
        "\x1b[0m\n \n\n Are you sure you want to continue? (yes/no): ",
      async (confirm) => {
        if (confirm !== "yes") {
          console.log("Not 'yes', exiting.");
          rl.close();
          process.exit(0);
        }
        console.log("Starting seeding...");
        await seed();
        console.log("Seeding done.");
        rl.close();
      }
    );
  } catch (e) {
    console.error(e);
    process.exit(1);
  }
})();
chat-ui/scripts/populate.ts/0
{ "file_path": "chat-ui/scripts/populate.ts", "repo_id": "chat-ui", "token_count": 4379 }
52
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { env as envPublic } from "$env/dynamic/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal> <div class="from-primary-500/40 via-primary-500/10 to-primary-500/0 flex w-full flex-col items-center gap-6 bg-gradient-to-b px-5 pb-8 pt-9 text-center sm:px-6" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {envPublic.PUBLIC_APP_NAME} </h2> <p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;"> {envPublic.PUBLIC_APP_DESCRIPTION} </p> <p class="text-sm text-gray-500"> {envPublic.PUBLIC_APP_DISCLAIMER_MESSAGE} </p> <div class="flex w-full flex-col items-center gap-2"> {#if $page.data.guestMode || !$page.data.loginEnabled} <button class="w-full justify-center rounded-full border-2 border-gray-300 bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" class:bg-white={$page.data.loginEnabled} class:text-gray-800={$page.data.loginEnabled} class:hover:bg-slate-100={$page.data.loginEnabled} on:click|preventDefault|stopPropagation={() => { if (!cookiesAreEnabled()) { window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > {#if $page.data.loginEnabled} Try as guest {:else} Start chatting {/if} </button> {/if} {#if $page.data.loginEnabled} <form action="{base}/login" target="_parent" method="POST" class="w-full"> <button type="submit" class="flex w-full flex-wrap items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if envPublic.PUBLIC_APP_NAME === "HuggingChat"} <span class="flex items-center"> &nbsp;with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5 flex-none" /> Hugging Face </span> {/if} </button> </form> {/if} </div> </div> </Modal>
chat-ui/src/lib/components/DisclaimerModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/DisclaimerModal.svelte", "repo_id": "chat-ui", "token_count": 1074 }
53
<script lang="ts"> export let checked: boolean; export let name: string; </script> <input bind:checked type="checkbox" {name} class="peer pointer-events-none absolute opacity-0" /> <div aria-checked={checked} aria-roledescription="switch" aria-label="switch" role="switch" tabindex="0" class="relative inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full bg-gray-300 p-1 shadow-inner ring-gray-400 transition-all peer-checked:bg-blue-600 peer-focus-visible:ring peer-focus-visible:ring-offset-1 hover:bg-gray-400 dark:bg-gray-600 peer-checked:[&>div]:translate-x-3.5" > <div class="h-3.5 w-3.5 rounded-full bg-white shadow-sm transition-all" /> </div>
chat-ui/src/lib/components/Switch.svelte/0
{ "file_path": "chat-ui/src/lib/components/Switch.svelte", "repo_id": "chat-ui", "token_count": 239 }
54
<script lang="ts"> import { MessageToolUpdateType, type MessageToolUpdate } from "$lib/types/MessageUpdate"; import { isMessageToolCallUpdate, isMessageToolErrorUpdate, isMessageToolResultUpdate, } from "$lib/utils/messageUpdates"; import CarbonTools from "~icons/carbon/tools"; import { ToolResultStatus, type ToolFront } from "$lib/types/Tool"; import { page } from "$app/stores"; import { onDestroy } from "svelte"; import { browser } from "$app/environment"; export let tool: MessageToolUpdate[]; export let loading: boolean = false; const toolFnName = tool.find(isMessageToolCallUpdate)?.call.name; $: toolError = tool.some(isMessageToolErrorUpdate); $: toolDone = tool.some(isMessageToolResultUpdate); $: eta = tool.find((el) => el.subtype === MessageToolUpdateType.ETA)?.eta; const availableTools: ToolFront[] = $page.data.tools; let loadingBarEl: HTMLDivElement; let animation: Animation | undefined = undefined; let isShowingLoadingBar = false; $: !toolError && !toolDone && loading && loadingBarEl && eta && (() => { loadingBarEl.classList.remove("hidden"); isShowingLoadingBar = true; animation = loadingBarEl.animate([{ width: "0%" }, { width: "calc(100%+1rem)" }], { duration: eta * 1000, fill: "forwards", }); })(); onDestroy(() => { if (animation) { animation.cancel(); } }); // go to 100% quickly if loading is done $: (!loading || toolDone || toolError) && browser && loadingBarEl && isShowingLoadingBar && (() => { isShowingLoadingBar = false; loadingBarEl.classList.remove("hidden"); animation?.cancel(); animation = loadingBarEl.animate( [{ width: loadingBarEl.style.width }, { width: "calc(100%+1rem)" }], { duration: 300, fill: "forwards", } ); setTimeout(() => { loadingBarEl.classList.add("hidden"); }, 300); })(); </script> {#if toolFnName && toolFnName !== "websearch"} <details class="group/tool my-2.5 w-fit cursor-pointer rounded-lg border border-gray-200 bg-white pl-1 pr-2.5 text-sm shadow-sm transition-all open:mb-3 open:border-purple-500/10 open:bg-purple-600/5 open:shadow-sm dark:border-gray-800 dark:bg-gray-900 open:dark:border-purple-800/40 open:dark:bg-purple-800/10" > <summary class="relative flex select-none list-none items-center gap-1.5 py-1 group-open/tool:text-purple-700 group-open/tool:dark:text-purple-300" > <div bind:this={loadingBarEl} class="absolute -m-1 hidden h-full w-[calc(100%+1rem)] rounded-lg bg-purple-500/5 transition-all dark:bg-purple-500/10" /> <div class="relative grid size-[22px] place-items-center rounded bg-purple-600/10 dark:bg-purple-600/20" > <svg class="absolute inset-0 text-purple-500/40 transition-opacity" class:invisible={toolDone || toolError} width="22" height="22" viewBox="0 0 38 38" fill="none" xmlns="http://www.w3.org/2000/svg" > <path class="loading-path" d="M8 2.5H30C30 2.5 35.5 2.5 35.5 8V30C35.5 30 35.5 35.5 30 35.5H8C8 35.5 2.5 35.5 2.5 30V8C2.5 8 2.5 2.5 8 2.5Z" stroke="currentColor" stroke-width="1" stroke-linecap="round" id="shape" /> </svg> <CarbonTools class="text-xs text-purple-700 dark:text-purple-500" /> </div> <span> {toolError ? "Error calling" : toolDone ? "Called" : "Calling"} tool <span class="font-semibold" >{availableTools.find((tool) => tool.name === toolFnName)?.displayName ?? 
toolFnName}</span > </span> </summary> {#each tool as toolUpdate} {#if toolUpdate.subtype === MessageToolUpdateType.Call} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Parameters</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20" /> </div> <ul class="py-1 text-sm"> {#each Object.entries(toolUpdate.call.parameters ?? {}) as [k, v]} {#if v !== null} <li> <span class="font-semibold">{k}</span>: <span>{v}</span> </li> {/if} {/each} </ul> {:else if toolUpdate.subtype === MessageToolUpdateType.Error} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Error</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20" /> </div> <p class="text-sm">{toolUpdate.message}</p> {:else if isMessageToolResultUpdate(toolUpdate) && toolUpdate.result.status === ToolResultStatus.Success && toolUpdate.result.display} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Result</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20" /> </div> <ul class="py-1 text-sm"> {#each toolUpdate.result.outputs as output} {#each Object.entries(output) as [k, v]} {#if v !== null} <li> <span class="font-semibold">{k}</span>: <span>{v}</span> </li> {/if} {/each} {/each} </ul> {/if} {/each} </details> {/if} <style> details summary::-webkit-details-marker { display: none; } .loading-path { stroke-dasharray: 61.45; animation: loading 2s linear infinite; } </style>
chat-ui/src/lib/components/chat/ToolUpdate.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ToolUpdate.svelte", "repo_id": "chat-ui", "token_count": 2271 }
55
import { afterEach, assert, describe, expect, it } from "vitest";
import { migrations } from "./routines";
import { acquireLock, isDBLocked, refreshLock, releaseLock } from "./lock";
import { collections } from "$lib/server/database";

const LOCK_KEY = "migrations.test";

describe("migrations", () => {
	it("should not have duplicates guid", async () => {
		const guids = migrations.map((m) => m._id.toString());
		const uniqueGuids = [...new Set(guids)];
		expect(uniqueGuids.length).toBe(guids.length);
	});

	it("should acquire only one lock on DB", async () => {
		const results = await Promise.all(new Array(1000).fill(0).map(() => acquireLock(LOCK_KEY)));
		const locks = results.filter((r) => r);

		const semaphores = await collections.semaphores.find({}).toArray();

		expect(locks.length).toBe(1);
		expect(semaphores).toBeDefined();
		expect(semaphores.length).toBe(1);
		expect(semaphores?.[0].key).toBe(LOCK_KEY);
	});

	it("should read the lock correctly", async () => {
		const lockId = await acquireLock(LOCK_KEY);
		assert(lockId);
		expect(await isDBLocked(LOCK_KEY)).toBe(true);
		expect(!!(await acquireLock(LOCK_KEY))).toBe(false);
		await releaseLock(LOCK_KEY, lockId);
		expect(await isDBLocked(LOCK_KEY)).toBe(false);
	});

	it("should refresh the lock", async () => {
		const lockId = await acquireLock(LOCK_KEY);
		assert(lockId);

		// get the updatedAt time
		const updatedAtInitially = (await collections.semaphores.findOne({}))?.updatedAt;

		await refreshLock(LOCK_KEY, lockId);

		const updatedAtAfterRefresh = (await collections.semaphores.findOne({}))?.updatedAt;

		expect(updatedAtInitially).toBeDefined();
		expect(updatedAtAfterRefresh).toBeDefined();
		expect(updatedAtInitially).not.toBe(updatedAtAfterRefresh);
	});
});

afterEach(async () => {
	await collections.semaphores.deleteMany({});
	await collections.migrationResults.deleteMany({});
});
chat-ui/src/lib/migrations/migrations.spec.ts/0
{ "file_path": "chat-ui/src/lib/migrations/migrations.spec.ts", "repo_id": "chat-ui", "token_count": 665 }
56
import { z } from "zod"; import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints"; import { chunk } from "$lib/utils/chunk"; import { env } from "$env/dynamic/private"; import { logger } from "$lib/server/logger"; export const embeddingEndpointTeiParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("tei"), url: z.string().url(), authorization: z .string() .optional() .transform((v) => (!v && env.HF_TOKEN ? "Bearer " + env.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header }); const getModelInfoByUrl = async (url: string, authorization?: string) => { const { origin } = new URL(url); const response = await fetch(`${origin}/info`, { headers: { Accept: "application/json", "Content-Type": "application/json", ...(authorization ? { Authorization: authorization } : {}), }, }); try { const json = await response.json(); return { max_client_batch_size: 32, max_batch_tokens: 16384, ...json }; } catch { logger.debug("Could not get info from TEI embedding endpoint. Using defaults."); return { max_client_batch_size: 32, max_batch_tokens: 16384 }; } }; export async function embeddingEndpointTei( input: z.input<typeof embeddingEndpointTeiParametersSchema> ): Promise<EmbeddingEndpoint> { const { url, model, authorization } = embeddingEndpointTeiParametersSchema.parse(input); const { max_client_batch_size, max_batch_tokens } = await getModelInfoByUrl(url); const maxBatchSize = Math.min( max_client_batch_size, Math.floor(max_batch_tokens / model.chunkCharLength) ); return async ({ inputs }) => { const { origin } = new URL(url); const batchesInputs = chunk(inputs, maxBatchSize); const batchesResults = await Promise.all( batchesInputs.map(async (batchInputs) => { const response = await fetch(`${origin}/embed`, { method: "POST", headers: { Accept: "application/json", "Content-Type": "application/json", ...(authorization ? { Authorization: authorization } : {}), }, body: JSON.stringify({ inputs: batchInputs, normalize: true, truncate: true }), }); const embeddings: Embedding[] = await response.json(); return embeddings; }) ); const flatAllEmbeddings = batchesResults.flat(); return flatAllEmbeddings; }; }
chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 838 }
57
import { buildPrompt } from "$lib/buildPrompt";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";

export const endpointOllamaParametersSchema = z.object({
	weight: z.number().int().positive().default(1),
	model: z.any(),
	type: z.literal("ollama"),
	url: z.string().url().default("http://127.0.0.1:11434"),
	ollamaName: z.string().min(1).optional(),
});

export function endpointOllama(input: z.input<typeof endpointOllamaParametersSchema>): Endpoint {
	const { url, model, ollamaName } = endpointOllamaParametersSchema.parse(input);

	return async ({ messages, preprompt, continueMessage, generateSettings }) => {
		const prompt = await buildPrompt({
			messages,
			continueMessage,
			preprompt,
			model,
		});

		const parameters = { ...model.parameters, ...generateSettings };

		const requestInfo = await fetch(`${url}/api/tags`, {
			method: "GET",
			headers: {
				"Content-Type": "application/json",
			},
		});

		const tags = await requestInfo.json();

		if (!tags.models.some((m: { name: string }) => m.name === ollamaName)) {
			// if the model is not in the tags, pull it but don't wait for the answer
			fetch(`${url}/api/pull`, {
				method: "POST",
				headers: {
					"Content-Type": "application/json",
				},
				body: JSON.stringify({
					name: ollamaName ?? model.name,
					stream: false,
				}),
			});

			throw new Error("Currently pulling model from Ollama, please try again later.");
		}

		const r = await fetch(`${url}/api/generate`, {
			method: "POST",
			headers: {
				"Content-Type": "application/json",
			},
			body: JSON.stringify({
				prompt,
				model: ollamaName ?? model.name,
				raw: true,
				options: {
					top_p: parameters.top_p,
					top_k: parameters.top_k,
					temperature: parameters.temperature,
					repeat_penalty: parameters.repetition_penalty,
					stop: parameters.stop,
					num_predict: parameters.max_new_tokens,
				},
			}),
		});

		if (!r.ok) {
			throw new Error(`Failed to generate text: ${await r.text()}`);
		}

		const encoder = new TextDecoderStream();
		const reader = r.body?.pipeThrough(encoder).getReader();

		return (async function* () {
			let generatedText = "";
			let tokenId = 0;
			let stop = false;
			while (!stop) {
				// read the next chunk from the stream
				const out = (await reader?.read()) ?? { done: false, value: undefined };
				// if the stream is done, cancel the reader and stop generating
				if (out.done) {
					reader?.cancel();
					return;
				}

				if (!out.value) {
					return;
				}

				let data = null;
				try {
					data = JSON.parse(out.value);
				} catch (e) {
					return;
				}

				if (!data.done) {
					generatedText += data.response;

					yield {
						token: {
							id: tokenId++,
							text: data.response ?? "",
							logprob: 0,
							special: false,
						},
						generated_text: null,
						details: null,
					} satisfies TextGenerationStreamOutput;
				} else {
					stop = true;
					yield {
						token: {
							id: tokenId++,
							text: data.response ?? "",
							logprob: 0,
							special: true,
						},
						generated_text: generatedText,
						details: null,
					} satisfies TextGenerationStreamOutput;
				}
			}
		})();
	};
}

export default endpointOllama;
chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts", "repo_id": "chat-ui", "token_count": 1380 }
58
import { isURLLocal } from "../isURLLocal";
import { env } from "$env/dynamic/private";
import { collections } from "$lib/server/database";
import type { Assistant } from "$lib/types/Assistant";
import type { ObjectId } from "mongodb";

export async function processPreprompt(preprompt: string) {
	const urlRegex = /{{\s?url=(.*?)\s?}}/g;

	for (const match of preprompt.matchAll(urlRegex)) {
		try {
			const url = new URL(match[1]);
			if ((await isURLLocal(url)) && env.ENABLE_LOCAL_FETCH !== "true") {
				throw new Error("URL couldn't be fetched, it resolved to a local address.");
			}

			const res = await fetch(url.href);

			if (!res.ok) {
				throw new Error("URL couldn't be fetched, error " + res.status);
			}
			const text = await res.text();
			preprompt = preprompt.replaceAll(match[0], text);
		} catch (e) {
			preprompt = preprompt.replaceAll(match[0], (e as Error).message);
		}
	}

	return preprompt;
}

export async function getAssistantById(id?: ObjectId) {
	return collections.assistants
		.findOne<Pick<Assistant, "rag" | "dynamicPrompt" | "generateSettings" | "tools">>(
			{ _id: id },
			{ projection: { rag: 1, dynamicPrompt: 1, generateSettings: 1, tools: 1 } }
		)
		.then((a) => a ?? undefined);
}

export function assistantHasWebSearch(assistant?: Pick<Assistant, "rag"> | null) {
	return (
		env.ENABLE_ASSISTANTS_RAG === "true" &&
		!!assistant?.rag &&
		(assistant.rag.allowedLinks.length > 0 ||
			assistant.rag.allowedDomains.length > 0 ||
			assistant.rag.allowAllDomains)
	);
}

export function assistantHasDynamicPrompt(assistant?: Pick<Assistant, "dynamicPrompt">) {
	return env.ENABLE_ASSISTANTS_RAG === "true" && Boolean(assistant?.dynamicPrompt);
}
chat-ui/src/lib/server/textGeneration/assistant.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/assistant.ts", "repo_id": "chat-ui", "token_count": 621 }
59
import type { MarkdownElement } from "../markdown/types";

export function flattenTree(elem: MarkdownElement): MarkdownElement[] {
	if ("children" in elem) return [elem, ...elem.children.flatMap(flattenTree)];
	return [elem];
}
chat-ui/src/lib/server/websearch/embed/tree.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/embed/tree.ts", "repo_id": "chat-ui", "token_count": 74 }
60
import { env } from "$env/dynamic/private"; import { getJson, type GoogleParameters } from "serpapi"; import type { WebSearchSource } from "$lib/types/WebSearch"; import { isURL } from "$lib/utils/isUrl"; type SerpApiResponse = { organic_results: { link: string; }[]; }; export default async function searchWebSerpApi(query: string): Promise<WebSearchSource[]> { const params = { q: query, hl: "en", gl: "us", google_domain: "google.com", api_key: env.SERPAPI_KEY, } satisfies GoogleParameters; // Show result as JSON const response = (await getJson("google", params)) as unknown as SerpApiResponse; return response.organic_results.filter(({ link }) => isURL(link)); }
chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts", "repo_id": "chat-ui", "token_count": 236 }
61
export function switchTheme() {
	const { classList } = document.querySelector("html") as HTMLElement;
	const metaTheme = document.querySelector('meta[name="theme-color"]') as HTMLMetaElement;
	if (classList.contains("dark")) {
		classList.remove("dark");
		metaTheme.setAttribute("content", "rgb(249, 250, 251)");
		localStorage.theme = "light";
	} else {
		classList.add("dark");
		metaTheme.setAttribute("content", "rgb(26, 36, 50)");
		localStorage.theme = "dark";
	}
}
chat-ui/src/lib/switchTheme.ts/0
{ "file_path": "chat-ui/src/lib/switchTheme.ts", "repo_id": "chat-ui", "token_count": 164 }
62
import type { Conversation } from "./Conversation";

export type SharedConversation = Pick<
	Conversation,
	| "model"
	| "embeddingModel"
	| "title"
	| "rootMessageId"
	| "messages"
	| "preprompt"
	| "assistantId"
	| "createdAt"
	| "updatedAt"
> & {
	_id: string;
	hash: string;
};
chat-ui/src/lib/types/SharedConversation.ts/0
{ "file_path": "chat-ui/src/lib/types/SharedConversation.ts", "repo_id": "chat-ui", "token_count": 114 }
63
import { base } from "$app/paths"; import { env as envPublic } from "$env/dynamic/public"; export function getShareUrl(url: URL, shareId: string): string { return `${ envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}` }/r/${shareId}`; }
chat-ui/src/lib/utils/getShareUrl.ts/0
{ "file_path": "chat-ui/src/lib/utils/getShareUrl.ts", "repo_id": "chat-ui", "token_count": 102 }
64
import type { Message } from "$lib/types/Message";
import Handlebars from "handlebars";

Handlebars.registerHelper("ifUser", function (this: Pick<Message, "from" | "content">, options) {
	if (this.from == "user") return options.fn(this);
});

Handlebars.registerHelper(
	"ifAssistant",
	function (this: Pick<Message, "from" | "content">, options) {
		if (this.from == "assistant") return options.fn(this);
	}
);

export function compileTemplate<T>(input: string, model: { preprompt: string }) {
	const template = Handlebars.compile<T>(input, {
		knownHelpers: { ifUser: true, ifAssistant: true },
		knownHelpersOnly: true,
		noEscape: true,
		strict: true,
		preventIndent: true,
	});

	// qualify RuntimeOptions with the Handlebars namespace so the type resolves without a separate import
	return function render(inputs: T, options?: Handlebars.RuntimeOptions) {
		return template({ ...model, ...inputs }, options);
	};
}
chat-ui/src/lib/utils/template.ts/0
{ "file_path": "chat-ui/src/lib/utils/template.ts", "repo_id": "chat-ui", "token_count": 266 }
65
<script lang="ts"> import "../styles/main.css"; import { onDestroy, onMount } from "svelte"; import { goto, invalidate } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import { env as envPublic } from "$env/dynamic/public"; import { error } from "$lib/stores/errors"; import { createSettingsStore } from "$lib/stores/settings"; import { shareConversation } from "$lib/shareConversation"; import { UrlDependency } from "$lib/types/UrlDependency"; import Toast from "$lib/components/Toast.svelte"; import NavMenu from "$lib/components/NavMenu.svelte"; import MobileNav from "$lib/components/MobileNav.svelte"; import titleUpdate from "$lib/stores/titleUpdate"; import DisclaimerModal from "$lib/components/DisclaimerModal.svelte"; import ExpandNavigation from "$lib/components/ExpandNavigation.svelte"; import { PUBLIC_APP_DISCLAIMER } from "$env/static/public"; export let data; let isNavOpen = false; let isNavCollapsed = false; let errorToastTimeout: ReturnType<typeof setTimeout>; let currentError: string | null; async function onError() { // If a new different error comes, wait for the current error to hide first if ($error && currentError && $error !== currentError) { clearTimeout(errorToastTimeout); currentError = null; await new Promise((resolve) => setTimeout(resolve, 300)); } currentError = $error; errorToastTimeout = setTimeout(() => { $error = null; currentError = null; }, 3000); } async function deleteConversation(id: string) { try { const res = await fetch(`${base}/conversation/${id}`, { method: "DELETE", headers: { "Content-Type": "application/json", }, }); if (!res.ok) { $error = "Error while deleting conversation, try again."; return; } if ($page.params.id !== id) { await invalidate(UrlDependency.ConversationList); } else { await goto(`${base}/`, { invalidateAll: true }); } } catch (err) { console.error(err); $error = String(err); } } async function editConversationTitle(id: string, title: string) { try { const res = await fetch(`${base}/conversation/${id}`, { method: "PATCH", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ title }), }); if (!res.ok) { $error = "Error while editing title, try again."; return; } await invalidate(UrlDependency.ConversationList); } catch (err) { console.error(err); $error = String(err); } } onDestroy(() => { clearTimeout(errorToastTimeout); }); $: if ($error) onError(); $: if ($titleUpdate) { const convIdx = data.conversations.findIndex(({ id }) => id === $titleUpdate?.convId); if (convIdx != -1) { data.conversations[convIdx].title = $titleUpdate?.title ?? data.conversations[convIdx].title; } // update data.conversations data.conversations = [...data.conversations]; $titleUpdate = null; } const settings = createSettingsStore(data.settings); onMount(async () => { if ($page.url.searchParams.has("model")) { await settings .instantSet({ activeModel: $page.url.searchParams.get("model") ?? $settings.activeModel, }) .then(async () => { const query = new URLSearchParams($page.url.searchParams.toString()); query.delete("model"); await goto(`${base}/?${query.toString()}`, { invalidateAll: true, }); }); } if ($page.url.searchParams.has("tools")) { const tools = $page.url.searchParams.get("tools")?.split(","); await settings .instantSet({ tools: [...($settings.tools ?? []), ...(tools ?? 
[])], }) .then(async () => { const query = new URLSearchParams($page.url.searchParams.toString()); query.delete("tools"); await goto(`${base}/?${query.toString()}`, { invalidateAll: true, }); }); } }); $: mobileNavTitle = ["/models", "/assistants", "/privacy"].includes($page.route.id ?? "") ? "" : data.conversations.find((conv) => conv.id === $page.params.id)?.title; </script> <svelte:head> <title>{envPublic.PUBLIC_APP_NAME}</title> <meta name="description" content="The first open source alternative to ChatGPT. 💪" /> <meta name="twitter:card" content="summary_large_image" /> <meta name="twitter:site" content="@huggingface" /> <!-- use those meta tags everywhere except on the share assistant page --> <!-- feel free to refacto if there's a better way --> {#if !$page.url.pathname.includes("/assistant/") && $page.route.id !== "/assistants" && !$page.url.pathname.includes("/models/")} <meta property="og:title" content={envPublic.PUBLIC_APP_NAME} /> <meta property="og:type" content="website" /> <meta property="og:url" content="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}" /> <meta property="og:image" content="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/thumbnail.png" /> <meta property="og:description" content={envPublic.PUBLIC_APP_DESCRIPTION} /> {/if} <link rel="icon" href="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/favicon.ico" sizes="32x32" /> <link rel="icon" href="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/icon.svg" type="image/svg+xml" /> <link rel="apple-touch-icon" href="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/apple-touch-icon.png" /> <link rel="manifest" href="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/manifest.json" /> {#if envPublic.PUBLIC_PLAUSIBLE_SCRIPT_URL && envPublic.PUBLIC_ORIGIN} <script defer data-domain={new URL(envPublic.PUBLIC_ORIGIN).hostname} src={envPublic.PUBLIC_PLAUSIBLE_SCRIPT_URL} ></script> {/if} {#if envPublic.PUBLIC_APPLE_APP_ID} <meta name="apple-itunes-app" content={`app-id=${envPublic.PUBLIC_APPLE_APP_ID}`} /> {/if} </svelte:head> {#if !$settings.ethicsModalAccepted && $page.url.pathname !== `${base}/privacy` && PUBLIC_APP_DISCLAIMER === "1"} <DisclaimerModal /> {/if} <ExpandNavigation isCollapsed={isNavCollapsed} on:click={() => (isNavCollapsed = !isNavCollapsed)} classNames="absolute inset-y-0 z-10 my-auto {!isNavCollapsed ? 'left-[280px]' : 'left-0'} *:transition-transform" /> <div class="grid h-full w-screen grid-cols-1 grid-rows-[auto,1fr] overflow-hidden text-smd {!isNavCollapsed ? 
'md:grid-cols-[280px,1fr]' : 'md:grid-cols-[0px,1fr]'} transition-[300ms] [transition-property:grid-template-columns] dark:text-gray-300 md:grid-rows-[1fr]" > <MobileNav isOpen={isNavOpen} on:toggle={(ev) => (isNavOpen = ev.detail)} title={mobileNavTitle}> <NavMenu conversations={data.conversations} user={data.user} canLogin={data.user === undefined && data.loginEnabled} on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)} on:deleteConversation={(ev) => deleteConversation(ev.detail)} on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)} /> </MobileNav> <nav class=" grid max-h-screen grid-cols-1 grid-rows-[auto,1fr,auto] overflow-hidden *:w-[280px] max-md:hidden" > <NavMenu conversations={data.conversations} user={data.user} canLogin={data.user === undefined && data.loginEnabled} on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)} on:deleteConversation={(ev) => deleteConversation(ev.detail)} on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)} /> </nav> {#if currentError} <Toast message={currentError} /> {/if} <slot /> </div>
chat-ui/src/routes/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/+layout.svelte", "repo_id": "chat-ui", "token_count": 3051 }
66
import ChatThumbnail from "./ChatThumbnail.svelte";
import { collections } from "$lib/server/database";
import { error, type RequestHandler } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import type { SvelteComponent } from "svelte";

import { Resvg } from "@resvg/resvg-js";
import satori from "satori";
import { html } from "satori-html";

import InterRegular from "../../../../../static/fonts/Inter-Regular.ttf";
import InterBold from "../../../../../static/fonts/Inter-Bold.ttf";
import sharp from "sharp";

export const GET: RequestHandler = (async ({ params }) => {
	const assistant = await collections.assistants.findOne({
		_id: new ObjectId(params.assistantId),
	});

	if (!assistant) {
		error(404, "Assistant not found.");
	}

	let avatar = "";
	const fileId = collections.bucket.find({ filename: assistant._id.toString() });
	const file = await fileId.next();
	if (file) {
		avatar = await (async () => {
			const fileStream = collections.bucket.openDownloadStream(file?._id);

			const fileBuffer = await new Promise<Buffer>((resolve, reject) => {
				const chunks: Uint8Array[] = [];
				fileStream.on("data", (chunk) => chunks.push(chunk));
				fileStream.on("error", reject);
				fileStream.on("end", () => resolve(Buffer.concat(chunks)));
			});

			return fileBuffer;
		})()
			.then(async (buf) => sharp(buf).jpeg().toBuffer()) // convert to jpeg bc satori png is really slow
			.then(async (buf) => "data:image/jpeg;base64," + buf.toString("base64"));
	}

	const renderedComponent = (ChatThumbnail as unknown as SvelteComponent).render({
		name: assistant.name,
		description: assistant.description,
		createdByName: assistant.createdByName,
		avatar,
	});

	const reactLike = html(
		"<style>" + renderedComponent.css.code + "</style>" + renderedComponent.html
	);

	const svg = await satori(reactLike, {
		width: 1200,
		height: 648,
		fonts: [
			{
				name: "Inter",
				data: InterRegular as unknown as ArrayBuffer,
				weight: 500,
			},
			{
				name: "Inter",
				data: InterBold as unknown as ArrayBuffer,
				weight: 700,
			},
		],
	});

	const png = new Resvg(svg, {
		fitTo: { mode: "original" },
	})
		.render()
		.asPng();

	return new Response(png, {
		headers: {
			"Content-Type": "image/png",
		},
	});
}) satisfies RequestHandler;
chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts", "repo_id": "chat-ui", "token_count": 832 }
67
import { redirect, error } from "@sveltejs/kit";
import { getOIDCUserData, validateAndParseCsrfToken } from "$lib/server/auth";
import { z } from "zod";
import { base } from "$app/paths";
import { updateUser } from "./updateUser";
import { env } from "$env/dynamic/private";
import JSON5 from "json5";

const allowedUserEmails = z
	.array(z.string().email())
	.optional()
	.default([])
	.parse(JSON5.parse(env.ALLOWED_USER_EMAILS));

export async function load({ url, locals, cookies, request, getClientAddress }) {
	const { error: errorName, error_description: errorDescription } = z
		.object({
			error: z.string().optional(),
			error_description: z.string().optional(),
		})
		.parse(Object.fromEntries(url.searchParams.entries()));

	if (errorName) {
		error(400, errorName + (errorDescription ? ": " + errorDescription : ""));
	}

	const { code, state, iss } = z
		.object({
			code: z.string(),
			state: z.string(),
			iss: z.string().optional(),
		})
		.parse(Object.fromEntries(url.searchParams.entries()));

	const csrfToken = Buffer.from(state, "base64").toString("utf-8");

	const validatedToken = await validateAndParseCsrfToken(csrfToken, locals.sessionId);

	if (!validatedToken) {
		error(403, "Invalid or expired CSRF token");
	}

	const { userData } = await getOIDCUserData(
		{ redirectURI: validatedToken.redirectUrl },
		code,
		iss
	);

	// Filter by allowed user emails
	if (allowedUserEmails.length > 0) {
		if (!userData.email) {
			error(403, "User not allowed: email not returned");
		}
		const emailVerified = userData.email_verified ?? true;
		if (!emailVerified) {
			error(403, "User not allowed: email not verified");
		}

		if (!allowedUserEmails.includes(userData.email)) {
			error(403, "User not allowed");
		}
	}

	await updateUser({
		userData,
		locals,
		cookies,
		userAgent: request.headers.get("user-agent") ?? undefined,
		ip: getClientAddress(),
	});

	redirect(302, `${base}/`);
}
chat-ui/src/routes/login/callback/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/+page.server.ts", "repo_id": "chat-ui", "token_count": 690 }
68
import { collections } from "$lib/server/database"; import { type Actions, fail, redirect } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { authCondition } from "$lib/server/auth"; import { base } from "$app/paths"; import { env as envPublic } from "$env/dynamic/public"; import { env } from "$env/dynamic/private"; import { z } from "zod"; import type { Assistant } from "$lib/types/Assistant"; import { logger } from "$lib/server/logger"; async function assistantOnlyIfAuthor(locals: App.Locals, assistantId?: string) { const assistant = await collections.assistants.findOne({ _id: new ObjectId(assistantId) }); if (!assistant) { throw Error("Assistant not found"); } if ( assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString() && !locals.user?.isAdmin ) { throw Error("You are not the author of this assistant"); } return assistant; } export const actions: Actions = { delete: async ({ params, locals }) => { let assistant; try { assistant = await assistantOnlyIfAuthor(locals, params.assistantId); } catch (e) { return fail(400, { error: true, message: (e as Error).message }); } await collections.assistants.deleteOne({ _id: assistant._id }); // and remove it from all users settings await collections.settings.updateMany( { assistants: { $in: [assistant._id] }, }, { $pull: { assistants: assistant._id }, } ); // and delete all avatars const fileCursor = collections.bucket.find({ filename: assistant._id.toString() }); // Step 2: Delete the existing file if it exists let fileId = await fileCursor.next(); while (fileId) { await collections.bucket.delete(fileId._id); fileId = await fileCursor.next(); } redirect(302, `${base}/settings`); }, report: async ({ request, params, locals, url }) => { // is there already a report from this user for this model ? const report = await collections.reports.findOne({ createdBy: locals.user?._id ?? locals.sessionId, object: "assistant", contentId: new ObjectId(params.assistantId), }); if (report) { return fail(400, { error: true, message: "Already reported" }); } const formData = await request.formData(); const result = z.string().min(1).max(128).safeParse(formData?.get("reportReason")); if (!result.success) { return fail(400, { error: true, message: "Invalid report reason" }); } const { acknowledged } = await collections.reports.insertOne({ _id: new ObjectId(), contentId: new ObjectId(params.assistantId), object: "assistant", createdBy: locals.user?._id ?? locals.sessionId, createdAt: new Date(), updatedAt: new Date(), reason: result.data, }); if (!acknowledged) { return fail(500, { error: true, message: "Failed to report assistant" }); } if (env.WEBHOOK_URL_REPORT_ASSISTANT) { const prefixUrl = envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}`; const assistantUrl = `${prefixUrl}/assistant/${params.assistantId}`; const assistant = await collections.assistants.findOne<Pick<Assistant, "name">>( { _id: new ObjectId(params.assistantId) }, { projection: { name: 1 } } ); const username = locals.user?.username; const res = await fetch(env.WEBHOOK_URL_REPORT_ASSISTANT, { method: "POST", headers: { "Content-type": "application/json", }, body: JSON.stringify({ text: `Assistant <${assistantUrl}|${assistant?.name}> reported by ${ username ? `<http://hf.co/${username}|${username}>` : "non-logged in user" }.\n\n> ${result.data}`, }), }); if (!res.ok) { logger.error(`Webhook assistant report failed. 
${res.statusText} ${res.text}`); } } return { from: "report", ok: true, message: "Assistant reported" }; }, subscribe: async ({ params, locals }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } // don't push if it's already there const settings = await collections.settings.findOne(authCondition(locals)); if (settings?.assistants?.includes(assistant._id)) { return fail(400, { error: true, message: "Already subscribed" }); } const result = await collections.settings.updateOne(authCondition(locals), { $addToSet: { assistants: assistant._id }, }); // reduce count only if push succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: 1 } }); } return { from: "subscribe", ok: true, message: "Assistant added" }; }, unsubscribe: async ({ params, locals }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } const result = await collections.settings.updateOne(authCondition(locals), { $pull: { assistants: assistant._id }, }); // reduce count only if pull succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: -1 } }); } redirect(302, `${base}/settings`); }, unfeature: async ({ params, locals }) => { if (!locals.user?.isAdmin) { return fail(403, { error: true, message: "Permission denied" }); } const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } const result = await collections.assistants.updateOne( { _id: assistant._id }, { $set: { featured: false } } ); if (result.modifiedCount === 0) { return fail(500, { error: true, message: "Failed to unfeature assistant" }); } return { from: "unfeature", ok: true, message: "Assistant unfeatured" }; }, feature: async ({ params, locals }) => { if (!locals.user?.isAdmin) { return fail(403, { error: true, message: "Permission denied" }); } const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } const result = await collections.assistants.updateOne( { _id: assistant._id }, { $set: { featured: true } } ); if (result.modifiedCount === 0) { return fail(500, { error: true, message: "Failed to feature assistant" }); } return { from: "feature", ok: true, message: "Assistant featured" }; }, };
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts", "repo_id": "chat-ui", "token_count": 2355 }
69
<script lang="ts"> export let type: string; export let value: string | boolean | number; export let disabled: boolean = false; let innerValue: string | boolean | number = (() => { if (type === "bool") { return Boolean(value) || false; } else if (type === "int" || type === "float") { return Number(value) || 0; } else { return value || ""; } })(); let previousValue: string | boolean | number = innerValue; $: value = typeof innerValue === "string" ? innerValue : innerValue.toString(); </script> {#if type === "str" && typeof innerValue === "string"} <input type="text" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={innerValue} {disabled} /> {:else if type === "int" && typeof innerValue === "number"} <input type="number" step="1" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" {disabled} on:input={(e) => { const value = e.currentTarget.value; if (value === "" || isNaN(parseInt(value))) { innerValue = previousValue; e.currentTarget.value = previousValue.toString(); return; } else { innerValue = parseFloat(value); previousValue = innerValue; } }} value={innerValue} /> {:else if type === "float" && typeof innerValue === "number"} <input type="number" step="0.001" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" {disabled} on:input={(e) => { const value = e.currentTarget.value; if (value === "" || isNaN(parseFloat(value))) { innerValue = previousValue; e.currentTarget.value = previousValue.toString(); return; } else { innerValue = parseFloat(value); previousValue = innerValue; } }} value={innerValue} /> {:else if type === "bool" && typeof innerValue === "boolean"} <input type="checkbox" class="peer my-auto mr-4 size-6 rounded-lg border-2 border-gray-200 bg-gray-100 p-1" bind:checked={innerValue} /> <!-- Literal['bigvgan_24khz_100band', 'bigvgan_base_24khz_100band', 'bigvgan_22khz_80band', 'bigvgan_base_22khz_80band', 'bigvgan_v2_22khz_80band_256x', 'bigvgan_v2_22khz_80band_fmax8k_256x', 'bigvgan_v2_24khz_100band_256x', 'bigvgan_v2_44khz_128band_256x', 'bigvgan_v2_44khz_128band_512x'] --> {:else if type.startsWith("Literal[") && typeof innerValue === "string"} {@const options = type .slice(8, -1) .split(",") .map((option) => option.trim().replaceAll("'", ""))} <select class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={innerValue} {disabled} > {#each options as option} <option value={option}>{option}</option> {/each} </select> {:else} <span class="font-mono text-red-800">{innerValue}-{typeof innerValue}</span> {/if}
chat-ui/src/routes/tools/ToolInputComponent.svelte/0
{ "file_path": "chat-ui/src/routes/tools/ToolInputComponent.svelte", "repo_id": "chat-ui", "token_count": 1112 }
70
import { sveltekit } from "@sveltejs/kit/vite";
import Icons from "unplugin-icons/vite";
import { promises } from "fs";
import { defineConfig } from "vitest/config";

// used to load fonts server side for thumbnail generation
function loadTTFAsArrayBuffer() {
	return {
		name: "load-ttf-as-array-buffer",
		async transform(_src, id) {
			if (id.endsWith(".ttf")) {
				return `export default new Uint8Array([${new Uint8Array(
					await promises.readFile(id)
				)}]).buffer`;
			}
		},
	};
}

export default defineConfig({
	plugins: [
		sveltekit(),
		Icons({
			compiler: "svelte",
		}),
		loadTTFAsArrayBuffer(),
	],
	optimizeDeps: {
		include: [
			"browser-image-resizer",
			"uuid",
			"@huggingface/transformers",
			"sharp",
			"@gradio/client",
		],
	},
	server: {
		open: "/",
	},
	test: {
		setupFiles: ["./scripts/setupTest.ts"],
		deps: { inline: ["@sveltejs/kit"] },
		globals: true,
	},
});
chat-ui/vite.config.ts/0
{ "file_path": "chat-ui/vite.config.ts", "repo_id": "chat-ui", "token_count": 385 }
71
repos:
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # https://github.com/charliermarsh/ruff#usage
    rev: 'v0.3.0'
    hooks:
      # Run the linter.
      - id: ruff
        args: [ --fix ]
      # Run the formatter.
      - id: ruff-format
datasets/.pre-commit-config.yaml/0
{ "file_path": "datasets/.pre-commit-config.yaml", "repo_id": "datasets", "token_count": 122 }
72
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
datasets/benchmarks/benchmark_map_filter.py/0
{ "file_path": "datasets/benchmarks/benchmark_map_filter.py", "repo_id": "datasets", "token_count": 996 }
73
# Build and load

Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With 🤗 Datasets, there are more than 900 datasets available to help you get started with your NLP task. All you have to do is call [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use.

## ELI5: `load_dataset`

Let's begin with a basic Explain Like I'm Five.

A dataset is a directory that contains:

- Some data files in generic formats (JSON, CSV, Parquet, text, etc.)
- A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the dataset's tags and configurations
- An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures.

The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub.
The Hub is a central repository where all the Hugging Face datasets and models are stored.

If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.).
Under the hood, 🤗 Datasets will use an appropriate [`DatasetBuilder`] based on the data files format. There is one builder per data file format in 🤗 Datasets:

* [`datasets.packaged_modules.text.Text`] for text
* [`datasets.packaged_modules.csv.Csv`] for CSV and TSV
* [`datasets.packaged_modules.json.Json`] for JSON and JSONL
* [`datasets.packaged_modules.parquet.Parquet`] for Parquet
* [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format)
* [`datasets.packaged_modules.sql.Sql`] for SQL databases
* [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders
* [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders

If the dataset has a dataset script, then it downloads and imports it from the Hugging Face Hub.
Code in the dataset script defines a custom [`DatasetBuilder`] with the dataset information (description, features, URL to the original files, etc.), and tells 🤗 Datasets how to generate and display examples from it.

<Tip>

Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script!

</Tip>

🤗 Datasets downloads the dataset files from the original URL, generates the dataset and caches it in an Arrow table on your drive.
If you've downloaded the dataset before, then 🤗 Datasets will reload it from the cache to save you the trouble of downloading it again.

Now that you have a high-level understanding about how datasets are built, let's take a closer look at the nuts and bolts of how all this works.

## Building a dataset

When you load a dataset for the first time, 🤗 Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`].

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/builderconfig.png"/>
</div>

### BuilderConfig[[datasets-builderconfig]]

[`BuilderConfig`] is the configuration class of [`DatasetBuilder`]. The [`BuilderConfig`] contains the following basic attributes about a dataset:

| Attribute     | Description                                                   |
|---------------|---------------------------------------------------------------|
| `name`        | Short name of the dataset.                                    |
| `version`     | Dataset version identifier.                                   |
| `data_dir`    | Stores the path to a local folder containing the data files.  |
| `data_files`  | Stores paths to local data files.                             |
| `description` | Description of the dataset.                                   |

If you want to add additional attributes to your dataset such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass:

- Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the dataset's [`DatasetBuilder.BUILDER_CONFIGS`] attribute.
- When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected.

You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`].

### DatasetBuilder[[datasets-datasetbuilder]]

[`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasetbuilder.png"/>
</div>

There are three main methods in [`DatasetBuilder`]:

1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, 🤗 Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset. They provide the names and types of each column.

2. [`DatasetBuilder._split_generators`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted.

   Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split.

3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the last method.

   The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written by batch.
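To make these three methods concrete, here is a minimal sketch of a hypothetical generator-based builder. The class name, URL and features are invented for illustration and are not part of any real dataset:

```py
import json

import datasets


class MyDataset(datasets.GeneratorBasedBuilder):
    """A toy builder that reads a single (hypothetical) JSON Lines file."""

    def _info(self):
        # Define the dataset attributes and the "skeleton" of each example
        return datasets.DatasetInfo(
            description="Toy dataset used to illustrate the three builder methods.",
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=["neg", "pos"])}
            ),
        )

    def _split_generators(self, dl_manager):
        # Download (or fetch locally) the data files and organize them into splits
        filepath = dl_manager.download_and_extract("https://example.com/train.jsonl")
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]

    def _generate_examples(self, filepath):
        # Yield (key, example) pairs one at a time, so the whole file never sits in memory
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                yield key, {"text": row["text"], "label": row["label"]}
```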
If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB.

## Maintaining integrity

To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected. [`load_dataset`] verifies:

- The number of splits in the generated `DatasetDict`.
- The number of samples in each split of the generated `DatasetDict`.
- The list of downloaded files.
- The SHA256 checksums of the downloaded files (disabled by default).

If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files.

<Tip>

If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata.

</Tip>

In this case, an error is raised to alert that the dataset has changed.
To ignore the error, one needs to specify `verification_mode="no_checks"` in [`load_dataset`].
Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.

## Security

The dataset repositories on the Hub are scanned for malware, see more information [here](https://huggingface.co/docs/hub/security#malware-scanning).

Moreover, the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers. The code of these datasets is considered **safe**.
It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
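For reference, the `verification_mode` override mentioned in the integrity section above looks like this; the repository name is a placeholder:

```py
>>> from datasets import load_dataset

>>> # Skip the split, sample-count and checksum verifications described above
>>> ds = load_dataset("username/dataset_name", verification_mode="no_checks")
```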
datasets/docs/source/about_dataset_load.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_load.mdx", "repo_id": "datasets", "token_count": 2537 }
74
# Image classification

Image classification datasets are used to train a model to classify an entire image. There are a wide variety of applications enabled by these datasets such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset.

Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:

```bash
pip install -U albumentations opencv-python
```

This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf.

Load the dataset and take a look at an example:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("beans")
>>> dataset["train"][10]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7F8D2F4D7A10>,
 'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg',
 'labels': 0}
```

The dataset has three fields:

* `image`: a PIL image object.
* `image_file_path`: the path to the image file.
* `labels`: the label or category of the image.

Next, check out an image:

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf.png">
</div>

Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness.

```py
>>> import cv2
>>> import albumentations
>>> import numpy as np

>>> transform = albumentations.Compose([
...     albumentations.RandomCrop(width=256, height=256),
...     albumentations.HorizontalFlip(p=0.5),
...     albumentations.RandomBrightnessContrast(p=0.2),
... ])
```

Create a function to apply the transformation to the images:

```py
>>> def transforms(examples):
...     examples["pixel_values"] = [
...         transform(image=np.array(image))["image"] for image in examples["image"]
...     ]
...
...     return examples
```

Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:

```py
>>> dataset.set_transform(transforms)
```

You can verify the transformation worked by indexing into the `pixel_values` of the first example:

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset["train"][0]["pixel_values"]
>>> plt.imshow(img)
```

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png">
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"/>
</div>

<Tip>

Now that you know how to process a dataset for image classification, learn
[how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)
and use it for inference.

</Tip>
datasets/docs/source/image_classification.mdx/0
{ "file_path": "datasets/docs/source/image_classification.mdx", "repo_id": "datasets", "token_count": 1043 }
75
# Table Classes

Each `Dataset` object is backed by a PyArrow Table.
A Table can be loaded from either the disk (memory mapped) or in memory.
Several Table types are available, and they all inherit from [`table.Table`].

## Table

[[autodoc]] datasets.table.Table
    - validate
    - equals
    - to_batches
    - to_pydict
    - to_pandas
    - to_string
    - field
    - column
    - itercolumns
    - schema
    - columns
    - num_columns
    - num_rows
    - shape
    - nbytes

## InMemoryTable

[[autodoc]] datasets.table.InMemoryTable
    - validate
    - equals
    - to_batches
    - to_pydict
    - to_pandas
    - to_string
    - field
    - column
    - itercolumns
    - schema
    - columns
    - num_columns
    - num_rows
    - shape
    - nbytes
    - column_names
    - slice
    - filter
    - flatten
    - combine_chunks
    - cast
    - replace_schema_metadata
    - add_column
    - append_column
    - remove_column
    - set_column
    - rename_columns
    - select
    - drop
    - from_file
    - from_buffer
    - from_pandas
    - from_arrays
    - from_pydict
    - from_batches

## MemoryMappedTable

[[autodoc]] datasets.table.MemoryMappedTable
    - validate
    - equals
    - to_batches
    - to_pydict
    - to_pandas
    - to_string
    - field
    - column
    - itercolumns
    - schema
    - columns
    - num_columns
    - num_rows
    - shape
    - nbytes
    - column_names
    - slice
    - filter
    - flatten
    - combine_chunks
    - cast
    - replace_schema_metadata
    - add_column
    - append_column
    - remove_column
    - set_column
    - rename_columns
    - select
    - drop
    - from_file

## ConcatenationTable

[[autodoc]] datasets.table.ConcatenationTable
    - validate
    - equals
    - to_batches
    - to_pydict
    - to_pandas
    - to_string
    - field
    - column
    - itercolumns
    - schema
    - columns
    - num_columns
    - num_rows
    - shape
    - nbytes
    - column_names
    - slice
    - filter
    - flatten
    - combine_chunks
    - cast
    - replace_schema_metadata
    - add_column
    - append_column
    - remove_column
    - set_column
    - rename_columns
    - select
    - drop
    - from_blocks
    - from_tables

## Utils

[[autodoc]] datasets.table.concat_tables

[[autodoc]] datasets.table.list_table_cache_files
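As a quick orientation to how these classes fit together, here is a minimal sketch using the methods documented above (`from_pydict`, `from_file` and `concat_tables`); the `data.arrow` file name is illustrative:

```py
>>> import pyarrow as pa
>>> from datasets.table import InMemoryTable, MemoryMappedTable, concat_tables

>>> in_memory = InMemoryTable.from_pydict({"id": [0, 1], "text": ["foo", "bar"]})

>>> # Write the wrapped pyarrow.Table in the Arrow streaming format, then memory-map it back
>>> with pa.ipc.new_stream("data.arrow", in_memory.schema) as writer:
...     writer.write_table(in_memory.table)
>>> memory_mapped = MemoryMappedTable.from_file("data.arrow")

>>> # Concatenating tables of mixed origins yields a ConcatenationTable
>>> combined = concat_tables([in_memory, memory_mapped])
>>> type(combined).__name__, combined.num_rows
('ConcatenationTable', 4)
```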
datasets/docs/source/package_reference/table_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/table_classes.mdx", "repo_id": "datasets", "token_count": 1029 }
76
# Using Datasets with TensorFlow This document is a quick introduction to using `datasets` with TensorFlow, with a particular focus on how to get `tf.Tensor` objects out of our datasets, and how to stream data from Hugging Face `Dataset` objects to Keras methods like `model.fit()`. ## Dataset format By default, datasets return regular Python objects: integers, floats, strings, lists, etc. To get TensorFlow tensors instead, you can set the format of the dataset to `tf`: ```py >>> from datasets import Dataset >>> data = [[1, 2],[3, 4]] >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("tf") >>> ds[0] {'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>} >>> ds[:2] {'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[1, 2], [3, 4]])>} ``` <Tip> A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to TensorFlow tensors. </Tip> This can be useful for converting your dataset to a dict of `Tensor` objects, or for writing a generator to load TF samples from it. If you wish to convert the entire dataset to `Tensor`, simply query the full dataset: ```py >>> ds[:] {'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[1, 2], [3, 4]])>} ``` ### N-dimensional arrays If your dataset consists of N-dimensional arrays, you will see that by default they are considered as the same tensor if the shape is fixed: ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] # fixed shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("tf") >>> ds[0] {'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[1, 2], [3, 4]])>} ``` Otherwise, a TensorFlow formatted dataset outputs a `RaggedTensor` instead of a single tensor: ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3]],[[4, 5, 6],[7, 8]]] # varying shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("torch") >>> ds[0] {'data': <tf.RaggedTensor [[1, 2], [3]]>} ``` However this logic often requires slow shape comparisons and data copies. 
To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors: ```py >>> from datasets import Dataset, Features, Array2D >>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] >>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')}) >>> ds = Dataset.from_dict({"data": data}, features=features) >>> ds = ds.with_format("tf") >>> ds[0] {'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[1, 2], [3, 4]])>} >>> ds[:2] {'data': <tf.Tensor: shape=(2, 2, 2), dtype=int64, numpy= array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])>} ``` ### Other feature types [`ClassLabel`] data are properly converted to tensors: ```py >>> from datasets import Dataset, Features, ClassLabel >>> labels = [0, 0, 1] >>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) >>> ds = Dataset.from_dict({"label": labels}, features=features) >>> ds = ds.with_format("tf") >>> ds[:3] {'label': <tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 0, 1])>} ``` Strings and binary objects are also supported: ```py >>> from datasets import Dataset, Features >>> text = ["foo", "bar"] >>> data = [0, 1] >>> ds = Dataset.from_dict({"text": text, "data": data}) >>> ds = ds.with_format("tf") >>> ds[:2] {'text': <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'foo', b'bar'], dtype=object)>, 'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>} ``` You can also explicitly format certain columns and leave the other columns unformatted: ```py >>> ds = ds.with_format("tf", columns=["data"], output_all_columns=True) >>> ds[:2] {'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>, 'text': ['foo', 'bar']} ``` String and binary objects are unchanged, since PyTorch only supports numbers. The [`Image`] and [`Audio`] feature types are also supported. <Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("tf") >>> ds[0] {'image': <tf.Tensor: shape=(512, 512, 4), dtype=uint8, numpy= array([[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]], dtype=uint8)>} >>> ds[:2] {'image': <tf.Tensor: shape=(2, 512, 512, 4), dtype=uint8, numpy= array([[[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]]], dtype=uint8)>} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("tf") >>> ds[0]["audio"]["array"] <tf.Tensor: shape=(202311,), dtype=float32, numpy= array([ 6.1035156e-05, 1.5258789e-05, 1.6784668e-04, ..., -1.5258789e-05, -1.5258789e-05, 1.5258789e-05], dtype=float32)> >>> ds[0]["audio"]["sampling_rate"] <tf.Tensor: shape=(), dtype=int32, numpy=44100> ``` ## Data loading Although you can load individual samples and batches just by indexing into your dataset, this won't work if you want to use Keras methods like `fit()` and `predict()`. 
You could write a generator function that shuffles and loads batches from your dataset and call `fit()` on that, but that sounds like a lot of unnecessary work. Instead, if you want to stream data from your dataset on-the-fly, we recommend converting your dataset to a `tf.data.Dataset` using the `to_tf_dataset()` method.

The `tf.data.Dataset` class covers a wide range of use-cases - it is often created from Tensors in memory, or using a load function to read files on disk or external storage. The dataset can be transformed arbitrarily with the `map()` method, or methods like `batch()` and `shuffle()` can be used to create a dataset that's ready for training. These methods do not modify the stored data in any way - instead, the methods build a data pipeline graph that will be executed when the dataset is iterated over, usually during model training or inference. This is different from the `map()` method of Hugging Face `Dataset` objects, which runs the map function immediately and saves the new or changed columns.

Since the entire data preprocessing pipeline can be compiled in a `tf.data.Dataset`, this approach allows for massively parallel, asynchronous data loading and training. However, the requirement for graph compilation can be a limitation, particularly for Hugging Face tokenizers, which are usually not (yet!) compilable as part of a TF graph. As a result, we usually advise pre-processing the dataset as a Hugging Face dataset, where arbitrary Python functions can be used, and then converting to `tf.data.Dataset` afterwards using `to_tf_dataset()` to get a batched dataset ready for training. To see examples of this approach, please see the [examples](https://github.com/huggingface/transformers/tree/main/examples) or [notebooks](https://huggingface.co/docs/transformers/notebooks) for `transformers`.

### Using `to_tf_dataset()`

Using `to_tf_dataset()` is straightforward. Once your dataset is preprocessed and ready, simply call it like so:

```py
>>> from datasets import Dataset
>>> data = {"inputs": [[1, 2],[3, 4]], "labels": [0, 1]}
>>> ds = Dataset.from_dict(data)
>>> tf_ds = ds.to_tf_dataset(
            columns=["inputs"],
            label_cols=["labels"],
            batch_size=2,
            shuffle=True
            )
```

The returned `tf_ds` object here is now fully ready to train on, and can be passed directly to `model.fit()`. Note that you set the batch size when creating the dataset, and so you don't need to specify it when calling `fit()`:

```py
>>> model.fit(tf_ds, epochs=2)
```

For a full description of the arguments, please see the [`~Dataset.to_tf_dataset`] documentation. In many cases, you will also need to add a `collate_fn` to your call. This is a function that takes multiple elements of the dataset and combines them into a single batch. When all elements have the same length, the built-in default collator will suffice, but for more complex tasks a custom collator may be necessary. In particular, many tasks have samples with varying sequence lengths, which will require a [data collator](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator) that can pad batches correctly. You can see examples of this in the `transformers` NLP [examples](https://github.com/huggingface/transformers/tree/main/examples) and [notebooks](https://huggingface.co/docs/transformers/notebooks), where variable sequence lengths are very common; a sketch of a simple padding collator is also shown at the end of this section.

If you find that loading with `to_tf_dataset` is slow, you can also use the `num_workers` argument. This spins up multiple subprocesses to load data in parallel. This feature is recent and still somewhat experimental - please file an issue if you encounter any bugs while using it!
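As a rough, hedged illustration of such a collator - the column names `input_ids`/`labels`, the pad value of `0`, and the helper name are all made up for this sketch, not part of any `transformers` API:

```py
>>> import numpy as np
>>> from datasets import Dataset
>>> data = {"input_ids": [[1, 2, 3], [4, 5], [6]], "labels": [0, 1, 0]}
>>> ds = Dataset.from_dict(data)
>>> def pad_collate_fn(features):
...     # Pad each batch to the length of its longest member only,
...     # not to the longest sample in the entire dataset.
...     max_len = max(len(f["input_ids"]) for f in features)
...     return {
...         "input_ids": np.array(
...             [f["input_ids"] + [0] * (max_len - len(f["input_ids"])) for f in features]
...         ),
...         "labels": np.array([f["labels"] for f in features]),
...     }
>>> tf_ds = ds.to_tf_dataset(
...     columns=["input_ids"],
...     label_cols=["labels"],
...     batch_size=2,
...     shuffle=True,
...     collate_fn=pad_collate_fn,
... )
```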
### When to use `to_tf_dataset()`

The astute reader may have noticed at this point that we have offered two approaches to achieve the same goal - if you want to pass your dataset to a TensorFlow model, you can either convert the dataset to a `Tensor` or `dict` of `Tensors` using `.with_format('tf')`, or you can convert the dataset to a `tf.data.Dataset` with `to_tf_dataset()`. Either of these can be passed to `model.fit()`, so which should you choose?

The key thing to recognize is that when you convert the whole dataset to `Tensor`s, it is static and fully loaded into RAM. This is simple and convenient, but if any of the following apply, you should probably use `to_tf_dataset()` instead:

- Your dataset is too large to fit in RAM. `to_tf_dataset()` streams only one batch at a time, so even very large datasets can be handled with this method.
- You want to apply random transformations using `dataset.with_transform()` or the `collate_fn`. This is common in several modalities, such as image augmentations when training vision models, or random masking when training masked language models. Using `to_tf_dataset()` will apply those transformations at the moment when a batch is loaded, which means the same samples will get different augmentations each time they are loaded. This is usually what you want; a sketch of this pattern is shown at the end of this page.
- Your data has a variable dimension, such as input texts in NLP that consist of varying numbers of tokens. When you create a batch with samples with a variable dimension, the standard solution is to pad the shorter samples to the length of the longest one. When you stream samples from a dataset with `to_tf_dataset`, you can apply this padding to each batch via your `collate_fn`. However, if you want to convert such a dataset to dense `Tensor`s, then you will have to pad samples to the length of the longest sample in *the entire dataset!* This can result in huge amounts of padding, which wastes memory and reduces your model's speed.

### Caveats and limitations

Right now, `to_tf_dataset()` always returns a batched dataset - we will add support for unbatched datasets soon!
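As referenced above, here is a minimal, hedged sketch of a lazy random transformation; the `pixel_values` column, the flip augmentation, and the data shapes are invented for illustration, and it assumes (as this guide states) that `to_tf_dataset()` applies the transform at batch-load time:

```py
>>> import numpy as np
>>> from datasets import Dataset
>>> ds = Dataset.from_dict(
...     {"pixel_values": [[[0.1, 0.2], [0.3, 0.4]]] * 4, "labels": [0, 1, 0, 1]}
... )
>>> def random_flip(batch):
...     # Runs at access time, so each epoch sees freshly augmented samples.
...     batch["pixel_values"] = [
...         np.fliplr(img).tolist() if np.random.rand() < 0.5 else img
...         for img in batch["pixel_values"]
...     ]
...     return batch
>>> ds = ds.with_transform(random_flip)
>>> tf_ds = ds.to_tf_dataset(columns=["pixel_values"], label_cols=["labels"], batch_size=2)
```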
datasets/docs/source/use_with_tensorflow.mdx/0
{ "file_path": "datasets/docs/source/use_with_tensorflow.mdx", "repo_id": "datasets", "token_count": 3825 }
77
import platform from argparse import ArgumentParser import fsspec import huggingface_hub import pandas import pyarrow from datasets import __version__ as version from datasets.commands import BaseDatasetsCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env", help="Print relevant system environment info.") download_parser.set_defaults(func=info_command_factory) def run(self): info = { "`datasets` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "`huggingface_hub` version": huggingface_hub.__version__, "PyArrow version": pyarrow.__version__, "Pandas version": pandas.__version__, "`fsspec` version": fsspec.__version__, } print("\nCopy-and-paste the text below in your GitHub issue.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
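For orientation, a hedged sketch of exercising this command directly in Python rather than through the `datasets-cli env` console entry point (which is where `register_subcommand` is normally wired up):

```py
# A minimal sketch: run the environment report without the CLI wrapper.
from datasets.commands.env import EnvironmentCommand

command = EnvironmentCommand()
info = command.run()  # prints the report and returns the dict of versions
```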
datasets/src/datasets/commands/env.py/0
{ "file_path": "datasets/src/datasets/commands/env.py", "repo_id": "datasets", "token_count": 476 }
78
import importlib import shutil import warnings from typing import List import fsspec import fsspec.asyn from fsspec.implementations.local import LocalFileSystem from . import compression COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [ compression.Bz2FileSystem, compression.GzipFileSystem, compression.Lz4FileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: """ Checks if `fs` is a remote filesystem. Args: fs (`fsspec.spec.AbstractFileSystem`): An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or `s3fs.S3FileSystem`. """ return not isinstance(fs, LocalFileSystem) def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str): """ Renames the file `src` in `fs` to `dst`. """ if not is_remote_filesystem(fs): # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst)) else: fs.mv(src, dst, recursive=True)
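A minimal sketch of the two helpers above against the local filesystem; the paths in the commented call are placeholders, not real files:

```py
import fsspec

from datasets.filesystems import is_remote_filesystem, rename

fs = fsspec.filesystem("file")
print(is_remote_filesystem(fs))  # False: LocalFileSystem counts as local

# For a local fs, rename() dispatches to shutil.move instead of fs.mv;
# uncomment with real paths to try it.
# rename(fs, "/tmp/src.txt", "/tmp/dst.txt")
```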
datasets/src/datasets/filesystems/__init__.py/0
{ "file_path": "datasets/src/datasets/filesystems/__init__.py", "repo_id": "datasets", "token_count": 564 }
79
from typing import Callable, Optional from .. import Features, NamedSplit, Split from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class GeneratorDatasetInputStream(AbstractDatasetInputStream): def __init__( self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, split: NamedSplit = Split.TRAIN, **kwargs, ): super().__init__( features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.builder = Generator( cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, split=split, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.builder.config.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
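For context, this input stream is what backs the public `Dataset.from_generator` API; a minimal sketch under that assumption, with a made-up generator:

```py
from datasets import Dataset

def gen():
    # Each yielded dict becomes one row of the dataset.
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}
```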
datasets/src/datasets/io/generator.py/0
{ "file_path": "datasets/src/datasets/io/generator.py", "repo_id": "datasets", "token_count": 920 }
80
import glob import json import os import shutil import time from pathlib import Path from typing import List, Optional, Tuple, Union import pyarrow as pa import datasets import datasets.config import datasets.data_files from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split logger = datasets.utils.logging.get_logger(__name__) def _get_modification_time(cached_directory_path): return (Path(cached_directory_path)).stat().st_mtime def _find_hash_in_cache( dataset_name: str, config_name: Optional[str], cache_dir: Optional[str], config_kwargs: dict, custom_features: Optional[datasets.Features], ) -> Tuple[str, str, str]: if config_name or config_kwargs or custom_features: config_id = datasets.BuilderConfig(config_name or "default").create_config_id( config_kwargs=config_kwargs, custom_features=custom_features ) else: config_id = None cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) namespace_and_dataset_name = dataset_name.split("/") namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1]) cached_relative_path = "___".join(namespace_and_dataset_name) cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path) cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*") ) if os.path.isdir(cached_directory_path) and ( config_kwargs or custom_features or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"] == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name ) ] if not cached_directory_paths: cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*")) if os.path.isdir(cached_directory_path) ] available_configs = sorted( {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths} ) raise ValueError( f"Couldn't find cache for {dataset_name}" + (f" for config '{config_id}'" if config_id else "") + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "") ) # get most recent cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) version, hash = cached_directory_path.parts[-2:] other_configs = [ Path(_cached_directory_path).parts[-3] for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) if os.path.isdir(_cached_directory_path) and ( config_kwargs or custom_features or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"] == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name ) ] if not config_id and len(other_configs) > 1: raise ValueError( f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" f"\nPlease specify which configuration to reload from the cache, e.g." f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" ) config_name = cached_directory_path.parts[-3] warning_msg = ( f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 
) logger.warning(warning_msg) return config_name, version, hash class Cache(datasets.ArrowBasedBuilder): def __init__( self, cache_dir: Optional[str] = None, dataset_name: Optional[str] = None, config_name: Optional[str] = None, version: Optional[str] = "0.0.0", hash: Optional[str] = None, base_path: Optional[str] = None, info: Optional[datasets.DatasetInfo] = None, features: Optional[datasets.Features] = None, token: Optional[Union[bool, str]] = None, repo_id: Optional[str] = None, data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None, data_dir: Optional[str] = None, storage_options: Optional[dict] = None, writer_batch_size: Optional[int] = None, **config_kwargs, ): if repo_id is None and dataset_name is None: raise ValueError("repo_id or dataset_name is required for the Cache dataset builder") if data_files is not None: config_kwargs["data_files"] = data_files if data_dir is not None: config_kwargs["data_dir"] = data_dir if hash == "auto" and version == "auto": config_name, version, hash = _find_hash_in_cache( dataset_name=repo_id or dataset_name, config_name=config_name, cache_dir=cache_dir, config_kwargs=config_kwargs, custom_features=features, ) elif hash == "auto" or version == "auto": raise NotImplementedError("Pass both hash='auto' and version='auto' instead") super().__init__( cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, version=version, hash=hash, base_path=base_path, info=info, token=token, repo_id=repo_id, storage_options=storage_options, writer_batch_size=writer_batch_size, ) def _info(self) -> datasets.DatasetInfo: return datasets.DatasetInfo() def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs): if not os.path.exists(self.cache_dir): raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}") if output_dir is not None and output_dir != self.cache_dir: shutil.copytree(self.cache_dir, output_dir) def _split_generators(self, dl_manager): # used to stream from cache if isinstance(self.info.splits, datasets.SplitDict): split_infos: List[datasets.SplitInfo] = list(self.info.splits.values()) else: raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}") return [ datasets.SplitGenerator( name=split_info.name, gen_kwargs={ "files": filenames_for_dataset_split( self.cache_dir, dataset_name=self.dataset_name, split=split_info.name, filetype_suffix="arrow", shard_lengths=split_info.shard_lengths, ) }, ) for split_info in split_infos ] def _generate_tables(self, files): # used to stream from cache for file_idx, file in enumerate(files): with open(file, "rb") as f: try: for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", pa_table except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
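A hedged sketch of driving this builder to reload an already-prepared dataset from the cache; `version="auto"` and `hash="auto"` trigger the lookup above, the dataset name is a placeholder, and the data must already exist in your local cache for this to succeed:

```py
from datasets.packaged_modules.cache.cache import Cache

# "squad" is hypothetical here; use a dataset you have previously prepared.
builder = Cache(dataset_name="squad", version="auto", hash="auto")
builder.download_and_prepare()  # only validates that the cache directory exists
ds = builder.as_dataset(split="train")
```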
datasets/src/datasets/packaged_modules/cache/cache.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/cache/cache.py", "repo_id": "datasets", "token_count": 3788 }
81
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from itertools import islice
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils import experimental
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark
    import pyspark.sql


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None

    def __post_init__(self):
        super().__post_init__()


def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
    df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
    for partition_id in new_partition_order[1:]:
        partition_df = df.select("*").where(f"part_id = {partition_id}")
        df_combined = df_combined.union(partition_df)
    return df_combined


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
    state_dict: Optional[dict] = None,
):
    import pyspark

    df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
    partition_idx_start = state_dict["partition_idx"] if state_dict else 0
    partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order[partition_idx_start:])
    # pipeline next partition in parallel to hide latency
    rows = partition_df.toLocalIterator(prefetchPartitions=True)
    curr_partition = None
    row_id = state_dict["partition_example_idx"] if state_dict else 0
    for row in islice(rows, row_id, None):
        row_as_dict = row.asDict()
        part_id = row_as_dict["part_id"]
        row_as_dict.pop("part_id")
        if curr_partition != part_id:
            if state_dict and curr_partition is not None:
                state_dict["partition_idx"] += 1
            curr_partition = part_id
            row_id = 0
        if state_dict:
            state_dict["partition_example_idx"] = row_id + 1
        yield f"{part_id}_{row_id}", row_as_dict
        row_id += 1


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        super().__init__()
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())

    def _init_state_dict(self) -> dict:
        self._state_dict = {"partition_idx": 0, "partition_example_idx": 0}
        return self._state_dict

    @experimental
    def load_state_dict(self, state_dict: dict) -> dict:
        return super().load_state_dict(state_dict)

    def __iter__(self):
        yield from _generate_iterable_examples(self.df, self.partition_order, self._state_dict)

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import
pyspark self._spark = pyspark.sql.SparkSession.builder.getOrCreate() self.df = df self._working_dir = working_dir super().__init__( cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, ) def _validate_cache_dir(self): # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling # error due to pickling the SparkContext. cache_dir = self._cache_dir # Returns the path of the created file. def create_cache_and_write_probe(context): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(cache_dir, exist_ok=True) probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(probe_file, "a") return [probe_file] if self._spark.conf.get("spark.master", "").startswith("local"): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: probe = ( self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() ) if os.path.isfile(probe[0]): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] def _repartition_df_if_needed(self, max_shard_size): import pyspark def get_arrow_batch_size(it): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) df_num_rows = self.df.count() sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. approx_bytes_per_row = ( self.df.limit(sample_num_rows) .repartition(1) .mapInArrow(get_arrow_batch_size, "batch_bytes: long") .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) .collect()[0] .sample_bytes / sample_num_rows ) approx_total_size = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) self.df = self.df.repartition(new_num_partitions) def _prepare_split_single( self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath embed_local_files = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. features = self.config.features writer_batch_size = self._writer_batch_size storage_options = self._fs.storage_options def write_arrow(it): # Within the same SparkContext, no two task attempts will share the same attempt ID. task_id = pyspark.TaskContext().taskAttemptId() first_batch = next(it, None) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], ) shard_id = 0 writer = writer_class( features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([first_batch]) writer.write_table(table) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) shard_id += 1 writer = writer_class( features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([batch]) writer.write_table(table) if writer._num_bytes > 0: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(working_fpath)): dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) shutil.move(file, dest) stats = ( self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") .groupBy("task_id") .agg( pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _prepare_split( self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): self._validate_cache_dir() max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) self._repartition_df_if_needed(max_shard_size) is_local = not is_remote_filesystem(self._fs) path_join = os.path.join if is_local else posixpath.join SUFFIX = "-TTTTT-SSSSS-of-NNNNN" fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = path_join(self._output_dir, fname) total_num_examples = 0 total_num_bytes = 0 total_shards = 0 task_id_and_num_shards = [] all_shard_lengths = [] for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): ( num_examples, num_bytes, num_shards, shard_lengths, ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards)) all_shard_lengths.extend(shard_lengths) split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming {total_shards} shards.") if total_shards > 1: split_generator.split_info.shard_lengths = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
fs = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( task_id: int, shard_id: int, global_shard_id: int, ): rename( fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) args = [] global_shard_id = 0 for i in range(len(task_id_and_num_shards)): task_id, num_shards = task_id_and_num_shards[i] for shard_id in range(num_shards): args.append([task_id, shard_id, global_shard_id]) global_shard_id += 1 self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() else: # don't use any pattern shard_id = 0 task_id = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), ) def _get_examples_iterable_for_split( self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df)
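A minimal sketch of reaching this builder through the public API; it assumes a local `pyspark` session is available and that `Dataset.from_spark` routes through the `Spark` builder above:

```py
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "foo"), (2, "bar")], ["id", "text"])

# Materializes the DataFrame into a Hugging Face Dataset via the Spark builder.
ds = Dataset.from_spark(df)
```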
datasets/src/datasets/packaged_modules/spark/spark.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/spark/spark.py", "repo_id": "datasets", "token_count": 6923 }
82
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extends `dill` to support pickling more types and produce more consistent dumps.""" import os import sys from io import BytesIO from types import CodeType, FunctionType import dill from packaging import version from .. import config class Pickler(dill.Pickler): dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy()) _legacy_no_dict_keys_sorting = False def save(self, obj, save_persistent_id=True): obj_type = type(obj) if obj_type not in self.dispatch: if "regex" in sys.modules: import regex # type: ignore if obj_type is regex.Pattern: pklregister(obj_type)(_save_regexPattern) if "spacy" in sys.modules: import spacy # type: ignore if issubclass(obj_type, spacy.Language): pklregister(obj_type)(_save_spacyLanguage) if "tiktoken" in sys.modules: import tiktoken # type: ignore if obj_type is tiktoken.Encoding: pklregister(obj_type)(_save_tiktokenEncoding) if "torch" in sys.modules: import torch # type: ignore if issubclass(obj_type, torch.Tensor): pklregister(obj_type)(_save_torchTensor) if obj_type is torch.Generator: pklregister(obj_type)(_save_torchGenerator) # Unwrap `torch.compile`-ed modules if issubclass(obj_type, torch.nn.Module): obj = getattr(obj, "_orig_mod", obj) if "transformers" in sys.modules: import transformers # type: ignore if issubclass(obj_type, transformers.PreTrainedTokenizerBase): pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase) # Unwrap `torch.compile`-ed functions if obj_type is FunctionType: obj = getattr(obj, "_torchdynamo_orig_callable", obj) dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id) def _batch_setitems(self, items): if self._legacy_no_dict_keys_sorting: return super()._batch_setitems(items) # Ignore the order of keys in a dict try: # Faster, but fails for unorderable elements items = sorted(items) except Exception: # TypeError, decimal.InvalidOperation, etc. 
from datasets.fingerprint import Hasher items = sorted(items, key=lambda x: Hasher.hash(x[0])) dill.Pickler._batch_setitems(self, items) def memoize(self, obj): # Don't memoize strings since two identical strings can have different Python ids if type(obj) is not str: # noqa: E721 dill.Pickler.memoize(self, obj) def pklregister(t): """Register a custom reducer for the type.""" def proxy(func): Pickler.dispatch[t] = func return func return proxy def dump(obj, file): """Pickle an object to a file.""" Pickler(file, recurse=True).dump(obj) def dumps(obj): """Pickle an object to a string.""" file = BytesIO() dump(obj, file) return file.getvalue() if config.DILL_VERSION < version.parse("0.3.6"): def log(pickler, msg): dill._dill.log.info(msg) elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: def log(pickler, msg): dill._dill.logger.trace(pickler, msg) @pklregister(set) def _save_set(pickler, obj): log(pickler, f"Se: {obj}") try: # Faster, but fails for unorderable elements args = (sorted(obj),) except Exception: # TypeError, decimal.InvalidOperation, etc. from datasets.fingerprint import Hasher args = (sorted(obj, key=Hasher.hash),) pickler.save_reduce(set, args, obj=obj) log(pickler, "# Se") def _save_regexPattern(pickler, obj): import regex # type: ignore log(pickler, f"Re: {obj}") args = (obj.pattern, obj.flags) pickler.save_reduce(regex.compile, args, obj=obj) log(pickler, "# Re") def _save_tiktokenEncoding(pickler, obj): import tiktoken # type: ignore log(pickler, f"Enc: {obj}") args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens) pickler.save_reduce(tiktoken.Encoding, args, obj=obj) log(pickler, "# Enc") def _save_torchTensor(pickler, obj): import torch # type: ignore # `torch.from_numpy` is not picklable in `torch>=1.11.0` def create_torchTensor(np_array, dtype=None): tensor = torch.from_numpy(np_array) if dtype: tensor = tensor.type(dtype) return tensor log(pickler, f"To: {obj}") if obj.dtype == torch.bfloat16: args = (obj.detach().to(torch.float).cpu().numpy(), torch.bfloat16) else: args = (obj.detach().cpu().numpy(),) pickler.save_reduce(create_torchTensor, args, obj=obj) log(pickler, "# To") def _save_torchGenerator(pickler, obj): import torch # type: ignore def create_torchGenerator(state): generator = torch.Generator() generator.set_state(state) return generator log(pickler, f"Ge: {obj}") args = (obj.get_state(),) pickler.save_reduce(create_torchGenerator, args, obj=obj) log(pickler, "# Ge") def _save_spacyLanguage(pickler, obj): import spacy # type: ignore def create_spacyLanguage(config, bytes): lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"]) lang_inst = lang_cls.from_config(config) return lang_inst.from_bytes(bytes) log(pickler, f"Sp: {obj}") args = (obj.config, obj.to_bytes()) pickler.save_reduce(create_spacyLanguage, args, obj=obj) log(pickler, "# Sp") def _save_transformersPreTrainedTokenizerBase(pickler, obj): log(pickler, f"Tok: {obj}") # Ignore the `cache` attribute state = obj.__dict__ if "cache" in state and isinstance(state["cache"], dict): state["cache"] = {} pickler.save_reduce(type(obj), (), state=state, obj=obj) log(pickler, "# Tok") if config.DILL_VERSION < version.parse("0.3.6"): @pklregister(CodeType) def _save_code(pickler, obj): """ From dill._dill.save_code This is a modified version that removes the origin (filename + line no.) of functions created in notebooks or shells for example. 
""" dill._dill.log.info(f"Co: {obj}") # The filename of a function is the .py file where it is defined. # Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. # # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation if dill._dill.PY3: if hasattr(obj, "co_posonlyargcount"): args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(CodeType, args, obj=obj) dill._dill.log.info("# Co") return elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104 @pklregister(CodeType) def save_code(pickler, obj): dill._dill.logger.trace(pickler, "Co: %s", obj) ############################################################################################################ # Modification here for huggingface/datasets # The filename of a function is the .py file where it is defined. # Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. 
# # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation, except for the replacements: # - obj.co_filename => co_filename # - obj.co_firstlineno => co_firstlineno ############################################################################################################ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_endlinetable, obj.co_columntable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_linetable"): # python 3.10 (16 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: # python 3.7 (15 args) args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(dill._dill._create_code, args, obj=obj) dill._dill.logger.trace(pickler, "# Co") return
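To make the intent above concrete, a minimal sketch of the determinism this module buys, using the `dumps` helper defined here (the example dicts are arbitrary):

```py
from datasets.utils._dill import dumps

# Same items, different insertion order: the custom Pickler sorts dict
# entries before writing, so both serialize to identical bytes. This is
# what keeps dataset fingerprints stable across runs.
assert dumps({"a": 1, "b": 2}) == dumps({"b": 2, "a": 1})
```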
datasets/src/datasets/utils/_dill.py/0
{ "file_path": "datasets/src/datasets/utils/_dill.py", "repo_id": "datasets", "token_count": 8494 }
83
{ "code": "Programming language (C++, Java, Javascript, Python, etc.)", "aa": "Afar", "aaa": "Ghotuo", "aab": "Alumu-Tesu", "aac": "Ari", "aad": "Amal", "aae": "Arbëreshë Albanian", "aaf": "Aranadan", "aag": "Ambrak", "aah": "Abu' Arapesh", "aai": "Arifama-Miniafia", "aak": "Ankave", "aal": "Afade", "aan": "Anambé", "aao": "Algerian Saharan Arabic", "aap": "Pará Arára", "aaq": "Eastern Abnaki", "aas": "Aasáx", "aat": "Arvanitika Albanian", "aau": "Abau", "aav": "Austro-Asiatic languages", "aaw": "Solong", "aax": "Mandobo Atas", "aaz": "Amarasi", "ab": "Abkhazian", "aba": "Abé", "abb": "Bankon", "abc": "Ambala Ayta", "abd": "Manide", "abe": "Western Abnaki", "abf": "Abai Sungai", "abg": "Abaga", "abh": "Tajiki Arabic", "abi": "Abidji", "abj": "Aka-Bea", "abl": "Lampung Nyo", "abm": "Abanyom", "abn": "Abua", "abo": "Abon", "abp": "Abellen Ayta", "abq": "Abaza", "abr": "Abron", "abs": "Ambonese Malay", "abt": "Ambulas", "abu": "Abure", "abv": "Baharna Arabic", "abw": "Pal", "abx": "Inabaknon", "aby": "Aneme Wake", "abz": "Abui", "aca": "Achagua", "acb": "Áncá", "acd": "Gikyode", "ace": "Achinese", "acf": "Saint Lucian Creole French", "ach": "Acoli", "aci": "Aka-Cari", "ack": "Aka-Kora", "acl": "Akar-Bale", "acm": "Mesopotamian Arabic", "acn": "Achang", "acp": "Eastern Acipa", "acq": "Ta'izzi-Adeni Arabic", "acr": "Achi", "acs": "Acroá", "act": "Achterhoeks", "acu": "Achuar-Shiwiar", "acv": "Achumawi", "acw": "Hijazi Arabic", "acx": "Omani Arabic", "acy": "Cypriot Arabic", "acz": "Acheron", "ada": "Adangme", "adb": "Atauran", "add": "Lidzonka; Dzodinka", "ade": "Adele", "adf": "Dhofari Arabic", "adg": "Andegerebinha", "adh": "Adhola", "adi": "Adi", "adj": "Adioukrou", "adl": "Galo", "adn": "Adang", "ado": "Abu", "adq": "Adangbe", "adr": "Adonara", "ads": "Adamorobe Sign Language", "adt": "Adnyamathanha", "adu": "Aduge", "adw": "Amundava", "adx": "Amdo Tibetan", "ady": "Adyghe; Adygei", "adz": "Adzera", "ae": "Avestan", "aea": "Areba", "aeb": "Tunisian Arabic", "aec": "Saidi Arabic", "aed": "Argentine Sign Language", "aee": "Northeast Pashai; Northeast Pashayi", "aek": "Haeke", "ael": "Ambele", "aem": "Arem", "aen": "Armenian Sign Language", "aeq": "Aer", "aer": "Eastern Arrernte", "aes": "Alsea", "aeu": "Akeu", "aew": "Ambakich", "aey": "Amele", "aez": "Aeka", "af": "Afrikaans", "afa": "Afro-Asiatic languages", "afb": "Gulf Arabic", "afd": "Andai", "afe": "Putukwam", "afg": "Afghan Sign Language", "afh": "Afrihili", "afi": "Akrukay; Chini", "afk": "Nanubae", "afn": "Defaka", "afo": "Eloyi", "afp": "Tapei", "afs": "Afro-Seminole Creole", "aft": "Afitti", "afu": "Awutu", "afz": "Obokuitai", "aga": "Aguano", "agb": "Legbo", "agc": "Agatu", "agd": "Agarabi", "age": "Angal", "agf": "Arguni", "agg": "Angor", "agh": "Ngelima", "agi": "Agariya", "agj": "Argobba", "agk": "Isarog Agta", "agl": "Fembe", "agm": "Angaataha", "agn": "Agutaynen", "ago": "Tainae", "agq": "Aghem", "agr": "Aguaruna", "ags": "Esimbi", "agt": "Central Cagayan Agta", "agu": "Aguacateco", "agv": "Remontado Dumagat", "agw": "Kahua", "agx": "Aghul", "agy": "Southern Alta", "agz": "Mt. 
Iriga Agta", "aha": "Ahanta", "ahb": "Axamb", "ahg": "Qimant", "ahh": "Aghu", "ahi": "Tiagbamrin Aizi", "ahk": "Akha", "ahl": "Igo", "ahm": "Mobumrin Aizi", "ahn": "Àhàn", "aho": "Ahom", "ahp": "Aproumu Aizi", "ahr": "Ahirani", "ahs": "Ashe", "aht": "Ahtena", "aia": "Arosi", "aib": "Ainu (China)", "aic": "Ainbai", "aid": "Alngith", "aie": "Amara", "aif": "Agi", "aig": "Antigua and Barbuda Creole English", "aih": "Ai-Cham", "aii": "Assyrian Neo-Aramaic", "aij": "Lishanid Noshan", "aik": "Ake", "ail": "Aimele", "aim": "Aimol", "ain": "Ainu (Japan)", "aio": "Aiton", "aip": "Burumakok", "aiq": "Aimaq", "air": "Airoran", "ait": "Arikem", "aiw": "Aari", "aix": "Aighon", "aiy": "Ali", "aja": "Aja (South Sudan)", "ajg": "Aja (Benin)", "aji": "Ajië", "ajn": "Andajin", "ajp": "South Levantine Arabic", "ajs": "Algerian Jewish Sign Language", "aju": "Judeo-Moroccan Arabic", "ajw": "Ajawa", "ajz": "Amri Karbi", "ak": "Akan", "akb": "Batak Angkola", "akc": "Mpur", "akd": "Ukpet-Ehom", "ake": "Akawaio", "akf": "Akpa", "akg": "Anakalangu", "akh": "Angal Heneng", "aki": "Aiome", "akj": "Aka-Jeru", "akk": "Akkadian", "akl": "Aklanon", "akm": "Aka-Bo", "ako": "Akurio", "akp": "Siwu", "akq": "Ak", "akr": "Araki", "aks": "Akaselem", "akt": "Akolet", "aku": "Akum", "akv": "Akhvakh", "akw": "Akwa", "akx": "Aka-Kede", "aky": "Aka-Kol", "akz": "Alabama", "ala": "Alago", "alc": "Qawasqar", "ald": "Alladian", "ale": "Aleut", "alf": "Alege", "alg": "Algonquian languages", "alh": "Alawa", "ali": "Amaimon", "alj": "Alangan", "alk": "Alak", "all": "Allar", "alm": "Amblong", "aln": "Gheg Albanian", "alo": "Larike-Wakasihu", "alp": "Alune", "alq": "Algonquin", "alr": "Alutor", "als": "Tosk Albanian", "alt": "Southern Altai", "alu": "'Are'are", "alv": "Atlantic-Congo languages", "alw": "Alaba-K’abeena; Wanbasana", "alx": "Amol", "aly": "Alyawarr", "alz": "Alur", "am": "Amharic", "ama": "Amanayé", "amb": "Ambo", "amc": "Amahuaca", "ame": "Yanesha'", "amf": "Hamer-Banna", "amg": "Amurdak", "ami": "Amis", "amj": "Amdang", "amk": "Ambai", "aml": "War-Jaintia", "amm": "Ama (Papua New Guinea)", "amn": "Amanab", "amo": "Amo", "amp": "Alamblak", "amq": "Amahai", "amr": "Amarakaeri", "ams": "Southern Amami-Oshima", "amt": "Amto", "amu": "Guerrero Amuzgo", "amv": "Ambelau", "amw": "Western Neo-Aramaic", "amx": "Anmatyerre", "amy": "Ami", "amz": "Atampaya", "an": "Aragonese", "ana": "Andaqui", "anb": "Andoa", "anc": "Ngas", "and": "Ansus", "ane": "Xârâcùù", "anf": "Animere", "ang": "Old English (ca. 
450-1100)", "anh": "Nend", "ani": "Andi", "anj": "Anor", "ank": "Goemai", "anl": "Anu-Hkongso Chin", "anm": "Anal", "ann": "Obolo", "ano": "Andoque", "anp": "Angika", "anq": "Jarawa (India)", "anr": "Andh", "ans": "Anserma", "ant": "Antakarinya; Antikarinya", "anu": "Anuak", "anv": "Denya", "anw": "Anaang", "anx": "Andra-Hus", "any": "Anyin", "anz": "Anem", "aoa": "Angolar", "aob": "Abom", "aoc": "Pemon", "aod": "Andarum", "aoe": "Angal Enen", "aof": "Bragat", "aog": "Angoram", "aoi": "Anindilyakwa", "aoj": "Mufian", "aok": "Arhö", "aol": "Alor", "aom": "Ömie", "aon": "Bumbita Arapesh", "aor": "Aore", "aos": "Taikat", "aot": "Atong (India); A'tong", "aou": "A'ou", "aox": "Atorada", "aoz": "Uab Meto", "apa": "Apache languages", "apb": "Sa'a", "apc": "North Levantine Arabic", "apd": "Sudanese Arabic", "ape": "Bukiyip", "apf": "Pahanan Agta", "apg": "Ampanang", "aph": "Athpariya", "api": "Apiaká", "apj": "Jicarilla Apache", "apk": "Kiowa Apache", "apl": "Lipan Apache", "apm": "Mescalero-Chiricahua Apache", "apn": "Apinayé", "apo": "Ambul", "app": "Apma", "apq": "A-Pucikwar", "apr": "Arop-Lokep", "aps": "Arop-Sissano", "apt": "Apatani", "apu": "Apurinã", "apv": "Alapmunte", "apw": "Western Apache", "apx": "Aputai", "apy": "Apalaí", "apz": "Safeyoka", "aqa": "Alacalufan languages", "aqc": "Archi", "aqd": "Ampari Dogon", "aqg": "Arigidi", "aqk": "Aninka", "aql": "Algic languages", "aqm": "Atohwaim", "aqn": "Northern Alta", "aqp": "Atakapa", "aqr": "Arhâ", "aqt": "Angaité", "aqz": "Akuntsu", "ar": "Arabic", "arb": "Standard Arabic", "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", "ard": "Arabana", "are": "Western Arrarnta", "arh": "Arhuaco", "ari": "Arikara", "arj": "Arapaso", "ark": "Arikapú", "arl": "Arabela", "arn": "Mapudungun; Mapuche", "aro": "Araona", "arp": "Arapaho", "arq": "Algerian Arabic", "arr": "Karo (Brazil)", "ars": "Najdi Arabic", "art": "Artificial languages", "aru": "Aruá (Amazonas State); Arawá", "arv": "Arbore", "arw": "Arawak", "arx": "Aruá (Rodonia State)", "ary": "Moroccan Arabic", "arz": "Egyptian Arabic", "as": "Assamese", "asa": "Asu (Tanzania)", "asb": "Assiniboine", "asc": "Casuarina Coast Asmat", "ase": "American Sign Language", "asf": "Auslan; Australian Sign Language", "asg": "Cishingini", "ash": "Abishira", "asi": "Buruwai", "asj": "Sari", "ask": "Ashkun", "asl": "Asilulu", "asn": "Xingú Asuriní", "aso": "Dano", "asp": "Algerian Sign Language", "asq": "Austrian Sign Language", "asr": "Asuri", "ass": "Ipulo", "ast": "Asturian; Asturleonese; Bable; Leonese", "asu": "Tocantins Asurini", "asv": "Asoa", "asw": "Australian Aborigines Sign Language", "asx": "Muratayak", "asy": "Yaosakor Asmat", "asz": "As", "ata": "Pele-Ata", "atb": "Zaiwa", "atc": "Atsahuaca", "atd": "Ata Manobo", "ate": "Atemble", "atg": "Ivbie North-Okpela-Arhe", "ath": "Athapascan languages", "ati": "Attié", "atj": "Atikamekw", "atk": "Ati", "atl": "Mt. 
Iraya Agta", "atm": "Ata", "atn": "Ashtiani", "ato": "Atong (Cameroon)", "atp": "Pudtol Atta", "atq": "Aralle-Tabulahan", "atr": "Waimiri-Atroari", "ats": "Gros Ventre", "att": "Pamplona Atta", "atu": "Reel", "atv": "Northern Altai", "atw": "Atsugewi", "atx": "Arutani", "aty": "Aneityum", "atz": "Arta", "aua": "Asumboa", "aub": "Alugu", "auc": "Waorani", "aud": "Anuta", "auf": "Arauan languages", "aug": "Aguna", "auh": "Aushi", "aui": "Anuki", "auj": "Awjilah", "auk": "Heyo", "aul": "Aulua", "aum": "Asu (Nigeria)", "aun": "Molmo One", "auo": "Auyokawa", "aup": "Makayam", "auq": "Anus; Korur", "aur": "Aruek", "aus": "Australian languages", "aut": "Austral", "auu": "Auye", "auw": "Awyi", "aux": "Aurá", "auy": "Awiyaana", "auz": "Uzbeki Arabic", "av": "Avaric", "avb": "Avau", "avd": "Alviri-Vidari", "avi": "Avikam", "avk": "Kotava", "avl": "Eastern Egyptian Bedawi Arabic", "avm": "Angkamuthi", "avn": "Avatime", "avo": "Agavotaguerra", "avs": "Aushiri", "avt": "Au", "avu": "Avokaya", "avv": "Avá-Canoeiro", "awa": "Awadhi", "awb": "Awa (Papua New Guinea)", "awc": "Cicipu", "awd": "Arawakan languages", "awe": "Awetí", "awg": "Anguthimri", "awh": "Awbono", "awi": "Aekyom", "awk": "Awabakal", "awm": "Arawum", "awn": "Awngi", "awo": "Awak", "awr": "Awera", "aws": "South Awyu", "awt": "Araweté", "awu": "Central Awyu", "awv": "Jair Awyu", "aww": "Awun", "awx": "Awara", "awy": "Edera Awyu", "axb": "Abipon", "axe": "Ayerrerenge", "axg": "Mato Grosso Arára", "axk": "Yaka (Central African Republic)", "axl": "Lower Southern Aranda", "axm": "Middle Armenian", "axx": "Xârâgurè", "ay": "Aymara", "aya": "Awar", "ayb": "Ayizo Gbe", "ayc": "Southern Aymara", "ayd": "Ayabadhu", "aye": "Ayere", "ayg": "Ginyanga", "ayh": "Hadrami Arabic", "ayi": "Leyigha", "ayk": "Akuku", "ayl": "Libyan Arabic", "ayn": "Sanaani Arabic", "ayo": "Ayoreo", "ayp": "North Mesopotamian Arabic", "ayq": "Ayi (Papua New Guinea)", "ayr": "Central Aymara", "ays": "Sorsogon Ayta", "ayt": "Magbukun Ayta", "ayu": "Ayu", "ayz": "Mai Brat", "az": "Azerbaijani", "aza": "Azha", "azb": "South Azerbaijani", "azc": "Uto-Aztecan languages", "azd": "Eastern Durango Nahuatl", "azg": "San Pedro Amuzgos Amuzgo", "azj": "North Azerbaijani", "azm": "Ipalapa Amuzgo", "azn": "Western Durango Nahuatl", "azo": "Awing", "azt": "Faire Atta", "azz": "Highland Puebla Nahuatl", "ba": "Bashkir", "baa": "Babatana", "bab": "Bainouk-Gunyuño", "bac": "Badui", "bad": "Banda languages", "bae": "Baré", "baf": "Nubaca", "bag": "Tuki", "bah": "Bahamas Creole English", "bai": "Bamileke languages", "baj": "Barakai", "bal": "Baluchi", "ban": "Balinese", "bao": "Waimaha", "bap": "Bantawa", "bar": "Bavarian", "bas": "Basa (Cameroon)", "bat": "Baltic languages", "bau": "Bada (Nigeria)", "bav": "Vengo", "baw": "Bambili-Bambui", "bax": "Bamun", "bay": "Batuley", "bba": "Baatonum", "bbb": "Barai", "bbc": "Batak Toba", "bbd": "Bau", "bbe": "Bangba", "bbf": "Baibai", "bbg": "Barama", "bbh": "Bugan", "bbi": "Barombi", "bbj": "Ghomálá'", "bbk": "Babanki", "bbl": "Bats", "bbm": "Babango", "bbn": "Uneapa", "bbo": "Northern Bobo Madaré; Konabéré", "bbp": "West Central Banda", "bbq": "Bamali", "bbr": "Girawa", "bbs": "Bakpinka", "bbt": "Mburku", "bbu": "Kulung (Nigeria)", "bbv": "Karnai", "bbw": "Baba", "bbx": "Bubia", "bby": "Befang", "bca": "Central Bai", "bcb": "Bainouk-Samik", "bcc": "Southern Balochi", "bcd": "North Babar", "bce": "Bamenyam", "bcf": "Bamu", "bcg": "Baga Pokur", "bch": "Bariai", "bci": "Baoulé", "bcj": "Bardi", "bck": "Bunuba", "bcl": "Central Bikol", "bcm": "Bannoni", 
"bcn": "Bali (Nigeria)", "bco": "Kaluli", "bcp": "Bali (Democratic Republic of Congo)", "bcq": "Bench", "bcr": "Babine", "bcs": "Kohumono", "bct": "Bendi", "bcu": "Awad Bing", "bcv": "Shoo-Minda-Nye", "bcw": "Bana", "bcy": "Bacama", "bcz": "Bainouk-Gunyaamolo", "bda": "Bayot", "bdb": "Basap", "bdc": "Emberá-Baudó", "bdd": "Bunama", "bde": "Bade", "bdf": "Biage", "bdg": "Bonggi", "bdh": "Baka (South Sudan)", "bdi": "Burun", "bdj": "Bai (South Sudan); Bai", "bdk": "Budukh", "bdl": "Indonesian Bajau", "bdm": "Buduma", "bdn": "Baldemu", "bdo": "Morom", "bdp": "Bende", "bdq": "Bahnar", "bdr": "West Coast Bajau", "bds": "Burunge", "bdt": "Bokoto", "bdu": "Oroko", "bdv": "Bodo Parja", "bdw": "Baham", "bdx": "Budong-Budong", "bdy": "Bandjalang", "bdz": "Badeshi", "be": "Belarusian", "bea": "Beaver", "beb": "Bebele", "bec": "Iceve-Maci", "bed": "Bedoanas", "bee": "Byangsi", "bef": "Benabena", "beg": "Belait", "beh": "Biali", "bei": "Bekati'", "bej": "Beja; Bedawiyet", "bek": "Bebeli", "bem": "Bemba (Zambia)", "beo": "Beami", "bep": "Besoa", "beq": "Beembe", "ber": "Berber languages", "bes": "Besme", "bet": "Guiberoua Béte", "beu": "Blagar", "bev": "Daloa Bété", "bew": "Betawi", "bex": "Jur Modo", "bey": "Beli (Papua New Guinea)", "bez": "Bena (Tanzania)", "bfa": "Bari", "bfb": "Pauri Bareli", "bfc": "Panyi Bai; Northern Bai", "bfd": "Bafut", "bfe": "Betaf; Tena", "bff": "Bofi", "bfg": "Busang Kayan", "bfh": "Blafe", "bfi": "British Sign Language", "bfj": "Bafanji", "bfk": "Ban Khor Sign Language", "bfl": "Banda-Ndélé", "bfm": "Mmen", "bfn": "Bunak", "bfo": "Malba Birifor", "bfp": "Beba", "bfq": "Badaga", "bfr": "Bazigar", "bfs": "Southern Bai", "bft": "Balti", "bfu": "Gahri", "bfw": "Bondo", "bfx": "Bantayanon", "bfy": "Bagheli", "bfz": "Mahasu Pahari", "bg": "Bulgarian", "bga": "Gwamhi-Wuri", "bgb": "Bobongko", "bgc": "Haryanvi", "bgd": "Rathwi Bareli", "bge": "Bauria", "bgf": "Bangandu", "bgg": "Bugun", "bgi": "Giangan", "bgj": "Bangolan", "bgk": "Bit; Buxinhua", "bgl": "Bo (Laos)", "bgn": "Western Balochi", "bgo": "Baga Koga", "bgp": "Eastern Balochi", "bgq": "Bagri", "bgr": "Bawm Chin", "bgs": "Tagabawa", "bgt": "Bughotu", "bgu": "Mbongno", "bgv": "Warkay-Bipim", "bgw": "Bhatri", "bgx": "Balkan Gagauz Turkish", "bgy": "Benggoi", "bgz": "Banggai", "bh": "Bihari languages", "bha": "Bharia", "bhb": "Bhili", "bhc": "Biga", "bhd": "Bhadrawahi", "bhe": "Bhaya", "bhf": "Odiai", "bhg": "Binandere", "bhh": "Bukharic", "bhi": "Bhilali", "bhj": "Bahing", "bhl": "Bimin", "bhm": "Bathari", "bhn": "Bohtan Neo-Aramaic", "bho": "Bhojpuri", "bhp": "Bima", "bhq": "Tukang Besi South", "bhr": "Bara Malagasy", "bhs": "Buwal", "bht": "Bhattiyali", "bhu": "Bhunjia", "bhv": "Bahau", "bhw": "Biak", "bhx": "Bhalay", "bhy": "Bhele", "bhz": "Bada (Indonesia)", "bi": "Bislama", "bia": "Badimaya", "bib": "Bissa; Bisa", "bid": "Bidiyo", "bie": "Bepour", "bif": "Biafada", "big": "Biangai", "bik": "Bikol", "bil": "Bile", "bim": "Bimoba", "bin": "Bini; Edo", "bio": "Nai", "bip": "Bila", "biq": "Bipi", "bir": "Bisorio", "bit": "Berinomo", "biu": "Biete", "biv": "Southern Birifor", "biw": "Kol (Cameroon)", "bix": "Bijori", "biy": "Birhor", "biz": "Baloi", "bja": "Budza", "bjb": "Banggarla", "bjc": "Bariji", "bje": "Biao-Jiao Mien", "bjf": "Barzani Jewish Neo-Aramaic", "bjg": "Bidyogo", "bjh": "Bahinemo", "bji": "Burji", "bjj": "Kanauji", "bjk": "Barok", "bjl": "Bulu (Papua New Guinea)", "bjm": "Bajelani", "bjn": "Banjar", "bjo": "Mid-Southern Banda", "bjp": "Fanamaket", "bjr": "Binumarien", "bjs": "Bajan", "bjt": "Balanta-Ganja", 
"bju": "Busuu", "bjv": "Bedjond", "bjw": "Bakwé", "bjx": "Banao Itneg", "bjy": "Bayali", "bjz": "Baruga", "bka": "Kyak", "bkc": "Baka (Cameroon)", "bkd": "Binukid; Talaandig", "bkf": "Beeke", "bkg": "Buraka", "bkh": "Bakoko", "bki": "Baki", "bkj": "Pande", "bkk": "Brokskat", "bkl": "Berik", "bkm": "Kom (Cameroon)", "bkn": "Bukitan", "bko": "Kwa'", "bkp": "Boko (Democratic Republic of Congo)", "bkq": "Bakairí", "bkr": "Bakumpai", "bks": "Northern Sorsoganon", "bkt": "Boloki", "bku": "Buhid", "bkv": "Bekwarra", "bkw": "Bekwel", "bkx": "Baikeno", "bky": "Bokyi", "bkz": "Bungku", "bla": "Siksika", "blb": "Bilua", "blc": "Bella Coola", "bld": "Bolango", "ble": "Balanta-Kentohe", "blf": "Buol", "blh": "Kuwaa", "bli": "Bolia", "blj": "Bolongan", "blk": "Pa'o Karen; Pa'O", "bll": "Biloxi", "blm": "Beli (South Sudan)", "bln": "Southern Catanduanes Bikol", "blo": "Anii", "blp": "Blablanga", "blq": "Baluan-Pam", "blr": "Blang", "bls": "Balaesang", "blt": "Tai Dam", "blv": "Kibala; Bolo", "blw": "Balangao", "blx": "Mag-Indi Ayta", "bly": "Notre", "blz": "Balantak", "bm": "Bambara", "bma": "Lame", "bmb": "Bembe", "bmc": "Biem", "bmd": "Baga Manduri", "bme": "Limassa", "bmf": "Bom-Kim", "bmg": "Bamwe", "bmh": "Kein", "bmi": "Bagirmi", "bmj": "Bote-Majhi", "bmk": "Ghayavi", "bml": "Bomboli", "bmm": "Northern Betsimisaraka Malagasy", "bmn": "Bina (Papua New Guinea)", "bmo": "Bambalang", "bmp": "Bulgebi", "bmq": "Bomu", "bmr": "Muinane", "bms": "Bilma Kanuri", "bmt": "Biao Mon", "bmu": "Somba-Siawari", "bmv": "Bum", "bmw": "Bomwali", "bmx": "Baimak", "bmz": "Baramu", "bn": "Bengali; Bangla", "bna": "Bonerate", "bnb": "Bookan", "bnc": "Bontok", "bnd": "Banda (Indonesia)", "bne": "Bintauna", "bnf": "Masiwang", "bng": "Benga", "bni": "Bangi", "bnj": "Eastern Tawbuid", "bnk": "Bierebo", "bnl": "Boon", "bnm": "Batanga", "bnn": "Bunun", "bno": "Bantoanon", "bnp": "Bola", "bnq": "Bantik", "bnr": "Butmas-Tur", "bns": "Bundeli", "bnt": "Bantu languages", "bnu": "Bentong", "bnv": "Bonerif; Beneraf; Edwas", "bnw": "Bisis", "bnx": "Bangubangu", "bny": "Bintulu", "bnz": "Beezen", "bo": "Tibetan", "boa": "Bora", "bob": "Aweer", "boe": "Mundabli", "bof": "Bolon", "bog": "Bamako Sign Language", "boh": "Boma", "boi": "Barbareño", "boj": "Anjam", "bok": "Bonjo", "bol": "Bole", "bom": "Berom", "bon": "Bine", "boo": "Tiemacèwè Bozo", "bop": "Bonkiman", "boq": "Bogaya", "bor": "Borôro", "bot": "Bongo", "bou": "Bondei", "bov": "Tuwuli", "bow": "Rema", "box": "Buamu", "boy": "Bodo (Central African Republic)", "boz": "Tiéyaxo Bozo", "bpa": "Daakaka", "bpc": "Mbuk", "bpd": "Banda-Banda", "bpe": "Bauni", "bpg": "Bonggo", "bph": "Botlikh", "bpi": "Bagupi", "bpj": "Binji", "bpk": "Orowe; 'Ôrôê", "bpl": "Broome Pearling Lugger Pidgin", "bpm": "Biyom", "bpn": "Dzao Min", "bpo": "Anasi", "bpp": "Kaure", "bpq": "Banda Malay", "bpr": "Koronadal Blaan", "bps": "Sarangani Blaan", "bpt": "Barrow Point", "bpu": "Bongu", "bpv": "Bian Marind", "bpw": "Bo (Papua New Guinea)", "bpx": "Palya Bareli", "bpy": "Bishnupriya", "bpz": "Bilba", "bqa": "Tchumbuli", "bqb": "Bagusa", "bqc": "Boko (Benin); Boo", "bqd": "Bung", "bqf": "Baga Kaloum", "bqg": "Bago-Kusuntu", "bqh": "Baima", "bqi": "Bakhtiari", "bqj": "Bandial", "bqk": "Banda-Mbrès", "bql": "Bilakura", "bqm": "Wumboko", "bqn": "Bulgarian Sign Language", "bqo": "Balo", "bqp": "Busa", "bqq": "Biritai", "bqr": "Burusu", "bqs": "Bosngun", "bqt": "Bamukumbit", "bqu": "Boguru", "bqv": "Koro Wachi; Begbere-Ejar", "bqw": "Buru (Nigeria)", "bqx": "Baangi", "bqy": "Bengkala Sign Language", "bqz": "Bakaka", 
"br": "Breton", "bra": "Braj", "brb": "Brao; Lave", "brc": "Berbice Creole Dutch", "brd": "Baraamu", "brf": "Bira", "brg": "Baure", "brh": "Brahui", "bri": "Mokpwe", "brj": "Bieria", "brk": "Birked", "brl": "Birwa", "brm": "Barambu", "brn": "Boruca", "bro": "Brokkat", "brp": "Barapasi", "brq": "Breri", "brr": "Birao", "brs": "Baras", "brt": "Bitare", "bru": "Eastern Bru", "brv": "Western Bru", "brw": "Bellari", "brx": "Bodo (India)", "bry": "Burui", "brz": "Bilbil", "bs": "Bosnian", "bsa": "Abinomn", "bsb": "Brunei Bisaya", "bsc": "Bassari; Oniyan", "bse": "Wushi", "bsf": "Bauchi", "bsg": "Bashkardi", "bsh": "Kati", "bsi": "Bassossi", "bsj": "Bangwinji", "bsk": "Burushaski", "bsl": "Basa-Gumna", "bsm": "Busami", "bsn": "Barasana-Eduria", "bso": "Buso", "bsp": "Baga Sitemu", "bsq": "Bassa", "bsr": "Bassa-Kontagora", "bss": "Akoose", "bst": "Basketo", "bsu": "Bahonsuai", "bsv": "Baga Sobané", "bsw": "Baiso", "bsx": "Yangkam", "bsy": "Sabah Bisaya", "bta": "Bata", "btc": "Bati (Cameroon)", "btd": "Batak Dairi", "bte": "Gamo-Ningi", "btf": "Birgit", "btg": "Gagnoa Bété", "bth": "Biatah Bidayuh", "bti": "Burate", "btj": "Bacanese Malay", "btk": "Batak languages", "btm": "Batak Mandailing", "btn": "Ratagnon", "bto": "Rinconada Bikol", "btp": "Budibud", "btq": "Batek", "btr": "Baetora", "bts": "Batak Simalungun", "btt": "Bete-Bendi", "btu": "Batu", "btv": "Bateri", "btw": "Butuanon", "btx": "Batak Karo", "bty": "Bobot", "btz": "Batak Alas-Kluet", "bua": "Buriat", "bub": "Bua", "buc": "Bushi", "bud": "Ntcham", "bue": "Beothuk", "buf": "Bushoong", "bug": "Buginese", "buh": "Younuo Bunu", "bui": "Bongili", "buj": "Basa-Gurmana", "buk": "Bugawac", "bum": "Bulu (Cameroon)", "bun": "Sherbro", "buo": "Terei", "bup": "Busoa", "buq": "Brem", "bus": "Bokobaru", "but": "Bungain", "buu": "Budu", "buv": "Bun", "buw": "Bubi", "bux": "Boghom", "buy": "Bullom So", "buz": "Bukwen", "bva": "Barein", "bvb": "Bube", "bvc": "Baelelea", "bvd": "Baeggu", "bve": "Berau Malay", "bvf": "Boor", "bvg": "Bonkeng", "bvh": "Bure", "bvi": "Belanda Viri", "bvj": "Baan", "bvk": "Bukat", "bvl": "Bolivian Sign Language", "bvm": "Bamunka", "bvn": "Buna", "bvo": "Bolgo", "bvp": "Bumang", "bvq": "Birri", "bvr": "Burarra", "bvt": "Bati (Indonesia)", "bvu": "Bukit Malay", "bvv": "Baniva", "bvw": "Boga", "bvx": "Dibole", "bvy": "Baybayanon", "bvz": "Bauzi", "bwa": "Bwatoo", "bwb": "Namosi-Naitasiri-Serua", "bwc": "Bwile", "bwd": "Bwaidoka", "bwe": "Bwe Karen", "bwf": "Boselewa", "bwg": "Barwe", "bwh": "Bishuo", "bwi": "Baniwa", "bwj": "Láá Láá Bwamu", "bwk": "Bauwaki", "bwl": "Bwela", "bwm": "Biwat", "bwn": "Wunai Bunu", "bwo": "Boro (Ethiopia); Borna (Ethiopia)", "bwp": "Mandobo Bawah", "bwq": "Southern Bobo Madaré", "bwr": "Bura-Pabir", "bws": "Bomboma", "bwt": "Bafaw-Balong", "bwu": "Buli (Ghana)", "bww": "Bwa", "bwx": "Bu-Nao Bunu", "bwy": "Cwi Bwamu", "bwz": "Bwisi", "bxa": "Tairaha", "bxb": "Belanda Bor", "bxc": "Molengue", "bxd": "Pela", "bxe": "Birale", "bxf": "Bilur; Minigir", "bxg": "Bangala", "bxh": "Buhutu", "bxi": "Pirlatapa", "bxj": "Bayungu", "bxk": "Bukusu; Lubukusu", "bxl": "Jalkunan", "bxm": "Mongolia Buriat", "bxn": "Burduna", "bxo": "Barikanchi", "bxp": "Bebil", "bxq": "Beele", "bxr": "Russia Buriat", "bxs": "Busam", "bxu": "China Buriat", "bxv": "Berakou", "bxw": "Bankagooma", "bxz": "Binahari", "bya": "Batak", "byb": "Bikya", "byc": "Ubaghara", "byd": "Benyadu'", "bye": "Pouye", "byf": "Bete", "byg": "Baygo", "byh": "Bhujel", "byi": "Buyu", "byj": "Bina (Nigeria)", "byk": "Biao", "byl": "Bayono", "bym": "Bidjara", 
"byn": "Bilin; Blin", "byo": "Biyo", "byp": "Bumaji", "byq": "Basay", "byr": "Baruya; Yipma", "bys": "Burak", "byt": "Berti", "byv": "Medumba", "byw": "Belhariya", "byx": "Qaqet", "byz": "Banaro", "bza": "Bandi", "bzb": "Andio", "bzc": "Southern Betsimisaraka Malagasy", "bzd": "Bribri", "bze": "Jenaama Bozo", "bzf": "Boikin", "bzg": "Babuza", "bzh": "Mapos Buang", "bzi": "Bisu", "bzj": "Belize Kriol English", "bzk": "Nicaragua Creole English", "bzl": "Boano (Sulawesi)", "bzm": "Bolondo", "bzn": "Boano (Maluku)", "bzo": "Bozaba", "bzp": "Kemberano", "bzq": "Buli (Indonesia)", "bzr": "Biri", "bzs": "Brazilian Sign Language", "bzt": "Brithenig", "bzu": "Burmeso", "bzv": "Naami", "bzw": "Basa (Nigeria)", "bzx": "Kɛlɛngaxo Bozo", "bzy": "Obanliku", "bzz": "Evant", "ca": "Catalan; Valencian", "caa": "Chortí", "cab": "Garifuna", "cac": "Chuj", "cad": "Caddo", "cae": "Lehar; Laalaa", "caf": "Southern Carrier", "cag": "Nivaclé", "cah": "Cahuarano", "cai": "Central American Indian languages", "caj": "Chané", "cak": "Kaqchikel; Cakchiquel", "cal": "Carolinian", "cam": "Cemuhî", "can": "Chambri", "cao": "Chácobo", "cap": "Chipaya", "caq": "Car Nicobarese", "car": "Galibi Carib", "cas": "Tsimané", "cau": "Caucasian languages", "cav": "Cavineña", "caw": "Callawalla", "cax": "Chiquitano", "cay": "Cayuga", "caz": "Canichana", "cba": "Chibchan languages", "cbb": "Cabiyarí", "cbc": "Carapana", "cbd": "Carijona", "cbg": "Chimila", "cbi": "Chachi", "cbj": "Ede Cabe", "cbk": "Chavacano", "cbl": "Bualkhaw Chin", "cbn": "Nyahkur", "cbo": "Izora", "cbq": "Tsucuba; Cuba", "cbr": "Cashibo-Cacataibo", "cbs": "Cashinahua", "cbt": "Chayahuita", "cbu": "Candoshi-Shapra", "cbv": "Cacua", "cbw": "Kinabalian", "cby": "Carabayo", "ccc": "Chamicuro", "ccd": "Cafundo Creole", "cce": "Chopi", "ccg": "Samba Daka", "cch": "Atsam", "ccj": "Kasanga", "ccl": "Cutchi-Swahili", "ccm": "Malaccan Creole Malay", "ccn": "North Caucasian languages", "cco": "Comaltepec Chinantec", "ccp": "Chakma", "ccr": "Cacaopera", "ccs": "South Caucasian languages", "cda": "Choni", "cdc": "Chadic languages", "cdd": "Caddoan languages", "cde": "Chenchu", "cdf": "Chiru", "cdh": "Chambeali", "cdi": "Chodri", "cdj": "Churahi", "cdm": "Chepang", "cdn": "Chaudangsi", "cdo": "Min Dong Chinese", "cdr": "Cinda-Regi-Tiyal", "cds": "Chadian Sign Language", "cdy": "Chadong", "cdz": "Koda", "ce": "Chechen", "cea": "Lower Chehalis", "ceb": "Cebuano", "ceg": "Chamacoco", "cek": "Eastern Khumi Chin", "cel": "Celtic languages", "cen": "Cen", "cet": "Centúúm", "cey": "Ekai Chin", "cfa": "Dijim-Bwilim", "cfd": "Cara", "cfg": "Como Karim", "cfm": "Falam Chin", "cga": "Changriwa", "cgc": "Kagayanen", "cgg": "Chiga", "cgk": "Chocangacakha", "ch": "Chamorro", "chb": "Chibcha", "chc": "Catawba", "chd": "Highland Oaxaca Chontal", "chf": "Tabasco Chontal", "chg": "Chagatai", "chh": "Chinook", "chj": "Ojitlán Chinantec", "chk": "Chuukese", "chl": "Cahuilla", "chm": "Mari (Russia)", "chn": "Chinook jargon", "cho": "Choctaw", "chp": "Chipewyan; Dene Suline", "chq": "Quiotepec Chinantec", "chr": "Cherokee", "cht": "Cholón", "chw": "Chuwabu", "chx": "Chantyal", "chy": "Cheyenne", "chz": "Ozumacín Chinantec", "cia": "Cia-Cia", "cib": "Ci Gbe", "cic": "Chickasaw", "cid": "Chimariko", "cie": "Cineni", "cih": "Chinali", "cik": "Chitkuli Kinnauri", "cim": "Cimbrian", "cin": "Cinta Larga", "cip": "Chiapanec", "cir": "Tiri; Haméa; Méa", "ciw": "Chippewa", "ciy": "Chaima", "cja": "Western Cham", "cje": "Chru", "cjh": "Upper Chehalis", "cji": "Chamalal", "cjk": "Chokwe", "cjm": "Eastern 
Cham", "cjn": "Chenapian", "cjo": "Ashéninka Pajonal", "cjp": "Cabécar", "cjs": "Shor", "cjv": "Chuave", "cjy": "Jinyu Chinese", "ckb": "Central Kurdish", "ckh": "Chak", "ckl": "Cibak", "ckm": "Chakavian", "ckn": "Kaang Chin", "cko": "Anufo", "ckq": "Kajakse", "ckr": "Kairak", "cks": "Tayo", "ckt": "Chukot", "cku": "Koasati", "ckv": "Kavalan", "ckx": "Caka", "cky": "Cakfem-Mushere", "ckz": "Cakchiquel-Quiché Mixed Language", "cla": "Ron", "clc": "Chilcotin", "cld": "Chaldean Neo-Aramaic", "cle": "Lealao Chinantec", "clh": "Chilisso", "cli": "Chakali", "clj": "Laitu Chin", "clk": "Idu-Mishmi", "cll": "Chala", "clm": "Clallam", "clo": "Lowland Oaxaca Chontal", "clt": "Lautu Chin", "clu": "Caluyanun", "clw": "Chulym", "cly": "Eastern Highland Chatino", "cma": "Maa", "cmc": "Chamic languages", "cme": "Cerma", "cmg": "Classical Mongolian", "cmi": "Emberá-Chamí", "cml": "Campalagian", "cmm": "Michigamea", "cmn": "Mandarin Chinese", "cmo": "Central Mnong", "cmr": "Mro-Khimi Chin", "cms": "Messapic", "cmt": "Camtho", "cna": "Changthang", "cnb": "Chinbon Chin", "cnc": "Côông", "cng": "Northern Qiang", "cnh": "Hakha Chin; Haka Chin", "cni": "Asháninka", "cnk": "Khumi Chin", "cnl": "Lalana Chinantec", "cno": "Con", "cnp": "Northern Ping Chinese; Northern Pinghua", "cnq": "Chung", "cnr": "Montenegrin", "cns": "Central Asmat", "cnt": "Tepetotutla Chinantec", "cnu": "Chenoua", "cnw": "Ngawn Chin", "cnx": "Middle Cornish", "co": "Corsican", "coa": "Cocos Islands Malay", "cob": "Chicomuceltec", "coc": "Cocopa", "cod": "Cocama-Cocamilla", "coe": "Koreguaje", "cof": "Colorado", "cog": "Chong", "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", "coj": "Cochimi", "cok": "Santa Teresa Cora", "col": "Columbia-Wenatchi", "com": "Comanche", "con": "Cofán", "coo": "Comox", "cop": "Coptic", "coq": "Coquille", "cot": "Caquinte", "cou": "Wamey", "cov": "Cao Miao", "cow": "Cowlitz", "cox": "Nanti", "coz": "Chochotec", "cpa": "Palantla Chinantec", "cpb": "Ucayali-Yurúa Ashéninka", "cpc": "Ajyíninka Apurucayali", "cpe": "English-based creoles and pidgins", "cpf": "French-based creoles and pidgins", "cpg": "Cappadocian Greek", "cpi": "Chinese Pidgin English", "cpn": "Cherepon", "cpo": "Kpeego", "cpp": "Portuguese-based creoles and pidgins", "cps": "Capiznon", "cpu": "Pichis Ashéninka", "cpx": "Pu-Xian Chinese", "cpy": "South Ucayali Ashéninka", "cqd": "Chuanqiandian Cluster Miao", "cr": "Cree", "cra": "Chara", "crb": "Island Carib", "crc": "Lonwolwol", "crd": "Coeur d'Alene", "crf": "Caramanta", "crg": "Michif", "crh": "Crimean Tatar; Crimean Turkish", "cri": "Sãotomense", "crj": "Southern East Cree", "crk": "Plains Cree", "crl": "Northern East Cree", "crm": "Moose Cree", "crn": "El Nayar Cora", "cro": "Crow", "crp": "Creoles and pidgins", "crq": "Iyo'wujwa Chorote", "crr": "Carolina Algonquian", "crs": "Seselwa Creole French", "crt": "Iyojwa'ja Chorote", "crv": "Chaura", "crw": "Chrau", "crx": "Carrier", "cry": "Cori", "crz": "Cruzeño", "cs": "Czech", "csa": "Chiltepec Chinantec", "csb": "Kashubian", "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", "csd": "Chiangmai Sign Language", "cse": "Czech Sign Language", "csf": "Cuba Sign Language", "csg": "Chilean Sign Language", "csh": "Asho Chin", "csi": "Coast Miwok", "csj": "Songlai Chin", "csk": "Jola-Kasa", "csl": "Chinese Sign Language", "csm": "Central Sierra Miwok", "csn": "Colombian Sign Language", "cso": "Sochiapam Chinantec; Sochiapan Chinantec", "csp": "Southern Ping Chinese; Southern Pinghua", "csq": "Croatia Sign 
Language", "csr": "Costa Rican Sign Language", "css": "Southern Ohlone", "cst": "Northern Ohlone", "csu": "Central Sudanic languages", "csv": "Sumtu Chin", "csw": "Swampy Cree", "csx": "Cambodian Sign Language", "csy": "Siyin Chin", "csz": "Coos", "cta": "Tataltepec Chatino", "ctc": "Chetco", "ctd": "Tedim Chin", "cte": "Tepinapa Chinantec", "ctg": "Chittagonian", "cth": "Thaiphum Chin", "ctl": "Tlacoatzintepec Chinantec", "ctm": "Chitimacha", "ctn": "Chhintange", "cto": "Emberá-Catío", "ctp": "Western Highland Chatino", "cts": "Northern Catanduanes Bikol", "ctt": "Wayanad Chetti", "ctu": "Chol", "cty": "Moundadan Chetty", "ctz": "Zacatepec Chatino", "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", "cua": "Cua", "cub": "Cubeo", "cuc": "Usila Chinantec", "cuh": "Chuka; Gichuka", "cui": "Cuiba", "cuj": "Mashco Piro", "cuk": "San Blas Kuna", "cul": "Culina; Kulina", "cuo": "Cumanagoto", "cup": "Cupeño", "cuq": "Cun", "cur": "Chhulung", "cus": "Cushitic languages", "cut": "Teutila Cuicatec", "cuu": "Tai Ya", "cuv": "Cuvok", "cuw": "Chukwa", "cux": "Tepeuxila Cuicatec", "cuy": "Cuitlatec", "cv": "Chuvash", "cvg": "Chug", "cvn": "Valle Nacional Chinantec", "cwa": "Kabwa", "cwb": "Maindo", "cwd": "Woods Cree", "cwe": "Kwere", "cwg": "Chewong; Cheq Wong", "cwt": "Kuwaataay", "cy": "Welsh", "cya": "Nopala Chatino", "cyb": "Cayubaba", "cyo": "Cuyonon", "czh": "Huizhou Chinese", "czk": "Knaanic", "czn": "Zenzontepec Chatino", "czo": "Min Zhong Chinese", "czt": "Zotung Chin", "da": "Danish", "daa": "Dangaléat", "dac": "Dambi", "dad": "Marik", "dae": "Duupa", "dag": "Dagbani", "dah": "Gwahatike", "dai": "Day", "daj": "Dar Fur Daju", "dak": "Dakota", "dal": "Dahalo", "dam": "Damakawa", "dao": "Daai Chin", "daq": "Dandami Maria", "dar": "Dargwa", "das": "Daho-Doo", "dau": "Dar Sila Daju", "dav": "Taita; Dawida", "daw": "Davawenyo", "dax": "Dayi", "day": "Land Dayak languages", "daz": "Dao", "dba": "Bangime", "dbb": "Deno", "dbd": "Dadiya", "dbe": "Dabe", "dbf": "Edopi", "dbg": "Dogul Dom Dogon", "dbi": "Doka", "dbj": "Ida'an", "dbl": "Dyirbal", "dbm": "Duguri", "dbn": "Duriankere", "dbo": "Dulbu", "dbp": "Duwai", "dbq": "Daba", "dbr": "Dabarre", "dbt": "Ben Tey Dogon", "dbu": "Bondum Dom Dogon", "dbv": "Dungu", "dbw": "Bankan Tey Dogon", "dby": "Dibiyaso", "dcc": "Deccan", "dcr": "Negerhollands", "dda": "Dadi Dadi", "ddd": "Dongotono", "dde": "Doondo", "ddg": "Fataluku", "ddi": "West Goodenough", "ddj": "Jaru", "ddn": "Dendi (Benin)", "ddo": "Dido", "ddr": "Dhudhuroa", "dds": "Donno So Dogon", "ddw": "Dawera-Daweloor", "de": "German", "dec": "Dagik", "ded": "Dedua", "dee": "Dewoin", "def": "Dezfuli", "deg": "Degema", "deh": "Dehwari", "dei": "Demisa", "dek": "Dek", "del": "Delaware", "dem": "Dem", "den": "Slave (Athapascan)", "dep": "Pidgin Delaware", "deq": "Dendi (Central African Republic)", "der": "Deori", "des": "Desano", "dev": "Domung", "dez": "Dengese", "dga": "Southern Dagaare", "dgb": "Bunoge Dogon", "dgc": "Casiguran Dumagat Agta", "dgd": "Dagaari Dioula", "dge": "Degenan", "dgg": "Doga", "dgh": "Dghwede", "dgi": "Northern Dagara", "dgk": "Dagba", "dgl": "Andaandi; Dongolawi", "dgn": "Dagoman", "dgo": "Dogri (individual language)", "dgr": "Dogrib; Tłı̨chǫ", "dgs": "Dogoso", "dgt": "Ndra'ngith", "dgw": "Daungwurrung", "dgx": "Doghoro", "dgz": "Daga", "dhd": "Dhundari", "dhg": "Dhangu-Djangu; Dhangu; Djangu", "dhi": "Dhimal", "dhl": "Dhalandji", "dhm": "Zemba", "dhn": "Dhanki", "dho": "Dhodia", "dhr": "Dhargari", "dhs": "Dhaiso", "dhu": "Dhurga", "dhv": 
"Dehu; Drehu", "dhw": "Dhanwar (Nepal)", "dhx": "Dhungaloo", "dia": "Dia", "dib": "South Central Dinka", "dic": "Lakota Dida", "did": "Didinga", "dif": "Dieri; Diyari", "dig": "Digo; Chidigo", "dih": "Kumiai", "dii": "Dimbong", "dij": "Dai", "dik": "Southwestern Dinka", "dil": "Dilling", "dim": "Dime", "din": "Dinka", "dio": "Dibo", "dip": "Northeastern Dinka", "diq": "Dimli (individual language)", "dir": "Dirim", "dis": "Dimasa", "diu": "Diriku", "diw": "Northwestern Dinka", "dix": "Dixon Reef", "diy": "Diuwe", "diz": "Ding", "dja": "Djadjawurrung", "djb": "Djinba", "djc": "Dar Daju Daju", "djd": "Djamindjung; Ngaliwurru", "dje": "Zarma", "djf": "Djangun", "dji": "Djinang", "djj": "Djeebbana", "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", "djm": "Jamsay Dogon", "djn": "Jawoyn; Djauan", "djo": "Jangkang", "djr": "Djambarrpuyngu", "dju": "Kapriman", "djw": "Djawi", "dka": "Dakpakha", "dkg": "Kadung", "dkk": "Dakka", "dkr": "Kuijau", "dks": "Southeastern Dinka", "dkx": "Mazagway", "dlg": "Dolgan", "dlk": "Dahalik", "dlm": "Dalmatian", "dln": "Darlong", "dma": "Duma", "dmb": "Mombo Dogon", "dmc": "Gavak", "dmd": "Madhi Madhi", "dme": "Dugwor", "dmf": "Medefaidrin", "dmg": "Upper Kinabatangan", "dmk": "Domaaki", "dml": "Dameli", "dmm": "Dama", "dmn": "Mande languages", "dmo": "Kemedzung", "dmr": "East Damar", "dms": "Dampelas", "dmu": "Dubu; Tebi", "dmv": "Dumpas", "dmw": "Mudburra", "dmx": "Dema", "dmy": "Demta; Sowari", "dna": "Upper Grand Valley Dani", "dnd": "Daonda", "dne": "Ndendeule", "dng": "Dungan", "dni": "Lower Grand Valley Dani", "dnj": "Dan", "dnk": "Dengka", "dnn": "Dzùùngoo", "dno": "Ndrulo; Northern Lendu", "dnr": "Danaru", "dnt": "Mid Grand Valley Dani", "dnu": "Danau", "dnv": "Danu", "dnw": "Western Dani", "dny": "Dení", "doa": "Dom", "dob": "Dobu", "doc": "Northern Dong", "doe": "Doe", "dof": "Domu", "doh": "Dong", "doi": "Dogri (macrolanguage)", "dok": "Dondo", "dol": "Doso", "don": "Toura (Papua New Guinea)", "doo": "Dongo", "dop": "Lukpa", "doq": "Dominican Sign Language", "dor": "Dori'o", "dos": "Dogosé", "dot": "Dass", "dov": "Dombe", "dow": "Doyayo", "dox": "Bussa", "doy": "Dompo", "doz": "Dorze", "dpp": "Papar", "dra": "Dravidian languages", "drb": "Dair", "drc": "Minderico", "drd": "Darmiya", "dre": "Dolpo", "drg": "Rungus", "dri": "C'Lela", "drl": "Paakantyi", "drn": "West Damar", "dro": "Daro-Matu Melanau", "drq": "Dura", "drs": "Gedeo", "drt": "Drents", "dru": "Rukai", "dry": "Darai", "dsb": "Lower Sorbian", "dse": "Dutch Sign Language", "dsh": "Daasanach", "dsi": "Disa", "dsl": "Danish Sign Language", "dsn": "Dusner", "dso": "Desiya", "dsq": "Tadaksahak", "dsz": "Mardin Sign Language", "dta": "Daur", "dtb": "Labuk-Kinabatangan Kadazan", "dtd": "Ditidaht", "dth": "Adithinngithigh", "dti": "Ana Tinga Dogon", "dtk": "Tene Kan Dogon", "dtm": "Tomo Kan Dogon", "dtn": "Daatsʼíin", "dto": "Tommo So Dogon", "dtp": "Kadazan Dusun; Central Dusun", "dtr": "Lotud", "dts": "Toro So Dogon", "dtt": "Toro Tegu Dogon", "dtu": "Tebul Ure Dogon", "dty": "Dotyali", "dua": "Duala", "dub": "Dubli", "duc": "Duna", "due": "Umiray Dumaget Agta", "duf": "Dumbea; Drubea", "dug": "Duruma; Chiduruma", "duh": "Dungra Bhil", "dui": "Dumun", "duk": "Uyajitaya", "dul": "Alabat Island Agta", "dum": "Middle Dutch (ca. 
1050-1350)", "dun": "Dusun Deyah", "duo": "Dupaninan Agta", "dup": "Duano", "duq": "Dusun Malang", "dur": "Dii", "dus": "Dumi", "duu": "Drung", "duv": "Duvle", "duw": "Dusun Witu", "dux": "Duungooma", "duy": "Dicamay Agta", "duz": "Duli-Gey", "dv": "Dhivehi; Divehi; Maldivian", "dva": "Duau", "dwa": "Diri", "dwk": "Dawik Kui", "dwr": "Dawro", "dws": "Dutton World Speedwords", "dwu": "Dhuwal", "dww": "Dawawa", "dwy": "Dhuwaya", "dwz": "Dewas Rai", "dya": "Dyan", "dyb": "Dyaberdyaber", "dyd": "Dyugun", "dyg": "Villa Viciosa Agta", "dyi": "Djimini Senoufo", "dym": "Yanda Dom Dogon", "dyn": "Dyangadi; Dhanggatti", "dyo": "Jola-Fonyi", "dyu": "Dyula", "dyy": "Djabugay; Dyaabugay", "dz": "Dzongkha", "dza": "Tunzu", "dze": "Djiwarli", "dzg": "Dazaga", "dzl": "Dzalakha", "dzn": "Dzando", "eaa": "Karenggapa", "ebc": "Beginci", "ebg": "Ebughu", "ebk": "Eastern Bontok", "ebo": "Teke-Ebo", "ebr": "Ebrié", "ebu": "Embu; Kiembu", "ecr": "Eteocretan", "ecs": "Ecuadorian Sign Language", "ecy": "Eteocypriot", "ee": "Ewe", "eee": "E", "efa": "Efai", "efe": "Efe", "efi": "Efik", "ega": "Ega", "egl": "Emilian", "egm": "Benamanga", "ego": "Eggon", "egx": "Egyptian languages", "egy": "Egyptian (Ancient)", "ehs": "Miyakubo Sign Language", "ehu": "Ehueun", "eip": "Eipomek", "eit": "Eitiep", "eiv": "Askopan", "eja": "Ejamat", "eka": "Ekajuk", "eke": "Ekit", "ekg": "Ekari", "eki": "Eki", "ekk": "Standard Estonian", "ekl": "Kol (Bangladesh); Kol", "ekm": "Elip", "eko": "Koti", "ekp": "Ekpeye", "ekr": "Yace", "eky": "Eastern Kayah", "el": "Modern Greek (1453-)", "ele": "Elepi", "elh": "El Hugeirat", "eli": "Nding", "elk": "Elkei", "elm": "Eleme", "elo": "El Molo", "elu": "Elu", "elx": "Elamite", "ema": "Emai-Iuleha-Ora", "emb": "Embaloh", "eme": "Emerillon", "emg": "Eastern Meohang", "emi": "Mussau-Emira", "emk": "Eastern Maninkakan", "emm": "Mamulique", "emn": "Eman", "emp": "Northern Emberá", "emq": "Eastern Minyag", "ems": "Pacific Gulf Yupik", "emu": "Eastern Muria", "emw": "Emplawas", "emx": "Erromintxela", "emy": "Epigraphic Mayan", "emz": "Mbessa", "en": "English", "ena": "Apali", "enb": "Markweeta", "enc": "En", "end": "Ende", "enf": "Forest Enets", "enh": "Tundra Enets", "enl": "Enlhet", "enm": "Middle English (1100-1500)", "enn": "Engenni", "eno": "Enggano", "enq": "Enga", "enr": "Emumu; Emem", "enu": "Enu", "env": "Enwan (Edo State)", "enw": "Enwan (Akwa Ibom State)", "enx": "Enxet", "eo": "Esperanto", "eot": "Beti (Côte d'Ivoire)", "epi": "Epie", "era": "Eravallan", "erg": "Sie", "erh": "Eruwa", "eri": "Ogea", "erk": "South Efate", "ero": "Horpa", "err": "Erre", "ers": "Ersu", "ert": "Eritai", "erw": "Erokwanas", "es": "Spanish; Castilian", "ese": "Ese Ejja", "esg": "Aheri Gondi", "esh": "Eshtehardi", "esi": "North Alaskan Inupiatun", "esk": "Northwest Alaska Inupiatun", "esl": "Egypt Sign Language", "esm": "Esuma", "esn": "Salvadoran Sign Language", "eso": "Estonian Sign Language", "esq": "Esselen", "ess": "Central Siberian Yupik", "esu": "Central Yupik", "esx": "Eskimo-Aleut languages", "esy": "Eskayan", "et": "Estonian", "etb": "Etebi", "etc": "Etchemin", "eth": "Ethiopian Sign Language", "etn": "Eton (Vanuatu)", "eto": "Eton (Cameroon)", "etr": "Edolo", "ets": "Yekhee", "ett": "Etruscan", "etu": "Ejagham", "etx": "Eten", "etz": "Semimi", "eu": "Basque", "euq": "Basque (family)", "eve": "Even", "evh": "Uvbie", "evn": "Evenki", "ewo": "Ewondo", "ext": "Extremaduran", "eya": "Eyak", "eyo": "Keiyo", "eza": "Ezaa", "eze": "Uzekwe", "fa": "Persian", "faa": "Fasu", "fab": "Fa d'Ambu", "fad": "Wagi", "faf": 
"Fagani", "fag": "Finongan", "fah": "Baissa Fali", "fai": "Faiwol", "faj": "Faita", "fak": "Fang (Cameroon)", "fal": "South Fali", "fam": "Fam", "fan": "Fang (Equatorial Guinea)", "fap": "Paloor", "far": "Fataleka", "fat": "Fanti", "fau": "Fayu", "fax": "Fala", "fay": "Southwestern Fars", "faz": "Northwestern Fars", "fbl": "West Albay Bikol", "fcs": "Quebec Sign Language", "fer": "Feroge", "ff": "Fulah", "ffi": "Foia Foia", "ffm": "Maasina Fulfulde", "fgr": "Fongoro", "fi": "Finnish", "fia": "Nobiin", "fie": "Fyer", "fif": "Faifi", "fil": "Filipino; Pilipino", "fip": "Fipa", "fir": "Firan", "fit": "Tornedalen Finnish; Meänkieli", "fiu": "Finno-Ugrian languages", "fiw": "Fiwaga", "fj": "Fijian", "fkk": "Kirya-Konzəl", "fkv": "Kven Finnish", "fla": "Kalispel-Pend d'Oreille", "flh": "Foau", "fli": "Fali", "fll": "North Fali", "fln": "Flinders Island", "flr": "Fuliiru", "fly": "Flaaitaal; Tsotsitaal", "fmp": "Fe'fe'", "fmu": "Far Western Muria", "fnb": "Fanbak", "fng": "Fanagalo", "fni": "Fania", "fo": "Faroese", "fod": "Foodo", "foi": "Foi", "fom": "Foma", "fon": "Fon", "for": "Fore", "fos": "Siraya", "fox": "Formosan languages", "fpe": "Fernando Po Creole English", "fqs": "Fas", "fr": "French", "frc": "Cajun French", "frd": "Fordata", "frk": "Frankish", "frm": "Middle French (ca. 1400-1600)", "fro": "Old French (842-ca. 1400)", "frp": "Arpitan; Francoprovençal", "frq": "Forak", "frr": "Northern Frisian", "frs": "Eastern Frisian", "frt": "Fortsenal", "fse": "Finnish Sign Language", "fsl": "French Sign Language", "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", "fub": "Adamawa Fulfulde", "fuc": "Pulaar", "fud": "East Futuna", "fue": "Borgu Fulfulde", "fuf": "Pular", "fuh": "Western Niger Fulfulde", "fui": "Bagirmi Fulfulde", "fuj": "Ko", "fum": "Fum", "fun": "Fulniô", "fuq": "Central-Eastern Niger Fulfulde", "fur": "Friulian", "fut": "Futuna-Aniwa", "fuu": "Furu", "fuv": "Nigerian Fulfulde", "fuy": "Fuyug", "fvr": "Fur", "fwa": "Fwâi", "fwe": "Fwe", "fy": "Western Frisian", "ga": "Irish", "gaa": "Ga", "gab": "Gabri", "gac": "Mixed Great Andamanese", "gad": "Gaddang", "gae": "Guarequena", "gaf": "Gende", "gag": "Gagauz", "gah": "Alekano", "gai": "Borei", "gaj": "Gadsup", "gak": "Gamkonora", "gal": "Galolen", "gam": "Kandawo", "gan": "Gan Chinese", "gao": "Gants", "gap": "Gal", "gaq": "Gata'", "gar": "Galeya", "gas": "Adiwasi Garasia", "gat": "Kenati", "gau": "Mudhili Gadaba", "gaw": "Nobonob", "gax": "Borana-Arsi-Guji Oromo", "gay": "Gayo", "gaz": "West Central Oromo", "gba": "Gbaya (Central African Republic)", "gbb": "Kaytetye", "gbd": "Karajarri", "gbe": "Niksek", "gbf": "Gaikundi", "gbg": "Gbanziri", "gbh": "Defi Gbe", "gbi": "Galela", "gbj": "Bodo Gadaba", "gbk": "Gaddi", "gbl": "Gamit", "gbm": "Garhwali", "gbn": "Mo'da", "gbo": "Northern Grebo", "gbp": "Gbaya-Bossangoa", "gbq": "Gbaya-Bozoum", "gbr": "Gbagyi", "gbs": "Gbesi Gbe", "gbu": "Gagadu", "gbv": "Gbanu", "gbw": "Gabi-Gabi", "gbx": "Eastern Xwla Gbe", "gby": "Gbari", "gbz": "Zoroastrian Dari", "gcc": "Mali", "gcd": "Ganggalida", "gce": "Galice", "gcf": "Guadeloupean Creole French", "gcl": "Grenadian Creole English", "gcn": "Gaina", "gcr": "Guianese Creole French", "gct": "Colonia Tovar German", "gd": "Scottish Gaelic; Gaelic", "gda": "Gade Lohar", "gdb": "Pottangi Ollar Gadaba", "gdc": "Gugu Badhun", "gdd": "Gedaged", "gde": "Gude", "gdf": "Guduf-Gava", "gdg": "Ga'dang", "gdh": "Gadjerawang; Gajirrabeng", "gdi": "Gundi", "gdj": "Gurdjar", "gdk": "Gadang", "gdl": "Dirasha", "gdm": 
"Laal", "gdn": "Umanakaina", "gdo": "Ghodoberi", "gdq": "Mehri", "gdr": "Wipi", "gds": "Ghandruk Sign Language", "gdt": "Kungardutyi", "gdu": "Gudu", "gdx": "Godwari", "gea": "Geruma", "geb": "Kire", "gec": "Gboloo Grebo", "ged": "Gade", "gef": "Gerai", "geg": "Gengle", "geh": "Hutterite German; Hutterisch", "gei": "Gebe", "gej": "Gen", "gek": "Ywom", "gel": "ut-Ma'in", "gem": "Germanic languages", "geq": "Geme", "ges": "Geser-Gorom", "gev": "Eviya", "gew": "Gera", "gex": "Garre", "gey": "Enya", "gez": "Geez", "gfk": "Patpatar", "gft": "Gafat", "gga": "Gao", "ggb": "Gbii", "ggd": "Gugadj", "gge": "Gurr-goni", "ggg": "Gurgula", "ggk": "Kungarakany", "ggl": "Ganglau", "ggt": "Gitua", "ggu": "Gagu; Gban", "ggw": "Gogodala", "gha": "Ghadamès", "ghc": "Hiberno-Scottish Gaelic", "ghe": "Southern Ghale", "ghh": "Northern Ghale", "ghk": "Geko Karen", "ghl": "Ghulfan", "ghn": "Ghanongga", "gho": "Ghomara", "ghr": "Ghera", "ghs": "Guhu-Samane", "ght": "Kuke; Kutang Ghale", "gia": "Kija", "gib": "Gibanawa", "gic": "Gail", "gid": "Gidar", "gie": "Gaɓogbo; Guébie", "gig": "Goaria", "gih": "Githabul", "gii": "Girirra", "gil": "Gilbertese", "gim": "Gimi (Eastern Highlands)", "gin": "Hinukh", "gip": "Gimi (West New Britain)", "giq": "Green Gelao", "gir": "Red Gelao", "gis": "North Giziga", "git": "Gitxsan", "giu": "Mulao", "giw": "White Gelao", "gix": "Gilima", "giy": "Giyug", "giz": "South Giziga", "gjk": "Kachi Koli", "gjm": "Gunditjmara", "gjn": "Gonja", "gjr": "Gurindji Kriol", "gju": "Gujari", "gka": "Guya", "gkd": "Magɨ (Madang Province)", "gke": "Ndai", "gkn": "Gokana", "gko": "Kok-Nar", "gkp": "Guinea Kpelle", "gku": "ǂUngkue", "gl": "Galician", "glb": "Belning", "glc": "Bon Gula", "gld": "Nanai", "glh": "Northwest Pashai; Northwest Pashayi", "glj": "Gula Iro", "glk": "Gilaki", "gll": "Garlali", "glo": "Galambu", "glr": "Glaro-Twabo", "glu": "Gula (Chad)", "glw": "Glavda", "gly": "Gule", "gma": "Gambera", "gmb": "Gula'alaa", "gmd": "Mághdì", "gme": "East Germanic languages", "gmg": "Magɨyi", "gmh": "Middle High German (ca. 1050-1500)", "gml": "Middle Low German", "gmm": "Gbaya-Mbodomo", "gmn": "Gimnime", "gmq": "North Germanic languages", "gmr": "Mirning; Mirniny", "gmu": "Gumalu", "gmv": "Gamo", "gmw": "West Germanic languages", "gmx": "Magoma", "gmy": "Mycenaean Greek", "gmz": "Mgbolizhia", "gn": "Guarani", "gna": "Kaansa", "gnb": "Gangte", "gnc": "Guanche", "gnd": "Zulgo-Gemzek", "gne": "Ganang", "gng": "Ngangam", "gnh": "Lere", "gni": "Gooniyandi", "gnj": "Ngen", "gnk": "ǁGana", "gnl": "Gangulu", "gnm": "Ginuman", "gnn": "Gumatj", "gno": "Northern Gondi", "gnq": "Gana", "gnr": "Gureng Gureng", "gnt": "Guntai", "gnu": "Gnau", "gnw": "Western Bolivian Guaraní", "gnz": "Ganzi", "goa": "Guro", "gob": "Playero", "goc": "Gorakor", "god": "Godié", "goe": "Gongduk", "gof": "Gofa", "gog": "Gogo", "goh": "Old High German (ca. 
750-1050)", "goi": "Gobasi", "goj": "Gowlan", "gok": "Gowli", "gol": "Gola", "gom": "Goan Konkani", "gon": "Gondi", "goo": "Gone Dau", "gop": "Yeretuar", "goq": "Gorap", "gor": "Gorontalo", "gos": "Gronings", "got": "Gothic", "gou": "Gavar", "gov": "Goo", "gow": "Gorowa", "gox": "Gobu", "goy": "Goundo", "goz": "Gozarkhani", "gpa": "Gupa-Abawa", "gpe": "Ghanaian Pidgin English", "gpn": "Taiap", "gqa": "Ga'anda", "gqi": "Guiqiong", "gqn": "Guana (Brazil)", "gqr": "Gor", "gqu": "Qau", "gra": "Rajput Garasia", "grb": "Grebo", "grc": "Ancient Greek (to 1453)", "grd": "Guruntum-Mbaaru", "grg": "Madi", "grh": "Gbiri-Niragu", "gri": "Ghari", "grj": "Southern Grebo", "grk": "Greek languages", "grm": "Kota Marudu Talantang", "gro": "Groma", "grq": "Gorovu", "grr": "Taznatit", "grs": "Gresi", "grt": "Garo", "gru": "Kistane", "grv": "Central Grebo", "grw": "Gweda", "grx": "Guriaso", "gry": "Barclayville Grebo", "grz": "Guramalum", "gse": "Ghanaian Sign Language", "gsg": "German Sign Language", "gsl": "Gusilay", "gsm": "Guatemalan Sign Language", "gsn": "Nema; Gusan", "gso": "Southwest Gbaya", "gsp": "Wasembo", "gss": "Greek Sign Language", "gsw": "Swiss German; Alemannic; Alsatian", "gta": "Guató", "gtu": "Aghu-Tharnggala", "gu": "Gujarati", "gua": "Shiki", "gub": "Guajajára", "guc": "Wayuu", "gud": "Yocoboué Dida", "gue": "Gurindji", "guf": "Gupapuyngu", "gug": "Paraguayan Guaraní", "guh": "Guahibo", "gui": "Eastern Bolivian Guaraní", "guk": "Gumuz", "gul": "Sea Island Creole English", "gum": "Guambiano", "gun": "Mbyá Guaraní", "guo": "Guayabero", "gup": "Gunwinggu", "guq": "Aché", "gur": "Farefare", "gus": "Guinean Sign Language", "gut": "Maléku Jaíka", "guu": "Yanomamö", "guw": "Gun", "gux": "Gourmanchéma", "guz": "Gusii; Ekegusii", "gv": "Manx", "gva": "Guana (Paraguay)", "gvc": "Guanano", "gve": "Duwet", "gvf": "Golin", "gvj": "Guajá", "gvl": "Gulay", "gvm": "Gurmana", "gvn": "Kuku-Yalanji", "gvo": "Gavião Do Jiparaná", "gvp": "Pará Gavião", "gvr": "Gurung", "gvs": "Gumawana", "gvy": "Guyani", "gwa": "Mbato", "gwb": "Gwa", "gwc": "Gawri; Kalami", "gwd": "Gawwada", "gwe": "Gweno", "gwf": "Gowro", "gwg": "Moo", "gwi": "Gwichʼin", "gwj": "ǀGwi", "gwm": "Awngthim", "gwn": "Gwandara", "gwr": "Gwere", "gwt": "Gawar-Bati", "gwu": "Guwamu", "gww": "Kwini", "gwx": "Gua", "gxx": "Wè Southern", "gya": "Northwest Gbaya", "gyb": "Garus", "gyd": "Kayardild", "gye": "Gyem", "gyf": "Gungabula", "gyg": "Gbayi", "gyi": "Gyele", "gyl": "Gayil", "gym": "Ngäbere", "gyn": "Guyanese Creole English", "gyo": "Gyalsumdo", "gyr": "Guarayu", "gyy": "Gunya", "gyz": "Geji; Gyaazi", "gza": "Ganza", "gzi": "Gazi", "gzn": "Gane", "ha": "Hausa", "haa": "Han", "hab": "Hanoi Sign Language", "hac": "Gurani", "had": "Hatam", "hae": "Eastern Oromo", "haf": "Haiphong Sign Language", "hag": "Hanga", "hah": "Hahon", "hai": "Haida", "haj": "Hajong", "hak": "Hakka Chinese", "hal": "Halang", "ham": "Hewa", "han": "Hangaza", "hao": "Hakö", "hap": "Hupla", "haq": "Ha", "har": "Harari", "has": "Haisla", "hav": "Havu", "haw": "Hawaiian", "hax": "Southern Haida", "hay": "Haya", "haz": "Hazaragi", "hba": "Hamba", "hbb": "Huba", "hbn": "Heiban", "hbo": "Ancient Hebrew", "hbu": "Habu", "hca": "Andaman Creole Hindi", "hch": "Huichol", "hdn": "Northern Haida", "hds": "Honduras Sign Language", "hdy": "Hadiyya", "he": "Hebrew", "hea": "Northern Qiandong Miao", "hed": "Herdé", "heg": "Helong", "heh": "Hehe", "hei": "Heiltsuk", "hem": "Hemba", "hgm": "Haiǁom", "hgw": "Haigwai", "hhi": "Hoia Hoia", "hhr": "Kerak", "hhy": "Hoyahoya", "hi": "Hindi", "hia": 
"Lamang", "hib": "Hibito", "hid": "Hidatsa", "hif": "Fiji Hindi", "hig": "Kamwe", "hih": "Pamosu", "hii": "Hinduri", "hij": "Hijuk", "hik": "Seit-Kaitetu", "hil": "Hiligaynon", "him": "Himachali languages; Western Pahari languages", "hio": "Tsoa", "hir": "Himarimã", "hit": "Hittite", "hiw": "Hiw", "hix": "Hixkaryána", "hji": "Haji", "hka": "Kahe", "hke": "Hunde", "hkh": "Khah; Poguli", "hkk": "Hunjara-Kaina Ke", "hkn": "Mel-Khaonh", "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", "hla": "Halia", "hlb": "Halbi", "hld": "Halang Doan", "hle": "Hlersu", "hlt": "Matu Chin", "hlu": "Hieroglyphic Luwian", "hma": "Southern Mashan Hmong; Southern Mashan Miao", "hmb": "Humburi Senni Songhay", "hmc": "Central Huishui Hmong; Central Huishui Miao", "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", "hmf": "Hmong Don", "hmg": "Southwestern Guiyang Hmong", "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", "hmi": "Northern Huishui Hmong; Northern Huishui Miao", "hmj": "Ge; Gejia", "hmk": "Maek", "hml": "Luopohe Hmong; Luopohe Miao", "hmm": "Central Mashan Hmong; Central Mashan Miao", "hmn": "Hmong; Mong", "hmp": "Northern Mashan Hmong; Northern Mashan Miao", "hmq": "Eastern Qiandong Miao", "hmr": "Hmar", "hms": "Southern Qiandong Miao", "hmt": "Hamtai", "hmu": "Hamap", "hmv": "Hmong Dô", "hmw": "Western Mashan Hmong; Western Mashan Miao", "hmx": "Hmong-Mien languages", "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", "hmz": "Hmong Shua; Sinicized Miao", "hna": "Mina (Cameroon)", "hnd": "Southern Hindko", "hne": "Chhattisgarhi", "hng": "Hungu", "hnh": "ǁAni", "hni": "Hani", "hnj": "Hmong Njua; Mong Leng; Mong Njua", "hnn": "Hanunoo", "hno": "Northern Hindko", "hns": "Caribbean Hindustani", "hnu": "Hung", "ho": "Hiri Motu", "hoa": "Hoava", "hob": "Mari (Madang Province)", "hoc": "Ho", "hod": "Holma", "hoe": "Horom", "hoh": "Hobyót", "hoi": "Holikachuk", "hoj": "Hadothi; Haroti", "hok": "Hokan languages", "hol": "Holu", "hom": "Homa", "hoo": "Holoholo", "hop": "Hopi", "hor": "Horo", "hos": "Ho Chi Minh City Sign Language", "hot": "Hote; Malê", "hov": "Hovongan", "how": "Honi", "hoy": "Holiya", "hoz": "Hozo", "hpo": "Hpon", "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", "hr": "Croatian", "hra": "Hrangkhol", "hrc": "Niwer Mil", "hre": "Hre", "hrk": "Haruku", "hrm": "Horned Miao", "hro": "Haroi", "hrp": "Nhirrpi", "hrt": "Hértevin", "hru": "Hruso", "hrw": "Warwar Feni", "hrx": "Hunsrik", "hrz": "Harzani", "hsb": "Upper Sorbian", "hsh": "Hungarian Sign Language", "hsl": "Hausa Sign Language", "hsn": "Xiang Chinese", "hss": "Harsusi", "ht": "Haitian; Haitian Creole", "hti": "Hoti", "hto": "Minica Huitoto", "hts": "Hadza", "htu": "Hitu", "htx": "Middle Hittite", "hu": "Hungarian", "hub": "Huambisa", "huc": "ǂHua; ǂʼAmkhoe", "hud": "Huaulu", "hue": "San Francisco Del Mar Huave", "huf": "Humene", "hug": "Huachipaeri", "huh": "Huilliche", "hui": "Huli", "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", "huk": "Hulung", "hul": "Hula", "hum": "Hungana", "huo": "Hu", "hup": "Hupa", "huq": "Tsat", "hur": "Halkomelem", "hus": "Huastec", "hut": "Humla", "huu": "Murui Huitoto", "huv": "San Mateo Del Mar Huave", "huw": "Hukumina", "hux": "Nüpode Huitoto", "huy": "Hulaulá", "huz": "Hunzib", "hvc": "Haitian Vodoun Culture Language", "hve": "San Dionisio Del Mar Huave", "hvk": "Haveke", "hvn": "Sabu", "hvv": "Santa María Del Mar Huave", "hwa": "Wané", "hwc": "Hawai'i Creole English; Hawai'i Pidgin", "hwo": "Hwana", "hy": 
"Armenian", "hya": "Hya", "hyw": "Western Armenian", "hyx": "Armenian (family)", "hz": "Herero", "ia": "Interlingua (International Auxiliary Language Association)", "iai": "Iaai", "ian": "Iatmul", "iar": "Purari", "iba": "Iban", "ibb": "Ibibio", "ibd": "Iwaidja", "ibe": "Akpes", "ibg": "Ibanag", "ibh": "Bih", "ibl": "Ibaloi", "ibm": "Agoi", "ibn": "Ibino", "ibr": "Ibuoro", "ibu": "Ibu", "iby": "Ibani", "ica": "Ede Ica", "ich": "Etkywan", "icl": "Icelandic Sign Language", "icr": "Islander Creole English", "id": "Indonesian", "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", "idb": "Indo-Portuguese", "idc": "Idon; Ajiya", "idd": "Ede Idaca", "ide": "Idere", "idi": "Idi", "idr": "Indri", "ids": "Idesa", "idt": "Idaté", "idu": "Idoma", "ie": "Interlingue; Occidental", "ifa": "Amganad Ifugao", "ifb": "Batad Ifugao; Ayangan Ifugao", "ife": "Ifè", "iff": "Ifo", "ifk": "Tuwali Ifugao", "ifm": "Teke-Fuumu", "ifu": "Mayoyao Ifugao", "ify": "Keley-I Kallahan", "ig": "Igbo", "igb": "Ebira", "ige": "Igede", "igg": "Igana", "igl": "Igala", "igm": "Kanggape", "ign": "Ignaciano", "igo": "Isebe", "igs": "Interglossa", "igw": "Igwe", "ihb": "Iha Based Pidgin", "ihi": "Ihievbe", "ihp": "Iha", "ihw": "Bidhawal", "ii": "Sichuan Yi; Nuosu", "iin": "Thiin", "iir": "Indo-Iranian languages", "ijc": "Izon", "ije": "Biseni", "ijj": "Ede Ije", "ijn": "Kalabari", "ijo": "Ijo languages", "ijs": "Southeast Ijo", "ik": "Inupiaq", "ike": "Eastern Canadian Inuktitut", "iki": "Iko", "ikk": "Ika", "ikl": "Ikulu", "iko": "Olulumo-Ikom", "ikp": "Ikpeshi", "ikr": "Ikaranggal", "iks": "Inuit Sign Language", "ikt": "Inuinnaqtun; Western Canadian Inuktitut", "ikv": "Iku-Gora-Ankwa", "ikw": "Ikwere", "ikx": "Ik", "ikz": "Ikizu", "ila": "Ile Ape", "ilb": "Ila", "ilg": "Garig-Ilgar", "ili": "Ili Turki", "ilk": "Ilongot", "ilm": "Iranun (Malaysia)", "ilo": "Iloko", "ilp": "Iranun (Philippines)", "ils": "International Sign", "ilu": "Ili'uun", "ilv": "Ilue", "ima": "Mala Malasar", "imi": "Anamgura", "iml": "Miluk", "imn": "Imonda", "imo": "Imbongu", "imr": "Imroing", "ims": "Marsian", "imt": "Imotong", "imy": "Milyan", "inb": "Inga", "inc": "Indic languages", "ine": "Indo-European languages", "ing": "Degexit'an", "inh": "Ingush", "inj": "Jungle Inga", "inl": "Indonesian Sign Language", "inm": "Minaean", "inn": "Isinai", "ino": "Inoke-Yate", "inp": "Iñapari", "ins": "Indian Sign Language", "int": "Intha", "inz": "Ineseño", "io": "Ido", "ior": "Inor", "iou": "Tuma-Irumu", "iow": "Iowa-Oto", "ipi": "Ipili", "ipo": "Ipiko", "iqu": "Iquito", "iqw": "Ikwo", "ira": "Iranian languages", "ire": "Iresim", "irh": "Irarutu", "iri": "Rigwe; Irigwe", "irk": "Iraqw", "irn": "Irántxe", "iro": "Iroquoian languages", "irr": "Ir", "iru": "Irula", "irx": "Kamberau", "iry": "Iraya", "is": "Icelandic", "isa": "Isabi", "isc": "Isconahua", "isd": "Isnag", "ise": "Italian Sign Language", "isg": "Irish Sign Language", "ish": "Esan", "isi": "Nkem-Nkum", "isk": "Ishkashimi", "ism": "Masimasi", "isn": "Isanzu", "iso": "Isoko", "isr": "Israeli Sign Language", "ist": "Istriot", "isu": "Isu (Menchum Division)", "it": "Italian", "itb": "Binongan Itneg", "itc": "Italic languages", "itd": "Southern Tidung", "ite": "Itene", "iti": "Inlaod Itneg", "itk": "Judeo-Italian", "itl": "Itelmen", "itm": "Itu Mbon Uzo", "ito": "Itonama", "itr": "Iteri", "its": "Isekiri", "itt": "Maeng Itneg", "itv": "Itawit", "itw": "Ito", "itx": "Itik", "ity": "Moyadan Itneg", "itz": "Itzá", "iu": "Inuktitut", "ium": "Iu Mien", "ivb": "Ibatan", "ivv": "Ivatan", "iwk": "I-Wak", "iwm": 
"Iwam", "iwo": "Iwur", "iws": "Sepik Iwam", "ixc": "Ixcatec", "ixl": "Ixil", "iya": "Iyayu", "iyo": "Mesaka", "iyx": "Yaka (Congo)", "izh": "Ingrian", "izr": "Izere", "izz": "Izii", "ja": "Japanese", "jaa": "Jamamadí", "jab": "Hyam", "jac": "Popti'; Jakalteko", "jad": "Jahanka", "jae": "Yabem", "jaf": "Jara", "jah": "Jah Hut", "jaj": "Zazao", "jak": "Jakun", "jal": "Yalahatan", "jam": "Jamaican Creole English", "jan": "Jandai", "jao": "Yanyuwa", "jaq": "Yaqay", "jas": "New Caledonian Javanese", "jat": "Jakati", "jau": "Yaur", "jax": "Jambi Malay", "jay": "Yan-nhangu; Nhangu", "jaz": "Jawe", "jbe": "Judeo-Berber", "jbi": "Badjiri", "jbj": "Arandai", "jbk": "Barikewa", "jbm": "Bijim", "jbn": "Nafusi", "jbo": "Lojban", "jbr": "Jofotek-Bromnya", "jbt": "Jabutí", "jbu": "Jukun Takum", "jbw": "Yawijibaya", "jcs": "Jamaican Country Sign Language", "jct": "Krymchak", "jda": "Jad", "jdg": "Jadgali", "jdt": "Judeo-Tat", "jeb": "Jebero", "jee": "Jerung", "jeh": "Jeh", "jei": "Yei", "jek": "Jeri Kuo", "jel": "Yelmek", "jen": "Dza", "jer": "Jere", "jet": "Manem", "jeu": "Jonkor Bourmataguil", "jgb": "Ngbee", "jge": "Judeo-Georgian", "jgk": "Gwak", "jgo": "Ngomba", "jhi": "Jehai", "jhs": "Jhankot Sign Language", "jia": "Jina", "jib": "Jibu", "jic": "Tol", "jid": "Bu (Kaduna State)", "jie": "Jilbe", "jig": "Jingulu; Djingili", "jih": "sTodsde; Shangzhai", "jii": "Jiiddu", "jil": "Jilim", "jim": "Jimi (Cameroon)", "jio": "Jiamao", "jiq": "Guanyinqiao; Lavrung", "jit": "Jita", "jiu": "Youle Jinuo", "jiv": "Shuar", "jiy": "Buyuan Jinuo", "jje": "Jejueo", "jjr": "Bankal", "jka": "Kaera", "jkm": "Mobwa Karen", "jko": "Kubo", "jkp": "Paku Karen", "jkr": "Koro (India)", "jks": "Amami Koniya Sign Language", "jku": "Labir", "jle": "Ngile", "jls": "Jamaican Sign Language", "jma": "Dima", "jmb": "Zumbun", "jmc": "Machame", "jmd": "Yamdena", "jmi": "Jimi (Nigeria)", "jml": "Jumli", "jmn": "Makuri Naga", "jmr": "Kamara", "jms": "Mashi (Nigeria)", "jmw": "Mouwase", "jmx": "Western Juxtlahuaca Mixtec", "jna": "Jangshung", "jnd": "Jandavra", "jng": "Yangman", "jni": "Janji", "jnj": "Yemsa", "jnl": "Rawat", "jns": "Jaunsari", "job": "Joba", "jod": "Wojenaka", "jog": "Jogi", "jor": "Jorá", "jos": "Jordanian Sign Language", "jow": "Jowulu", "jpa": "Jewish Palestinian Aramaic", "jpr": "Judeo-Persian", "jpx": "Japanese (family)", "jqr": "Jaqaru", "jra": "Jarai", "jrb": "Judeo-Arabic", "jrr": "Jiru", "jrt": "Jakattoe", "jru": "Japrería", "jsl": "Japanese Sign Language", "jua": "Júma", "jub": "Wannu", "juc": "Jurchen", "jud": "Worodougou", "juh": "Hõne", "jui": "Ngadjuri", "juk": "Wapan", "jul": "Jirel", "jum": "Jumjum", "jun": "Juang", "juo": "Jiba", "jup": "Hupdë", "jur": "Jurúna", "jus": "Jumla Sign Language", "jut": "Jutish", "juu": "Ju", "juw": "Wãpha", "juy": "Juray", "jv": "Javanese", "jvd": "Javindo", "jvn": "Caribbean Javanese", "jwi": "Jwira-Pepesa", "jya": "Jiarong", "jye": "Judeo-Yemeni Arabic", "jyy": "Jaya", "ka": "Georgian", "kaa": "Kara-Kalpak; Karakalpak", "kab": "Kabyle", "kac": "Kachin; Jingpho", "kad": "Adara", "kae": "Ketangalan", "kaf": "Katso", "kag": "Kajaman", "kah": "Kara (Central African Republic)", "kai": "Karekare", "kaj": "Jju", "kak": "Kalanguya; Kayapa Kallahan", "kam": "Kamba (Kenya)", "kao": "Xaasongaxango", "kap": "Bezhta", "kaq": "Capanahua", "kar": "Karen languages", "kav": "Katukína", "kaw": "Kawi", "kax": "Kao", "kay": "Kamayurá", "kba": "Kalarko", "kbb": "Kaxuiâna", "kbc": "Kadiwéu", "kbd": "Kabardian", "kbe": "Kanju", "kbg": "Khamba", "kbh": "Camsá", "kbi": "Kaptiau", "kbj": "Kari", 
"kbk": "Grass Koiari", "kbl": "Kanembu", "kbm": "Iwal", "kbn": "Kare (Central African Republic)", "kbo": "Keliko", "kbp": "Kabiyè", "kbq": "Kamano", "kbr": "Kafa", "kbs": "Kande", "kbt": "Abadi", "kbu": "Kabutra", "kbv": "Dera (Indonesia)", "kbw": "Kaiep", "kbx": "Ap Ma", "kby": "Manga Kanuri", "kbz": "Duhwa", "kca": "Khanty", "kcb": "Kawacha", "kcc": "Lubila", "kcd": "Ngkâlmpw Kanum", "kce": "Kaivi", "kcf": "Ukaan", "kcg": "Tyap", "kch": "Vono", "kci": "Kamantan", "kcj": "Kobiana", "kck": "Kalanga", "kcl": "Kela (Papua New Guinea); Kala", "kcm": "Gula (Central African Republic)", "kcn": "Nubi", "kco": "Kinalakna", "kcp": "Kanga", "kcq": "Kamo", "kcr": "Katla", "kcs": "Koenoem", "kct": "Kaian", "kcu": "Kami (Tanzania)", "kcv": "Kete", "kcw": "Kabwari", "kcx": "Kachama-Ganjule", "kcy": "Korandje", "kcz": "Konongo", "kda": "Worimi", "kdc": "Kutu", "kdd": "Yankunytjatjara", "kde": "Makonde", "kdf": "Mamusi", "kdg": "Seba", "kdh": "Tem", "kdi": "Kumam", "kdj": "Karamojong", "kdk": "Numèè; Kwényi", "kdl": "Tsikimba", "kdm": "Kagoma", "kdn": "Kunda", "kdo": "Kordofanian languages", "kdp": "Kaningdon-Nindem", "kdq": "Koch", "kdr": "Karaim", "kdt": "Kuy", "kdu": "Kadaru", "kdw": "Koneraw", "kdx": "Kam", "kdy": "Keder; Keijar", "kdz": "Kwaja", "kea": "Kabuverdianu", "keb": "Kélé", "kec": "Keiga", "ked": "Kerewe", "kee": "Eastern Keres", "kef": "Kpessi", "keg": "Tese", "keh": "Keak", "kei": "Kei", "kej": "Kadar", "kek": "Kekchí", "kel": "Kela (Democratic Republic of Congo)", "kem": "Kemak", "ken": "Kenyang", "keo": "Kakwa", "kep": "Kaikadi", "keq": "Kamar", "ker": "Kera", "kes": "Kugbo", "ket": "Ket", "keu": "Akebu", "kev": "Kanikkaran", "kew": "West Kewa", "kex": "Kukna", "key": "Kupia", "kez": "Kukele", "kfa": "Kodava", "kfb": "Northwestern Kolami", "kfc": "Konda-Dora", "kfd": "Korra Koraga", "kfe": "Kota (India)", "kff": "Koya", "kfg": "Kudiya", "kfh": "Kurichiya", "kfi": "Kannada Kurumba", "kfj": "Kemiehua", "kfk": "Kinnauri", "kfl": "Kung", "kfm": "Khunsari", "kfn": "Kuk", "kfo": "Koro (Côte d'Ivoire)", "kfp": "Korwa", "kfq": "Korku", "kfr": "Kachhi; Kutchi", "kfs": "Bilaspuri", "kft": "Kanjari", "kfu": "Katkari", "kfv": "Kurmukar", "kfw": "Kharam Naga", "kfx": "Kullu Pahari", "kfy": "Kumaoni", "kfz": "Koromfé", "kg": "Kongo", "kga": "Koyaga", "kgb": "Kawe", "kge": "Komering", "kgf": "Kube", "kgg": "Kusunda", "kgi": "Selangor Sign Language", "kgj": "Gamale Kham", "kgk": "Kaiwá", "kgl": "Kunggari", "kgm": "Karipúna", "kgn": "Karingani", "kgo": "Krongo", "kgp": "Kaingang", "kgq": "Kamoro", "kgr": "Abun", "kgs": "Kumbainggar", "kgt": "Somyev", "kgu": "Kobol", "kgv": "Karas", "kgw": "Karon Dori", "kgx": "Kamaru", "kgy": "Kyerung", "kha": "Khasi", "khb": "Lü", "khc": "Tukang Besi North", "khd": "Bädi Kanum", "khe": "Korowai", "khf": "Khuen", "khg": "Khams Tibetan", "khh": "Kehu", "khi": "Khoisan languages", "khj": "Kuturmi", "khk": "Halh Mongolian", "khl": "Lusi", "khn": "Khandesi", "kho": "Khotanese; Sakan", "khp": "Kapori; Kapauri", "khq": "Koyra Chiini Songhay", "khr": "Kharia", "khs": "Kasua", "kht": "Khamti", "khu": "Nkhumbi", "khv": "Khvarshi", "khw": "Khowar", "khx": "Kanu", "khy": "Kele (Democratic Republic of Congo)", "khz": "Keapara", "ki": "Kikuyu; Gikuyu", "kia": "Kim", "kib": "Koalib", "kic": "Kickapoo", "kid": "Koshin", "kie": "Kibet", "kif": "Eastern Parbate Kham", "kig": "Kimaama; Kimaghima", "kih": "Kilmeri", "kii": "Kitsai", "kij": "Kilivila", "kil": "Kariya", "kim": "Karagas", "kio": "Kiowa", "kip": "Sheshi Kham", "kiq": "Kosadle; Kosare", "kis": "Kis", "kit": "Agob", "kiu": 
"Kirmanjki (individual language)", "kiv": "Kimbu", "kiw": "Northeast Kiwai", "kix": "Khiamniungan Naga", "kiy": "Kirikiri", "kiz": "Kisi", "kj": "Kuanyama; Kwanyama", "kja": "Mlap", "kjb": "Q'anjob'al; Kanjobal", "kjc": "Coastal Konjo", "kjd": "Southern Kiwai", "kje": "Kisar", "kjg": "Khmu", "kjh": "Khakas", "kji": "Zabana", "kjj": "Khinalugh", "kjk": "Highland Konjo", "kjl": "Western Parbate Kham", "kjm": "Kháng", "kjn": "Kunjen", "kjo": "Harijan Kinnauri", "kjp": "Pwo Eastern Karen", "kjq": "Western Keres", "kjr": "Kurudu", "kjs": "East Kewa", "kjt": "Phrae Pwo Karen", "kju": "Kashaya", "kjv": "Kaikavian Literary Language", "kjx": "Ramopa", "kjy": "Erave", "kjz": "Bumthangkha", "kk": "Kazakh", "kka": "Kakanda", "kkb": "Kwerisa", "kkc": "Odoodee", "kkd": "Kinuku", "kke": "Kakabe", "kkf": "Kalaktang Monpa", "kkg": "Mabaka Valley Kalinga", "kkh": "Khün", "kki": "Kagulu", "kkj": "Kako", "kkk": "Kokota", "kkl": "Kosarek Yale", "kkm": "Kiong", "kkn": "Kon Keu", "kko": "Karko", "kkp": "Gugubera; Koko-Bera", "kkq": "Kaeku", "kkr": "Kir-Balar", "kks": "Giiwo", "kkt": "Koi", "kku": "Tumi", "kkv": "Kangean", "kkw": "Teke-Kukuya", "kkx": "Kohin", "kky": "Guugu Yimidhirr; Guguyimidjir", "kkz": "Kaska", "kl": "Kalaallisut; Greenlandic", "kla": "Klamath-Modoc", "klb": "Kiliwa", "klc": "Kolbila", "kld": "Gamilaraay", "kle": "Kulung (Nepal)", "klf": "Kendeje", "klg": "Tagakaulo", "klh": "Weliki", "kli": "Kalumpang", "klj": "Khalaj", "klk": "Kono (Nigeria)", "kll": "Kagan Kalagan", "klm": "Migum", "kln": "Kalenjin", "klo": "Kapya", "klp": "Kamasa", "klq": "Rumu", "klr": "Khaling", "kls": "Kalasha", "klt": "Nukna", "klu": "Klao", "klv": "Maskelynes", "klw": "Tado; Lindu", "klx": "Koluwawa", "kly": "Kalao", "klz": "Kabola", "km": "Khmer; Central Khmer", "kma": "Konni", "kmb": "Kimbundu", "kmc": "Southern Dong", "kmd": "Majukayang Kalinga", "kme": "Bakole", "kmf": "Kare (Papua New Guinea)", "kmg": "Kâte", "kmh": "Kalam", "kmi": "Kami (Nigeria)", "kmj": "Kumarbhag Paharia", "kmk": "Limos Kalinga", "kml": "Tanudan Kalinga", "kmm": "Kom (India)", "kmn": "Awtuw", "kmo": "Kwoma", "kmp": "Gimme", "kmq": "Kwama", "kmr": "Northern Kurdish", "kms": "Kamasau", "kmt": "Kemtuik", "kmu": "Kanite", "kmv": "Karipúna Creole French", "kmw": "Komo (Democratic Republic of Congo)", "kmx": "Waboda", "kmy": "Koma", "kmz": "Khorasani Turkish", "kn": "Kannada", "kna": "Dera (Nigeria)", "knb": "Lubuagan Kalinga", "knc": "Central Kanuri", "knd": "Konda", "kne": "Kankanaey", "knf": "Mankanya", "kng": "Koongo", "kni": "Kanufi", "knj": "Western Kanjobal", "knk": "Kuranko", "knl": "Keninjal", "knm": "Kanamarí", "knn": "Konkani (individual language)", "kno": "Kono (Sierra Leone)", "knp": "Kwanja", "knq": "Kintaq", "knr": "Kaningra", "kns": "Kensiu", "knt": "Panoan Katukína", "knu": "Kono (Guinea)", "knv": "Tabo", "knw": "Kung-Ekoka", "knx": "Kendayan; Salako", "kny": "Kanyok", "knz": "Kalamsé", "ko": "Korean", "koa": "Konomala", "koc": "Kpati", "kod": "Kodi", "koe": "Kacipo-Bale Suri", "kof": "Kubi", "kog": "Cogui; Kogi", "koh": "Koyo", "koi": "Komi-Permyak", "kok": "Konkani (macrolanguage)", "kol": "Kol (Papua New Guinea)", "koo": "Konzo", "kop": "Waube", "koq": "Kota (Gabon)", "kos": "Kosraean", "kot": "Lagwan", "kou": "Koke", "kov": "Kudu-Camo", "kow": "Kugama", "koy": "Koyukon", "koz": "Korak", "kpa": "Kutto", "kpb": "Mullu Kurumba", "kpc": "Curripaco", "kpd": "Koba", "kpe": "Kpelle", "kpf": "Komba", "kpg": "Kapingamarangi", "kph": "Kplang", "kpi": "Kofei", "kpj": "Karajá", "kpk": "Kpan", "kpl": "Kpala", "kpm": "Koho", "kpn": 
"Kepkiriwát", "kpo": "Ikposo", "kpq": "Korupun-Sela", "kpr": "Korafe-Yegha", "kps": "Tehit", "kpt": "Karata", "kpu": "Kafoa", "kpv": "Komi-Zyrian", "kpw": "Kobon", "kpx": "Mountain Koiali", "kpy": "Koryak", "kpz": "Kupsabiny", "kqa": "Mum", "kqb": "Kovai", "kqc": "Doromu-Koki", "kqd": "Koy Sanjaq Surat", "kqe": "Kalagan", "kqf": "Kakabai", "kqg": "Khe", "kqh": "Kisankasa", "kqi": "Koitabu", "kqj": "Koromira", "kqk": "Kotafon Gbe", "kql": "Kyenele", "kqm": "Khisa", "kqn": "Kaonde", "kqo": "Eastern Krahn", "kqp": "Kimré", "kqq": "Krenak", "kqr": "Kimaragang", "kqs": "Northern Kissi", "kqt": "Klias River Kadazan", "kqu": "Seroa", "kqv": "Okolod", "kqw": "Kandas", "kqx": "Mser", "kqy": "Koorete", "kqz": "Korana", "kr": "Kanuri", "kra": "Kumhali", "krb": "Karkin", "krc": "Karachay-Balkar", "krd": "Kairui-Midiki", "kre": "Panará", "krf": "Koro (Vanuatu)", "krh": "Kurama", "kri": "Krio", "krj": "Kinaray-A", "krk": "Kerek", "krl": "Karelian", "krn": "Sapo", "kro": "Kru languages", "krp": "Korop", "krr": "Krung", "krs": "Gbaya (Sudan)", "krt": "Tumari Kanuri", "kru": "Kurukh", "krv": "Kavet", "krw": "Western Krahn", "krx": "Karon", "kry": "Kryts", "krz": "Sota Kanum", "ks": "Kashmiri", "ksa": "Shuwa-Zamani", "ksb": "Shambala", "ksc": "Southern Kalinga", "ksd": "Kuanua", "kse": "Kuni", "ksf": "Bafia", "ksg": "Kusaghe", "ksh": "Kölsch", "ksi": "Krisa; I'saka", "ksj": "Uare", "ksk": "Kansa", "ksl": "Kumalu", "ksm": "Kumba", "ksn": "Kasiguranin", "kso": "Kofa", "ksp": "Kaba", "ksq": "Kwaami", "ksr": "Borong", "kss": "Southern Kisi", "kst": "Winyé", "ksu": "Khamyang", "ksv": "Kusu", "ksw": "S'gaw Karen", "ksx": "Kedang", "ksy": "Kharia Thar", "ksz": "Kodaku", "kta": "Katua", "ktb": "Kambaata", "ktc": "Kholok", "ktd": "Kokata; Kukatha", "kte": "Nubri", "ktf": "Kwami", "ktg": "Kalkutung", "kth": "Karanga", "kti": "North Muyu", "ktj": "Plapo Krumen", "ktk": "Kaniet", "ktl": "Koroshi", "ktm": "Kurti", "ktn": "Karitiâna", "kto": "Kuot", "ktp": "Kaduo", "ktq": "Katabaga", "kts": "South Muyu", "ktt": "Ketum", "ktu": "Kituba (Democratic Republic of Congo)", "ktv": "Eastern Katu", "ktw": "Kato", "ktx": "Kaxararí", "kty": "Kango (Bas-Uélé District)", "ktz": "Juǀʼhoan; Juǀʼhoansi", "ku": "Kurdish", "kub": "Kutep", "kuc": "Kwinsu", "kud": "'Auhelawa", "kue": "Kuman (Papua New Guinea)", "kuf": "Western Katu", "kug": "Kupa", "kuh": "Kushi", "kui": "Kuikúro-Kalapálo; Kalapalo", "kuj": "Kuria", "kuk": "Kepo'", "kul": "Kulere", "kum": "Kumyk", "kun": "Kunama", "kuo": "Kumukio", "kup": "Kunimaipa", "kuq": "Karipuna", "kus": "Kusaal", "kut": "Kutenai", "kuu": "Upper Kuskokwim", "kuv": "Kur", "kuw": "Kpagua", "kux": "Kukatja", "kuy": "Kuuku-Ya'u", "kuz": "Kunza", "kv": "Komi", "kva": "Bagvalal", "kvb": "Kubu", "kvc": "Kove", "kvd": "Kui (Indonesia)", "kve": "Kalabakan", "kvf": "Kabalai", "kvg": "Kuni-Boazi", "kvh": "Komodo", "kvi": "Kwang", "kvj": "Psikye", "kvk": "Korean Sign Language", "kvl": "Kayaw", "kvm": "Kendem", "kvn": "Border Kuna", "kvo": "Dobel", "kvp": "Kompane", "kvq": "Geba Karen", "kvr": "Kerinci", "kvt": "Lahta Karen; Lahta", "kvu": "Yinbaw Karen", "kvv": "Kola", "kvw": "Wersing", "kvx": "Parkari Koli", "kvy": "Yintale Karen; Yintale", "kvz": "Tsakwambo; Tsaukambo", "kw": "Cornish", "kwa": "Dâw", "kwb": "Kwa", "kwc": "Likwala", "kwd": "Kwaio", "kwe": "Kwerba", "kwf": "Kwara'ae", "kwg": "Sara Kaba Deme", "kwh": "Kowiai", "kwi": "Awa-Cuaiquer", "kwj": "Kwanga", "kwk": "Kwakiutl", "kwl": "Kofyar", "kwm": "Kwambi", "kwn": "Kwangali", "kwo": "Kwomtari", "kwp": "Kodia", "kwr": "Kwer", "kws": "Kwese", "kwt": 
"Kwesten", "kwu": "Kwakum", "kwv": "Sara Kaba Náà", "kww": "Kwinti", "kwx": "Khirwar", "kwy": "San Salvador Kongo", "kwz": "Kwadi", "kxa": "Kairiru", "kxb": "Krobu", "kxc": "Konso; Khonso", "kxd": "Brunei", "kxf": "Manumanaw Karen; Manumanaw", "kxh": "Karo (Ethiopia)", "kxi": "Keningau Murut", "kxj": "Kulfa", "kxk": "Zayein Karen", "kxm": "Northern Khmer", "kxn": "Kanowit-Tanjong Melanau", "kxo": "Kanoé", "kxp": "Wadiyara Koli", "kxq": "Smärky Kanum", "kxr": "Koro (Papua New Guinea)", "kxs": "Kangjia", "kxt": "Koiwat", "kxv": "Kuvi", "kxw": "Konai", "kxx": "Likuba", "kxy": "Kayong", "kxz": "Kerewo", "ky": "Kirghiz; Kyrgyz", "kya": "Kwaya", "kyb": "Butbut Kalinga", "kyc": "Kyaka", "kyd": "Karey", "kye": "Krache", "kyf": "Kouya", "kyg": "Keyagana", "kyh": "Karok", "kyi": "Kiput", "kyj": "Karao", "kyk": "Kamayo", "kyl": "Kalapuya", "kym": "Kpatili", "kyn": "Northern Binukidnon", "kyo": "Kelon", "kyp": "Kang", "kyq": "Kenga", "kyr": "Kuruáya", "kys": "Baram Kayan", "kyt": "Kayagar", "kyu": "Western Kayah", "kyv": "Kayort", "kyw": "Kudmali", "kyx": "Rapoisi", "kyy": "Kambaira", "kyz": "Kayabí", "kza": "Western Karaboro", "kzb": "Kaibobo", "kzc": "Bondoukou Kulango", "kzd": "Kadai", "kze": "Kosena", "kzf": "Da'a Kaili", "kzg": "Kikai", "kzi": "Kelabit", "kzk": "Kazukuru", "kzl": "Kayeli", "kzm": "Kais", "kzn": "Kokola", "kzo": "Kaningi", "kzp": "Kaidipang", "kzq": "Kaike", "kzr": "Karang", "kzs": "Sugut Dusun", "kzu": "Kayupulau", "kzv": "Komyandaret", "kzw": "Karirí-Xocó", "kzx": "Kamarian", "kzy": "Kango (Tshopo District)", "kzz": "Kalabra", "la": "Latin", "laa": "Southern Subanen", "lab": "Linear A", "lac": "Lacandon", "lad": "Ladino", "lae": "Pattani", "laf": "Lafofa", "lag": "Langi", "lah": "Lahnda", "lai": "Lambya", "laj": "Lango (Uganda)", "lal": "Lalia", "lam": "Lamba", "lan": "Laru", "lap": "Laka (Chad)", "laq": "Qabiao", "lar": "Larteh", "las": "Lama (Togo)", "lau": "Laba", "law": "Lauje", "lax": "Tiwa", "lay": "Lama Bai", "laz": "Aribwatsa", "lb": "Luxembourgish; Letzeburgesch", "lbb": "Label", "lbc": "Lakkia", "lbe": "Lak", "lbf": "Tinani", "lbg": "Laopang", "lbi": "La'bi", "lbj": "Ladakhi", "lbk": "Central Bontok", "lbl": "Libon Bikol", "lbm": "Lodhi", "lbn": "Rmeet", "lbo": "Laven", "lbq": "Wampar", "lbr": "Lohorung", "lbs": "Libyan Sign Language", "lbt": "Lachi", "lbu": "Labu", "lbv": "Lavatbura-Lamusong", "lbw": "Tolaki", "lbx": "Lawangan", "lby": "Lamalama; Lamu-Lamu", "lbz": "Lardil", "lcc": "Legenyem", "lcd": "Lola", "lce": "Loncong; Sekak", "lcf": "Lubu", "lch": "Luchazi", "lcl": "Lisela", "lcm": "Tungag", "lcp": "Western Lawa", "lcq": "Luhu", "lcs": "Lisabata-Nuniali", "lda": "Kla-Dan", "ldb": "Dũya", "ldd": "Luri", "ldg": "Lenyima", "ldh": "Lamja-Dengsa-Tola", "ldi": "Laari", "ldj": "Lemoro", "ldk": "Leelau", "ldl": "Kaan", "ldm": "Landoma", "ldn": "Láadan", "ldo": "Loo", "ldp": "Tso", "ldq": "Lufu", "lea": "Lega-Shabunda", "leb": "Lala-Bisa", "lec": "Leco", "led": "Lendu", "lee": "Lyélé", "lef": "Lelemi", "leh": "Lenje", "lei": "Lemio", "lej": "Lengola", "lek": "Leipon", "lel": "Lele (Democratic Republic of Congo)", "lem": "Nomaande", "len": "Lenca", "leo": "Leti (Cameroon)", "lep": "Lepcha", "leq": "Lembena", "ler": "Lenkau", "les": "Lese", "let": "Lesing-Gelimi; Amio-Gelimi", "leu": "Kara (Papua New Guinea)", "lev": "Lamma", "lew": "Ledo Kaili", "lex": "Luang", "ley": "Lemolang", "lez": "Lezghian", "lfa": "Lefa", "lfn": "Lingua Franca Nova", "lg": "Ganda; Luganda", "lga": "Lungga", "lgb": "Laghu", "lgg": "Lugbara", "lgh": "Laghuu", "lgi": "Lengilu", "lgk": "Lingarak; 
Neverver", "lgl": "Wala", "lgm": "Lega-Mwenga", "lgn": "T'apo; Opuuo", "lgo": "Lango (South Sudan)", "lgq": "Logba", "lgr": "Lengo", "lgt": "Pahi", "lgu": "Longgu", "lgz": "Ligenza", "lha": "Laha (Viet Nam)", "lhh": "Laha (Indonesia)", "lhi": "Lahu Shi", "lhl": "Lahul Lohar", "lhm": "Lhomi", "lhn": "Lahanan", "lhp": "Lhokpu", "lhs": "Mlahsö", "lht": "Lo-Toga", "lhu": "Lahu", "li": "Limburgan; Limburger; Limburgish", "lia": "West-Central Limba", "lib": "Likum", "lic": "Hlai", "lid": "Nyindrou", "lie": "Likila", "lif": "Limbu", "lig": "Ligbi", "lih": "Lihir", "lij": "Ligurian", "lik": "Lika", "lil": "Lillooet", "lio": "Liki", "lip": "Sekpele", "liq": "Libido", "lir": "Liberian English", "lis": "Lisu", "liu": "Logorik", "liv": "Liv", "liw": "Col", "lix": "Liabuku", "liy": "Banda-Bambari", "liz": "Libinza", "lja": "Golpa", "lje": "Rampi", "lji": "Laiyolo", "ljl": "Li'o", "ljp": "Lampung Api", "ljw": "Yirandali", "ljx": "Yuru", "lka": "Lakalei", "lkb": "Kabras; Lukabaras", "lkc": "Kucong", "lkd": "Lakondê", "lke": "Kenyi", "lkh": "Lakha", "lki": "Laki", "lkj": "Remun", "lkl": "Laeko-Libuat", "lkm": "Kalaamaya", "lkn": "Lakon; Vure", "lko": "Khayo; Olukhayo", "lkr": "Päri", "lks": "Kisa; Olushisa", "lkt": "Lakota", "lku": "Kungkari", "lky": "Lokoya", "lla": "Lala-Roba", "llb": "Lolo", "llc": "Lele (Guinea)", "lld": "Ladin", "lle": "Lele (Papua New Guinea)", "llf": "Hermit", "llg": "Lole", "llh": "Lamu", "lli": "Teke-Laali", "llj": "Ladji Ladji", "llk": "Lelak", "lll": "Lilau", "llm": "Lasalimu", "lln": "Lele (Chad)", "llp": "North Efate", "llq": "Lolak", "lls": "Lithuanian Sign Language", "llu": "Lau", "llx": "Lauan", "lma": "East Limba", "lmb": "Merei", "lmc": "Limilngan", "lmd": "Lumun", "lme": "Pévé", "lmf": "South Lembata", "lmg": "Lamogai", "lmh": "Lambichhong", "lmi": "Lombi", "lmj": "West Lembata", "lmk": "Lamkang", "lml": "Hano", "lmn": "Lambadi", "lmo": "Lombard", "lmp": "Limbum", "lmq": "Lamatuka", "lmr": "Lamalera", "lmu": "Lamenu", "lmv": "Lomaiviti", "lmw": "Lake Miwok", "lmx": "Laimbue", "lmy": "Lamboya", "ln": "Lingala", "lna": "Langbashe", "lnb": "Mbalanhu", "lnd": "Lundayeh; Lun Bawang", "lng": "Langobardic", "lnh": "Lanoh", "lni": "Daantanai'", "lnj": "Leningitij", "lnl": "South Central Banda", "lnm": "Langam", "lnn": "Lorediakarkar", "lns": "Lamnso'", "lnu": "Longuda", "lnw": "Lanima", "lnz": "Lonzo", "lo": "Lao", "loa": "Loloda", "lob": "Lobi", "loc": "Inonhan", "loe": "Saluan", "lof": "Logol", "log": "Logo", "loh": "Narim", "loi": "Loma (Côte d'Ivoire)", "loj": "Lou", "lok": "Loko", "lol": "Mongo", "lom": "Loma (Liberia)", "lon": "Malawi Lomwe", "loo": "Lombo", "lop": "Lopa", "loq": "Lobala", "lor": "Téén", "los": "Loniu", "lot": "Otuho", "lou": "Louisiana Creole", "lov": "Lopi", "low": "Tampias Lobu", "lox": "Loun", "loy": "Loke", "loz": "Lozi", "lpa": "Lelepa", "lpe": "Lepki", "lpn": "Long Phuri Naga", "lpo": "Lipo", "lpx": "Lopit", "lqr": "Logir", "lra": "Rara Bakati'", "lrc": "Northern Luri", "lre": "Laurentian", "lrg": "Laragia", "lri": "Marachi; Olumarachi", "lrk": "Loarki", "lrl": "Lari", "lrm": "Marama; Olumarama", "lrn": "Lorang", "lro": "Laro", "lrr": "Southern Yamphu", "lrt": "Larantuka Malay", "lrv": "Larevat", "lrz": "Lemerig", "lsa": "Lasgerdi", "lsb": "Burundian Sign Language; Langue des Signes Burundaise", "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", "lsd": "Lishana Deni", "lse": "Lusengo", "lsh": "Lish", "lsi": "Lashi", "lsl": "Latvian Sign Language", "lsm": "Saamia; Olusamia", "lsn": "Tibetan Sign Language", "lso": "Laos Sign Language", 
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", "lsr": "Aruop", "lss": "Lasi", "lst": "Trinidad and Tobago Sign Language", "lsv": "Sivia Sign Language", "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", "lsy": "Mauritian Sign Language", "lt": "Lithuanian", "ltc": "Late Middle Chinese", "ltg": "Latgalian", "lth": "Thur", "lti": "Leti (Indonesia)", "ltn": "Latundê", "lto": "Tsotso; Olutsotso", "lts": "Tachoni; Lutachoni", "ltu": "Latu", "lu": "Luba-Katanga", "lua": "Luba-Lulua", "luc": "Aringa", "lud": "Ludian", "lue": "Luvale", "luf": "Laua", "lui": "Luiseno", "luj": "Luna", "luk": "Lunanakha", "lul": "Olu'bo", "lum": "Luimbi", "lun": "Lunda", "luo": "Luo (Kenya and Tanzania); Dholuo", "lup": "Lumbu", "luq": "Lucumi", "lur": "Laura", "lus": "Lushai", "lut": "Lushootseed", "luu": "Lumba-Yakkha", "luv": "Luwati", "luw": "Luo (Cameroon)", "luy": "Luyia; Oluluyia", "luz": "Southern Luri", "lv": "Latvian", "lva": "Maku'a", "lvi": "Lavi", "lvk": "Lavukaleve", "lvs": "Standard Latvian", "lvu": "Levuka", "lwa": "Lwalu", "lwe": "Lewo Eleng", "lwg": "Wanga; Oluwanga", "lwh": "White Lachi", "lwl": "Eastern Lawa", "lwm": "Laomian", "lwo": "Luwo", "lws": "Malawian Sign Language", "lwt": "Lewotobi", "lwu": "Lawu", "lww": "Lewo", "lxm": "Lakurumau", "lya": "Layakha", "lyg": "Lyngngam", "lyn": "Luyana", "lzh": "Literary Chinese", "lzl": "Litzlitz", "lzn": "Leinong Naga", "lzz": "Laz", "maa": "San Jerónimo Tecóatl Mazatec", "mab": "Yutanduchi Mixtec", "mad": "Madurese", "mae": "Bo-Rukul", "maf": "Mafa", "mag": "Magahi", "mai": "Maithili", "maj": "Jalapa De Díaz Mazatec", "mak": "Makasar", "mam": "Mam", "man": "Mandingo; Manding", "map": "Austronesian languages", "maq": "Chiquihuitlán Mazatec", "mas": "Masai", "mat": "San Francisco Matlatzinca", "mau": "Huautla Mazatec", "mav": "Sateré-Mawé", "maw": "Mampruli", "max": "North Moluccan Malay", "maz": "Central Mazahua", "mba": "Higaonon", "mbb": "Western Bukidnon Manobo", "mbc": "Macushi", "mbd": "Dibabawon Manobo", "mbe": "Molale", "mbf": "Baba Malay", "mbh": "Mangseng", "mbi": "Ilianen Manobo", "mbj": "Nadëb", "mbk": "Malol", "mbl": "Maxakalí", "mbm": "Ombamba", "mbn": "Macaguán", "mbo": "Mbo (Cameroon)", "mbp": "Malayo", "mbq": "Maisin", "mbr": "Nukak Makú", "mbs": "Sarangani Manobo", "mbt": "Matigsalug Manobo", "mbu": "Mbula-Bwazza", "mbv": "Mbulungish", "mbw": "Maring", "mbx": "Mari (East Sepik Province)", "mby": "Memoni", "mbz": "Amoltepec Mixtec", "mca": "Maca", "mcb": "Machiguenga", "mcc": "Bitur", "mcd": "Sharanahua", "mce": "Itundujia Mixtec", "mcf": "Matsés", "mcg": "Mapoyo", "mch": "Maquiritari", "mci": "Mese", "mcj": "Mvanip", "mck": "Mbunda", "mcl": "Macaguaje", "mcm": "Malaccan Creole Portuguese", "mcn": "Masana", "mco": "Coatlán Mixe", "mcp": "Makaa", "mcq": "Ese", "mcr": "Menya", "mcs": "Mambai", "mct": "Mengisa", "mcu": "Cameroon Mambila", "mcv": "Minanibai", "mcw": "Mawa (Chad)", "mcx": "Mpiemo", "mcy": "South Watut", "mcz": "Mawan", "mda": "Mada (Nigeria)", "mdb": "Morigi", "mdc": "Male (Papua New Guinea)", "mdd": "Mbum", "mde": "Maba (Chad)", "mdf": "Moksha", "mdg": "Massalat", "mdh": "Maguindanaon", "mdi": "Mamvu", "mdj": "Mangbetu", "mdk": "Mangbutu", "mdl": "Maltese Sign Language", "mdm": "Mayogo", "mdn": "Mbati", "mdp": "Mbala", "mdq": "Mbole", "mdr": "Mandar", "mds": "Maria (Papua New Guinea)", "mdt": "Mbere", "mdu": "Mboko", "mdv": "Santa Lucía Monteverde Mixtec", "mdw": "Mbosi", "mdx": "Dizin", "mdy": "Male (Ethiopia)", "mdz": "Suruí Do Pará", "mea": "Menka", "meb": "Ikobi", "mec": 
"Marra", "med": "Melpa", "mee": "Mengen", "mef": "Megam", "meh": "Southwestern Tlaxiaco Mixtec", "mei": "Midob", "mej": "Meyah", "mek": "Mekeo", "mel": "Central Melanau", "mem": "Mangala", "men": "Mende (Sierra Leone)", "meo": "Kedah Malay", "mep": "Miriwoong", "meq": "Merey", "mer": "Meru", "mes": "Masmaje", "met": "Mato", "meu": "Motu", "mev": "Mano", "mew": "Maaka", "mey": "Hassaniyya", "mez": "Menominee", "mfa": "Pattani Malay", "mfb": "Bangka", "mfc": "Mba", "mfd": "Mendankwe-Nkwen", "mfe": "Morisyen", "mff": "Naki", "mfg": "Mogofin", "mfh": "Matal", "mfi": "Wandala", "mfj": "Mefele", "mfk": "North Mofu", "mfl": "Putai", "mfm": "Marghi South", "mfn": "Cross River Mbembe", "mfo": "Mbe", "mfp": "Makassar Malay", "mfq": "Moba", "mfr": "Marrithiyel", "mfs": "Mexican Sign Language", "mft": "Mokerang", "mfu": "Mbwela", "mfv": "Mandjak", "mfw": "Mulaha", "mfx": "Melo", "mfy": "Mayo", "mfz": "Mabaan", "mg": "Malagasy", "mga": "Middle Irish (900-1200)", "mgb": "Mararit", "mgc": "Morokodo", "mgd": "Moru", "mge": "Mango", "mgf": "Maklew", "mgg": "Mpumpong", "mgh": "Makhuwa-Meetto", "mgi": "Lijili", "mgj": "Abureni", "mgk": "Mawes", "mgl": "Maleu-Kilenge", "mgm": "Mambae", "mgn": "Mbangi", "mgo": "Meta'", "mgp": "Eastern Magar", "mgq": "Malila", "mgr": "Mambwe-Lungu", "mgs": "Manda (Tanzania)", "mgt": "Mongol", "mgu": "Mailu", "mgv": "Matengo", "mgw": "Matumbi", "mgy": "Mbunga", "mgz": "Mbugwe", "mh": "Marshallese", "mha": "Manda (India)", "mhb": "Mahongwe", "mhc": "Mocho", "mhd": "Mbugu", "mhe": "Besisi; Mah Meri", "mhf": "Mamaa", "mhg": "Margu", "mhi": "Ma'di", "mhj": "Mogholi", "mhk": "Mungaka", "mhl": "Mauwake", "mhm": "Makhuwa-Moniga", "mhn": "Mócheno", "mho": "Mashi (Zambia)", "mhp": "Balinese Malay", "mhq": "Mandan", "mhr": "Eastern Mari", "mhs": "Buru (Indonesia)", "mht": "Mandahuaca", "mhu": "Digaro-Mishmi; Darang Deng", "mhw": "Mbukushu", "mhx": "Maru; Lhaovo", "mhy": "Ma'anyan", "mhz": "Mor (Mor Islands)", "mi": "Maori", "mia": "Miami", "mib": "Atatláhuca Mixtec", "mic": "Mi'kmaq; Micmac", "mid": "Mandaic", "mie": "Ocotepec Mixtec", "mif": "Mofu-Gudur", "mig": "San Miguel El Grande Mixtec", "mih": "Chayuco Mixtec", "mii": "Chigmecatitlán Mixtec", "mij": "Abar; Mungbam", "mik": "Mikasuki", "mil": "Peñoles Mixtec", "mim": "Alacatlatzala Mixtec", "min": "Minangkabau", "mio": "Pinotepa Nacional Mixtec", "mip": "Apasco-Apoala Mixtec", "miq": "Mískito", "mir": "Isthmus Mixe", "mit": "Southern Puebla Mixtec", "miu": "Cacaloxtepec Mixtec", "miw": "Akoye", "mix": "Mixtepec Mixtec", "miy": "Ayutla Mixtec", "miz": "Coatzospan Mixtec", "mjb": "Makalero", "mjc": "San Juan Colorado Mixtec", "mjd": "Northwest Maidu", "mje": "Muskum", "mjg": "Tu", "mjh": "Mwera (Nyasa)", "mji": "Kim Mun", "mjj": "Mawak", "mjk": "Matukar", "mjl": "Mandeali", "mjm": "Medebur", "mjn": "Ma (Papua New Guinea)", "mjo": "Malankuravan", "mjp": "Malapandaram", "mjq": "Malaryan", "mjr": "Malavedan", "mjs": "Miship", "mjt": "Sauria Paharia", "mju": "Manna-Dora", "mjv": "Mannan", "mjw": "Karbi", "mjx": "Mahali", "mjy": "Mahican", "mjz": "Majhi", "mk": "Macedonian", "mka": "Mbre", "mkb": "Mal Paharia", "mkc": "Siliput", "mke": "Mawchi", "mkf": "Miya", "mkg": "Mak (China)", "mkh": "Mon-Khmer languages", "mki": "Dhatki", "mkj": "Mokilese", "mkk": "Byep", "mkl": "Mokole", "mkm": "Moklen", "mkn": "Kupang Malay", "mko": "Mingang Doso", "mkp": "Moikodi", "mkq": "Bay Miwok", "mkr": "Malas", "mks": "Silacayoapan Mixtec", "mkt": "Vamale", "mku": "Konyanka Maninka", "mkv": "Mafea", "mkw": "Kituba (Congo)", "mkx": "Kinamiging Manobo", "mky": 
"East Makian", "mkz": "Makasae", "ml": "Malayalam", "mla": "Malo", "mlb": "Mbule", "mlc": "Cao Lan", "mle": "Manambu", "mlf": "Mal", "mlh": "Mape", "mli": "Malimpung", "mlj": "Miltu", "mlk": "Ilwana; Kiwilwana", "mll": "Malua Bay", "mlm": "Mulam", "mln": "Malango", "mlo": "Mlomp", "mlp": "Bargam", "mlq": "Western Maninkakan", "mlr": "Vame", "mls": "Masalit", "mlu": "To'abaita", "mlv": "Motlav; Mwotlap", "mlw": "Moloko", "mlx": "Malfaxal; Naha'ai", "mlz": "Malaynon", "mma": "Mama", "mmb": "Momina", "mmc": "Michoacán Mazahua", "mmd": "Maonan", "mme": "Mae", "mmf": "Mundat", "mmg": "North Ambrym", "mmh": "Mehináku", "mmi": "Musar", "mmj": "Majhwar", "mmk": "Mukha-Dora", "mml": "Man Met", "mmm": "Maii", "mmn": "Mamanwa", "mmo": "Mangga Buang", "mmp": "Siawi", "mmq": "Musak", "mmr": "Western Xiangxi Miao", "mmt": "Malalamai", "mmu": "Mmaala", "mmv": "Miriti", "mmw": "Emae", "mmx": "Madak", "mmy": "Migaama", "mmz": "Mabaale", "mn": "Mongolian", "mna": "Mbula", "mnb": "Muna", "mnc": "Manchu", "mnd": "Mondé", "mne": "Naba", "mnf": "Mundani", "mng": "Eastern Mnong", "mnh": "Mono (Democratic Republic of Congo)", "mni": "Manipuri", "mnj": "Munji", "mnk": "Mandinka", "mnl": "Tiale", "mnm": "Mapena", "mnn": "Southern Mnong", "mno": "Manobo languages", "mnp": "Min Bei Chinese", "mnq": "Minriq", "mnr": "Mono (USA)", "mns": "Mansi", "mnu": "Mer", "mnv": "Rennell-Bellona", "mnw": "Mon", "mnx": "Manikion", "mny": "Manyawa", "mnz": "Moni", "moa": "Mwan", "moc": "Mocoví", "mod": "Mobilian", "moe": "Innu; Montagnais", "mog": "Mongondow", "moh": "Mohawk", "moi": "Mboi", "moj": "Monzombo", "mok": "Morori", "mom": "Mangue", "moo": "Monom", "mop": "Mopán Maya", "moq": "Mor (Bomberai Peninsula)", "mor": "Moro", "mos": "Mossi", "mot": "Barí", "mou": "Mogum", "mov": "Mohave", "mow": "Moi (Congo)", "mox": "Molima", "moy": "Shekkacho", "moz": "Mukulu; Gergiko", "mpa": "Mpoto", "mpb": "Malak Malak; Mullukmulluk", "mpc": "Mangarrayi", "mpd": "Machinere", "mpe": "Majang", "mpg": "Marba", "mph": "Maung", "mpi": "Mpade", "mpj": "Martu Wangka; Wangkajunga", "mpk": "Mbara (Chad)", "mpl": "Middle Watut", "mpm": "Yosondúa Mixtec", "mpn": "Mindiri", "mpo": "Miu", "mpp": "Migabac", "mpq": "Matís", "mpr": "Vangunu", "mps": "Dadibi", "mpt": "Mian", "mpu": "Makuráp", "mpv": "Mungkip", "mpw": "Mapidian", "mpx": "Misima-Panaeati", "mpy": "Mapia", "mpz": "Mpi", "mqa": "Maba (Indonesia)", "mqb": "Mbuko", "mqc": "Mangole", "mqe": "Matepi", "mqf": "Momuna", "mqg": "Kota Bangun Kutai Malay", "mqh": "Tlazoyaltepec Mixtec", "mqi": "Mariri", "mqj": "Mamasa", "mqk": "Rajah Kabunsuwan Manobo", "mql": "Mbelime", "mqm": "South Marquesan", "mqn": "Moronene", "mqo": "Modole", "mqp": "Manipa", "mqq": "Minokok", "mqr": "Mander", "mqs": "West Makian", "mqt": "Mok", "mqu": "Mandari", "mqv": "Mosimo", "mqw": "Murupi", "mqx": "Mamuju", "mqy": "Manggarai", "mqz": "Pano", "mr": "Marathi", "mra": "Mlabri", "mrb": "Marino", "mrc": "Maricopa", "mrd": "Western Magar", "mre": "Martha's Vineyard Sign Language", "mrf": "Elseng", "mrg": "Mising", "mrh": "Mara Chin", "mrj": "Western Mari", "mrk": "Hmwaveke", "mrl": "Mortlockese", "mrm": "Merlav; Mwerlap", "mrn": "Cheke Holo", "mro": "Mru", "mrp": "Morouas", "mrq": "North Marquesan", "mrr": "Maria (India)", "mrs": "Maragus", "mrt": "Marghi Central", "mru": "Mono (Cameroon)", "mrv": "Mangareva", "mrw": "Maranao", "mrx": "Maremgi; Dineor", "mry": "Mandaya", "mrz": "Marind", "ms": "Malay (macrolanguage)", "msb": "Masbatenyo", "msc": "Sankaran Maninka", "msd": "Yucatec Maya Sign Language", "mse": "Musey", "msf": 
"Mekwei", "msg": "Moraid", "msh": "Masikoro Malagasy", "msi": "Sabah Malay", "msj": "Ma (Democratic Republic of Congo)", "msk": "Mansaka", "msl": "Molof; Poule", "msm": "Agusan Manobo", "msn": "Vurës", "mso": "Mombum", "msp": "Maritsauá", "msq": "Caac", "msr": "Mongolian Sign Language", "mss": "West Masela", "msu": "Musom", "msv": "Maslam", "msw": "Mansoanka", "msx": "Moresada", "msy": "Aruamu", "msz": "Momare", "mt": "Maltese", "mta": "Cotabato Manobo", "mtb": "Anyin Morofo", "mtc": "Munit", "mtd": "Mualang", "mte": "Mono (Solomon Islands)", "mtf": "Murik (Papua New Guinea)", "mtg": "Una", "mth": "Munggui", "mti": "Maiwa (Papua New Guinea)", "mtj": "Moskona", "mtk": "Mbe'", "mtl": "Montol", "mtm": "Mator", "mtn": "Matagalpa", "mto": "Totontepec Mixe", "mtp": "Wichí Lhamtés Nocten", "mtq": "Muong", "mtr": "Mewari", "mts": "Yora", "mtt": "Mota", "mtu": "Tututepec Mixtec", "mtv": "Asaro'o", "mtw": "Southern Binukidnon", "mtx": "Tidaá Mixtec", "mty": "Nabi", "mua": "Mundang", "mub": "Mubi", "muc": "Ajumbu", "mud": "Mednyj Aleut", "mue": "Media Lengua", "mug": "Musgu", "muh": "Mündü", "mui": "Musi", "muj": "Mabire", "muk": "Mugom", "mum": "Maiwala", "mun": "Munda languages", "muo": "Nyong", "mup": "Malvi", "muq": "Eastern Xiangxi Miao", "mur": "Murle", "mus": "Creek", "mut": "Western Muria", "muu": "Yaaku", "muv": "Muthuvan", "mux": "Bo-Ung", "muy": "Muyang", "muz": "Mursi", "mva": "Manam", "mvb": "Mattole", "mvd": "Mamboru", "mve": "Marwari (Pakistan)", "mvf": "Peripheral Mongolian", "mvg": "Yucuañe Mixtec", "mvh": "Mulgi", "mvi": "Miyako", "mvk": "Mekmek", "mvl": "Mbara (Australia)", "mvn": "Minaveha", "mvo": "Marovo", "mvp": "Duri", "mvq": "Moere", "mvr": "Marau", "mvs": "Massep", "mvt": "Mpotovoro", "mvu": "Marfa", "mvv": "Tagal Murut", "mvw": "Machinga", "mvx": "Meoswar", "mvy": "Indus Kohistani", "mvz": "Mesqan", "mwa": "Mwatebu", "mwb": "Juwal", "mwc": "Are", "mwe": "Mwera (Chimwera)", "mwf": "Murrinh-Patha", "mwg": "Aiklep", "mwh": "Mouk-Aria", "mwi": "Labo; Ninde", "mwk": "Kita Maninkakan", "mwl": "Mirandese", "mwm": "Sar", "mwn": "Nyamwanga", "mwo": "Central Maewo", "mwp": "Kala Lagaw Ya", "mwq": "Mün Chin", "mwr": "Marwari", "mws": "Mwimbi-Muthambi", "mwt": "Moken", "mwu": "Mittu", "mwv": "Mentawai", "mww": "Hmong Daw", "mwz": "Moingi", "mxa": "Northwest Oaxaca Mixtec", "mxb": "Tezoatlán Mixtec", "mxc": "Manyika", "mxd": "Modang", "mxe": "Mele-Fila", "mxf": "Malgbe", "mxg": "Mbangala", "mxh": "Mvuba", "mxi": "Mozarabic", "mxj": "Miju-Mishmi; Geman Deng", "mxk": "Monumbo", "mxl": "Maxi Gbe", "mxm": "Meramera", "mxn": "Moi (Indonesia)", "mxo": "Mbowe", "mxp": "Tlahuitoltepec Mixe", "mxq": "Juquila Mixe", "mxr": "Murik (Malaysia)", "mxs": "Huitepec Mixtec", "mxt": "Jamiltepec Mixtec", "mxu": "Mada (Cameroon)", "mxv": "Metlatónoc Mixtec", "mxw": "Namo", "mxx": "Mahou; Mawukakan", "mxy": "Southeastern Nochixtlán Mixtec", "mxz": "Central Masela", "my": "Burmese", "myb": "Mbay", "myc": "Mayeka", "mye": "Myene", "myf": "Bambassi", "myg": "Manta", "myh": "Makah", "myj": "Mangayat", "myk": "Mamara Senoufo", "myl": "Moma", "mym": "Me'en", "myn": "Mayan languages", "myo": "Anfillo", "myp": "Pirahã", "myr": "Muniche", "mys": "Mesmes", "myu": "Mundurukú", "myv": "Erzya", "myw": "Muyuw", "myx": "Masaaba", "myy": "Macuna", "myz": "Classical Mandaic", "mza": "Santa María Zacatepec Mixtec", "mzb": "Tumzabt", "mzc": "Madagascar Sign Language", "mzd": "Malimba", "mze": "Morawa", "mzg": "Monastic Sign Language", "mzh": "Wichí Lhamtés Güisnay", "mzi": "Ixcatlán Mazatec", "mzj": "Manya", "mzk": "Nigeria 
Mambila", "mzl": "Mazatlán Mixe", "mzm": "Mumuye", "mzn": "Mazanderani", "mzo": "Matipuhy", "mzp": "Movima", "mzq": "Mori Atas", "mzr": "Marúbo", "mzs": "Macanese", "mzt": "Mintil", "mzu": "Inapang", "mzv": "Manza", "mzw": "Deg", "mzx": "Mawayana", "mzy": "Mozambican Sign Language", "mzz": "Maiadomu", "na": "Nauru", "naa": "Namla", "nab": "Southern Nambikuára", "nac": "Narak", "nae": "Naka'ela", "naf": "Nabak", "nag": "Naga Pidgin", "nah": "Nahuatl languages", "nai": "North American Indian languages", "naj": "Nalu", "nak": "Nakanai", "nal": "Nalik", "nam": "Ngan'gityemerri", "nan": "Min Nan Chinese", "nao": "Naaba", "nap": "Neapolitan", "naq": "Khoekhoe; Nama (Namibia)", "nar": "Iguta", "nas": "Naasioi", "nat": "Ca̱hungwa̱rya̱; Hungworo", "naw": "Nawuri", "nax": "Nakwi", "nay": "Ngarrindjeri", "naz": "Coatepec Nahuatl", "nb": "Norwegian Bokmål", "nba": "Nyemba", "nbb": "Ndoe", "nbc": "Chang Naga", "nbd": "Ngbinda", "nbe": "Konyak Naga", "nbg": "Nagarchal", "nbh": "Ngamo", "nbi": "Mao Naga", "nbj": "Ngarinyman", "nbk": "Nake", "nbm": "Ngbaka Ma'bo", "nbn": "Kuri", "nbo": "Nkukoli", "nbp": "Nnam", "nbq": "Nggem", "nbr": "Numana", "nbs": "Namibian Sign Language", "nbt": "Na", "nbu": "Rongmei Naga", "nbv": "Ngamambo", "nbw": "Southern Ngbandi", "nby": "Ningera", "nca": "Iyo", "ncb": "Central Nicobarese", "ncc": "Ponam", "ncd": "Nachering", "nce": "Yale", "ncf": "Notsi", "ncg": "Nisga'a", "nch": "Central Huasteca Nahuatl", "nci": "Classical Nahuatl", "ncj": "Northern Puebla Nahuatl", "nck": "Na-kara", "ncl": "Michoacán Nahuatl", "ncm": "Nambo", "ncn": "Nauna", "nco": "Sibe", "ncq": "Northern Katang", "ncr": "Ncane", "ncs": "Nicaraguan Sign Language", "nct": "Chothe Naga", "ncu": "Chumburung", "ncx": "Central Puebla Nahuatl", "ncz": "Natchez", "nd": "North Ndebele", "nda": "Ndasa", "ndb": "Kenswei Nsei", "ndc": "Ndau", "ndd": "Nde-Nsele-Nta", "ndf": "Nadruvian", "ndg": "Ndengereko", "ndh": "Ndali", "ndi": "Samba Leko", "ndj": "Ndamba", "ndk": "Ndaka", "ndl": "Ndolo", "ndm": "Ndam", "ndn": "Ngundi", "ndp": "Ndo", "ndq": "Ndombe", "ndr": "Ndoola", "nds": "Low German; Low Saxon", "ndt": "Ndunga", "ndu": "Dugun", "ndv": "Ndut", "ndw": "Ndobo", "ndx": "Nduga", "ndy": "Lutos", "ndz": "Ndogo", "ne": "Nepali (macrolanguage)", "nea": "Eastern Ngad'a", "neb": "Toura (Côte d'Ivoire)", "nec": "Nedebang", "ned": "Nde-Gbite", "nee": "Nêlêmwa-Nixumwak", "nef": "Nefamese", "neg": "Negidal", "neh": "Nyenkha", "nei": "Neo-Hittite", "nej": "Neko", "nek": "Neku", "nem": "Nemi", "nen": "Nengone", "neo": "Ná-Meo", "neq": "North Central Mixe", "ner": "Yahadian", "nes": "Bhoti Kinnauri", "net": "Nete", "neu": "Neo", "nev": "Nyaheun", "new": "Newari; Nepal Bhasa", "nex": "Neme", "ney": "Neyo", "nez": "Nez Perce", "nfa": "Dhao", "nfd": "Ahwai", "nfl": "Ayiwo; Äiwoo", "nfr": "Nafaanra", "nfu": "Mfumte", "ng": "Ndonga", "nga": "Ngbaka", "ngb": "Northern Ngbandi", "ngc": "Ngombe (Democratic Republic of Congo)", "ngd": "Ngando (Central African Republic)", "nge": "Ngemba", "ngf": "Trans-New Guinea languages", "ngg": "Ngbaka Manza", "ngh": "Nǁng", "ngi": "Ngizim", "ngj": "Ngie", "ngk": "Dalabon", "ngl": "Lomwe", "ngm": "Ngatik Men's Creole", "ngn": "Ngwo", "ngp": "Ngulu", "ngq": "Ngurimi; Ngoreme", "ngr": "Engdewu", "ngs": "Gvoko", "ngt": "Kriang; Ngeq", "ngu": "Guerrero Nahuatl", "ngv": "Nagumi", "ngw": "Ngwaba", "ngx": "Nggwahyi", "ngy": "Tibea", "ngz": "Ngungwel", "nha": "Nhanda", "nhb": "Beng", "nhc": "Tabasco Nahuatl", "nhd": "Chiripá; Ava Guaraní", "nhe": "Eastern Huasteca Nahuatl", "nhf": "Nhuwala", "nhg": "Tetelcingo 
Nahuatl", "nhh": "Nahari", "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", "nhk": "Isthmus-Cosoleacaque Nahuatl", "nhm": "Morelos Nahuatl", "nhn": "Central Nahuatl", "nho": "Takuu", "nhp": "Isthmus-Pajapan Nahuatl", "nhq": "Huaxcaleca Nahuatl", "nhr": "Naro", "nht": "Ometepec Nahuatl", "nhu": "Noone", "nhv": "Temascaltepec Nahuatl", "nhw": "Western Huasteca Nahuatl", "nhx": "Isthmus-Mecayapan Nahuatl", "nhy": "Northern Oaxaca Nahuatl", "nhz": "Santa María La Alta Nahuatl", "nia": "Nias", "nib": "Nakame", "nic": "Niger-Kordofanian languages", "nid": "Ngandi", "nie": "Niellim", "nif": "Nek", "nig": "Ngalakgan", "nih": "Nyiha (Tanzania)", "nii": "Nii", "nij": "Ngaju", "nik": "Southern Nicobarese", "nil": "Nila", "nim": "Nilamba", "nin": "Ninzo", "nio": "Nganasan", "niq": "Nandi", "nir": "Nimboran", "nis": "Nimi", "nit": "Southeastern Kolami", "niu": "Niuean", "niv": "Gilyak", "niw": "Nimo", "nix": "Hema", "niy": "Ngiti", "niz": "Ningil", "nja": "Nzanyi", "njb": "Nocte Naga", "njd": "Ndonde Hamba", "njh": "Lotha Naga", "nji": "Gudanji", "njj": "Njen", "njl": "Njalgulgule", "njm": "Angami Naga", "njn": "Liangmai Naga", "njo": "Ao Naga", "njr": "Njerep", "njs": "Nisa", "njt": "Ndyuka-Trio Pidgin", "nju": "Ngadjunmaya", "njx": "Kunyi", "njy": "Njyem", "njz": "Nyishi", "nka": "Nkoya", "nkb": "Khoibu Naga", "nkc": "Nkongho", "nkd": "Koireng", "nke": "Duke", "nkf": "Inpui Naga", "nkg": "Nekgini", "nkh": "Khezha Naga", "nki": "Thangal Naga", "nkj": "Nakai", "nkk": "Nokuku", "nkm": "Namat", "nkn": "Nkangala", "nko": "Nkonya", "nkp": "Niuatoputapu", "nkq": "Nkami", "nkr": "Nukuoro", "nks": "North Asmat", "nkt": "Nyika (Tanzania)", "nku": "Bouna Kulango", "nkv": "Nyika (Malawi and Zambia)", "nkw": "Nkutu", "nkx": "Nkoroo", "nkz": "Nkari", "nl": "Dutch; Flemish", "nla": "Ngombale", "nlc": "Nalca", "nle": "East Nyala", "nlg": "Gela", "nli": "Grangali", "nlj": "Nyali", "nlk": "Ninia Yali", "nll": "Nihali", "nlm": "Mankiyali", "nlo": "Ngul", "nlq": "Lao Naga", "nlu": "Nchumbulu", "nlv": "Orizaba Nahuatl", "nlw": "Walangama", "nlx": "Nahali", "nly": "Nyamal", "nlz": "Nalögo", "nma": "Maram Naga", "nmb": "Big Nambas; V'ënen Taut", "nmc": "Ngam", "nmd": "Ndumu", "nme": "Mzieme Naga", "nmf": "Tangkhul Naga (India)", "nmg": "Kwasio", "nmh": "Monsang Naga", "nmi": "Nyam", "nmj": "Ngombe (Central African Republic)", "nmk": "Namakura", "nml": "Ndemli", "nmm": "Manangba", "nmn": "ǃXóõ", "nmo": "Moyon Naga", "nmp": "Nimanbur", "nmq": "Nambya", "nmr": "Nimbari", "nms": "Letemboi", "nmt": "Namonuito", "nmu": "Northeast Maidu", "nmv": "Ngamini", "nmw": "Nimoa; Rifao", "nmx": "Nama (Papua New Guinea)", "nmy": "Namuyi", "nmz": "Nawdm", "nn": "Norwegian Nynorsk", "nna": "Nyangumarta", "nnb": "Nande", "nnc": "Nancere", "nnd": "West Ambae", "nne": "Ngandyera", "nnf": "Ngaing", "nng": "Maring Naga", "nnh": "Ngiemboon", "nni": "North Nuaulu", "nnj": "Nyangatom", "nnk": "Nankina", "nnl": "Northern Rengma Naga", "nnm": "Namia", "nnn": "Ngete", "nnp": "Wancho Naga", "nnq": "Ngindo", "nnr": "Narungga", "nnt": "Nanticoke", "nnu": "Dwang", "nnv": "Nugunu (Australia)", "nnw": "Southern Nuni", "nny": "Nyangga", "nnz": "Nda'nda'", "no": "Norwegian", "noa": "Woun Meu", "noc": "Nuk", "nod": "Northern Thai", "noe": "Nimadi", "nof": "Nomane", "nog": "Nogai", "noh": "Nomu", "noi": "Noiri", "noj": "Nonuya", "nok": "Nooksack", "nol": "Nomlaki", "nom": "Nocamán", "non": "Old Norse", "nop": "Numanggang", "noq": "Ngongo", "nos": "Eastern Nisu", "not": "Nomatsiguenga", "nou": "Ewage-Notu", "nov": "Novial", "now": "Nyambo", "noy": "Noy", "noz": 
"Nayi", "npa": "Nar Phu", "npb": "Nupbikha", "npg": "Ponyo-Gongwang Naga", "nph": "Phom Naga", "npi": "Nepali (individual language)", "npl": "Southeastern Puebla Nahuatl", "npn": "Mondropolon", "npo": "Pochuri Naga", "nps": "Nipsan", "npu": "Puimei Naga", "npx": "Noipx", "npy": "Napu", "nqg": "Southern Nago", "nqk": "Kura Ede Nago", "nql": "Ngendelengo", "nqm": "Ndom", "nqn": "Nen", "nqo": "N'Ko; N’Ko", "nqq": "Kyan-Karyaw Naga", "nqt": "Nteng", "nqy": "Akyaung Ari Naga", "nr": "South Ndebele", "nra": "Ngom", "nrb": "Nara", "nrc": "Noric", "nre": "Southern Rengma Naga", "nrf": "Jèrriais; Guernésiais", "nrg": "Narango", "nri": "Chokri Naga", "nrk": "Ngarla", "nrl": "Ngarluma", "nrm": "Narom", "nrn": "Norn", "nrp": "North Picene", "nrr": "Norra; Nora", "nrt": "Northern Kalapuya", "nru": "Narua", "nrx": "Ngurmbur", "nrz": "Lala", "nsa": "Sangtam Naga", "nsb": "Lower Nossob", "nsc": "Nshi", "nsd": "Southern Nisu", "nse": "Nsenga", "nsf": "Northwestern Nisu", "nsg": "Ngasa", "nsh": "Ngoshie", "nsi": "Nigerian Sign Language", "nsk": "Naskapi", "nsl": "Norwegian Sign Language", "nsm": "Sumi Naga", "nsn": "Nehan", "nso": "Pedi; Northern Sotho; Sepedi", "nsp": "Nepalese Sign Language", "nsq": "Northern Sierra Miwok", "nsr": "Maritime Sign Language", "nss": "Nali", "nst": "Tase Naga", "nsu": "Sierra Negra Nahuatl", "nsv": "Southwestern Nisu", "nsw": "Navut", "nsx": "Nsongo", "nsy": "Nasal", "nsz": "Nisenan", "ntd": "Northern Tidung", "nte": "Nathembo", "ntg": "Ngantangarra", "nti": "Natioro", "ntj": "Ngaanyatjarra", "ntk": "Ikoma-Nata-Isenye", "ntm": "Nateni", "nto": "Ntomba", "ntp": "Northern Tepehuan", "ntr": "Delo", "ntu": "Natügu", "ntw": "Nottoway", "ntx": "Tangkhul Naga (Myanmar)", "nty": "Mantsi", "ntz": "Natanzi", "nua": "Yuanga", "nub": "Nubian languages", "nuc": "Nukuini", "nud": "Ngala", "nue": "Ngundu", "nuf": "Nusu", "nug": "Nungali", "nuh": "Ndunda", "nui": "Ngumbi", "nuj": "Nyole", "nuk": "Nuu-chah-nulth; Nuuchahnulth", "nul": "Nusa Laut", "num": "Niuafo'ou", "nun": "Anong", "nuo": "Nguôn", "nup": "Nupe-Nupe-Tako", "nuq": "Nukumanu", "nur": "Nukuria", "nus": "Nuer", "nut": "Nung (Viet Nam)", "nuu": "Ngbundu", "nuv": "Northern Nuni", "nuw": "Nguluwan", "nux": "Mehek", "nuy": "Nunggubuyu", "nuz": "Tlamacazapa Nahuatl", "nv": "Navajo; Navaho", "nvh": "Nasarian", "nvm": "Namiae", "nvo": "Nyokon", "nwa": "Nawathinehena", "nwb": "Nyabwa", "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", "nwe": "Ngwe", "nwg": "Ngayawung", "nwi": "Southwest Tanna", "nwm": "Nyamusa-Molo", "nwo": "Nauo", "nwr": "Nawaru", "nww": "Ndwewe", "nwx": "Middle Newar", "nwy": "Nottoway-Meherrin", "nxa": "Nauete", "nxd": "Ngando (Democratic Republic of Congo)", "nxe": "Nage", "nxg": "Ngad'a", "nxi": "Nindi", "nxk": "Koki Naga", "nxl": "South Nuaulu", "nxm": "Numidian", "nxn": "Ngawun", "nxo": "Ndambomo", "nxq": "Naxi", "nxr": "Ninggerum", "nxx": "Nafri", "ny": "Nyanja; Chewa; Chichewa", "nyb": "Nyangbo", "nyc": "Nyanga-li", "nyd": "Nyore; Olunyole", "nye": "Nyengo", "nyf": "Giryama; Kigiryama", "nyg": "Nyindu", "nyh": "Nyikina", "nyi": "Ama (Sudan)", "nyj": "Nyanga", "nyk": "Nyaneka", "nyl": "Nyeu", "nym": "Nyamwezi", "nyn": "Nyankole", "nyo": "Nyoro", "nyp": "Nyang'i", "nyq": "Nayini", "nyr": "Nyiha (Malawi)", "nys": "Nyungar", "nyt": "Nyawaygi", "nyu": "Nyungwe", "nyv": "Nyulnyul", "nyw": "Nyaw", "nyx": "Nganyaywana", "nyy": "Nyakyusa-Ngonde", "nza": "Tigon Mbembe", "nzb": "Njebi", "nzd": "Nzadi", "nzi": "Nzima", "nzk": "Nzakara", "nzm": "Zeme Naga", "nzs": "New Zealand Sign Language", "nzu": 
"Teke-Nzikou", "nzy": "Nzakambay", "nzz": "Nanga Dama Dogon", "oaa": "Orok", "oac": "Oroch", "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", "oav": "Old Avar", "obi": "Obispeño", "obk": "Southern Bontok", "obl": "Oblo", "obm": "Moabite", "obo": "Obo Manobo", "obr": "Old Burmese", "obt": "Old Breton", "obu": "Obulom", "oc": "Occitan (post 1500)", "oca": "Ocaina", "och": "Old Chinese", "ocm": "Old Cham", "oco": "Old Cornish", "ocu": "Atzingo Matlatzinca", "oda": "Odut", "odk": "Od", "odt": "Old Dutch", "odu": "Odual", "ofo": "Ofo", "ofs": "Old Frisian", "ofu": "Efutop", "ogb": "Ogbia", "ogc": "Ogbah", "oge": "Old Georgian", "ogg": "Ogbogolo", "ogo": "Khana", "ogu": "Ogbronuagum", "oht": "Old Hittite", "ohu": "Old Hungarian", "oia": "Oirata", "oie": "Okolie", "oin": "Inebu One", "oj": "Ojibwa", "ojb": "Northwestern Ojibwa", "ojc": "Central Ojibwa", "ojg": "Eastern Ojibwa", "ojp": "Old Japanese", "ojs": "Severn Ojibwa", "ojv": "Ontong Java", "ojw": "Western Ojibwa", "oka": "Okanagan", "okb": "Okobo", "okc": "Kobo", "okd": "Okodia", "oke": "Okpe (Southwestern Edo)", "okg": "Koko Babangk", "okh": "Koresh-e Rostam", "oki": "Okiek", "okj": "Oko-Juwoi", "okk": "Kwamtim One", "okl": "Old Kentish Sign Language", "okm": "Middle Korean (10th-16th cent.)", "okn": "Oki-No-Erabu", "oko": "Old Korean (3rd-9th cent.)", "okr": "Kirike", "oks": "Oko-Eni-Osayen", "oku": "Oku", "okv": "Orokaiva", "okx": "Okpe (Northwestern Edo)", "okz": "Old Khmer", "ola": "Walungge", "old": "Mochi", "ole": "Olekha", "olk": "Olkol", "olm": "Oloma", "olo": "Livvi", "olr": "Olrat", "olt": "Old Lithuanian", "olu": "Kuvale", "om": "Oromo", "oma": "Omaha-Ponca", "omb": "East Ambae", "omc": "Mochica", "omg": "Omagua", "omi": "Omi", "omk": "Omok", "oml": "Ombo", "omn": "Minoan", "omo": "Utarmbung", "omp": "Old Manipuri", "omq": "Oto-Manguean languages", "omr": "Old Marathi", "omt": "Omotik", "omu": "Omurano", "omv": "Omotic languages", "omw": "South Tairora", "omx": "Old Mon", "omy": "Old Malay", "ona": "Ona", "onb": "Lingao", "one": "Oneida", "ong": "Olo", "oni": "Onin", "onj": "Onjob", "onk": "Kabore One", "onn": "Onobasulu", "ono": "Onondaga", "onp": "Sartang", "onr": "Northern One", "ons": "Ono", "ont": "Ontenu", "onu": "Unua", "onw": "Old Nubian", "onx": "Onin Based Pidgin", "ood": "Tohono O'odham", "oog": "Ong", "oon": "Önge", "oor": "Oorlams", "oos": "Old Ossetic", "opa": "Okpamheri", "opk": "Kopkaka", "opm": "Oksapmin", "opo": "Opao", "opt": "Opata", "opy": "Ofayé", "or": "Oriya (macrolanguage); Odia (macrolanguage)", "ora": "Oroha", "orc": "Orma", "ore": "Orejón", "org": "Oring", "orh": "Oroqen", "orn": "Orang Kanaq", "oro": "Orokolo", "orr": "Oruma", "ors": "Orang Seletar", "ort": "Adivasi Oriya", "oru": "Ormuri", "orv": "Old Russian", "orw": "Oro Win", "orx": "Oro", "ory": "Odia (individual language); Oriya (individual language)", "orz": "Ormu", "os": "Ossetian; Ossetic", "osa": "Osage", "osc": "Oscan", "osi": "Osing", "osn": "Old Sundanese", "oso": "Ososo", "osp": "Old Spanish", "ost": "Osatu", "osu": "Southern One", "osx": "Old Saxon", "ota": "Ottoman Turkish (1500-1928)", "otb": "Old Tibetan", "otd": "Ot Danum", "ote": "Mezquital Otomi", "oti": "Oti", "otk": "Old Turkish", "otl": "Tilapa Otomi", "otm": "Eastern Highland Otomi", "otn": "Tenango Otomi", "oto": "Otomian languages", "otq": "Querétaro Otomi", "otr": "Otoro", "ots": "Estado de México Otomi", "ott": "Temoaya Otomi", "otu": "Otuke", "otw": "Ottawa", "otx": "Texcatepec Otomi", "oty": "Old Tamil", "otz": "Ixtenco Otomi", "oua": "Tagargrent", 
"oub": "Glio-Oubi", "oue": "Oune", "oui": "Old Uighur", "oum": "Ouma", "ovd": "Elfdalian; Övdalian", "owi": "Owiniga", "owl": "Old Welsh", "oyb": "Oy", "oyd": "Oyda", "oym": "Wayampi", "oyy": "Oya'oya", "ozm": "Koonzime", "pa": "Panjabi; Punjabi", "paa": "Papuan languages", "pab": "Parecís", "pac": "Pacoh", "pad": "Paumarí", "pae": "Pagibete", "paf": "Paranawát", "pag": "Pangasinan", "pah": "Tenharim", "pai": "Pe", "pak": "Parakanã", "pal": "Pahlavi", "pam": "Pampanga; Kapampangan", "pao": "Northern Paiute", "pap": "Papiamento", "paq": "Parya", "par": "Panamint; Timbisha", "pas": "Papasena", "pau": "Palauan", "pav": "Pakaásnovos", "paw": "Pawnee", "pax": "Pankararé", "pay": "Pech", "paz": "Pankararú", "pbb": "Páez", "pbc": "Patamona", "pbe": "Mezontla Popoloca", "pbf": "Coyotepec Popoloca", "pbg": "Paraujano", "pbh": "E'ñapa Woromaipu", "pbi": "Parkwa", "pbl": "Mak (Nigeria)", "pbm": "Puebla Mazatec", "pbn": "Kpasam", "pbo": "Papel", "pbp": "Badyara", "pbr": "Pangwa", "pbs": "Central Pame", "pbt": "Southern Pashto", "pbu": "Northern Pashto", "pbv": "Pnar", "pby": "Pyu (Papua New Guinea)", "pca": "Santa Inés Ahuatempan Popoloca", "pcb": "Pear", "pcc": "Bouyei", "pcd": "Picard", "pce": "Ruching Palaung", "pcf": "Paliyan", "pcg": "Paniya", "pch": "Pardhan", "pci": "Duruwa", "pcj": "Parenga", "pck": "Paite Chin", "pcl": "Pardhi", "pcm": "Nigerian Pidgin", "pcn": "Piti", "pcp": "Pacahuara", "pcw": "Pyapun", "pda": "Anam", "pdc": "Pennsylvania German", "pdi": "Pa Di", "pdn": "Podena; Fedan", "pdo": "Padoe", "pdt": "Plautdietsch", "pdu": "Kayan", "pea": "Peranakan Indonesian", "peb": "Eastern Pomo", "ped": "Mala (Papua New Guinea)", "pee": "Taje", "pef": "Northeastern Pomo", "peg": "Pengo", "peh": "Bonan", "pei": "Chichimeca-Jonaz", "pej": "Northern Pomo", "pek": "Penchal", "pel": "Pekal", "pem": "Phende", "peo": "Old Persian (ca. 
600-400 B.C.)", "pep": "Kunja", "peq": "Southern Pomo", "pes": "Iranian Persian", "pev": "Pémono", "pex": "Petats", "pey": "Petjo", "pez": "Eastern Penan", "pfa": "Pááfang", "pfe": "Pere", "pfl": "Pfaelzisch", "pga": "Sudanese Creole Arabic", "pgd": "Gāndhārī", "pgg": "Pangwali", "pgi": "Pagi", "pgk": "Rerep", "pgl": "Primitive Irish", "pgn": "Paelignian", "pgs": "Pangseng", "pgu": "Pagu", "pgz": "Papua New Guinean Sign Language", "pha": "Pa-Hng", "phd": "Phudagi", "phg": "Phuong", "phh": "Phukha", "phi": "Philippine languages", "phj": "Pahari", "phk": "Phake", "phl": "Phalura; Palula", "phm": "Phimbi", "phn": "Phoenician", "pho": "Phunoi", "phq": "Phana'", "phr": "Pahari-Potwari", "pht": "Phu Thai", "phu": "Phuan", "phv": "Pahlavani", "phw": "Phangduwali", "pi": "Pali", "pia": "Pima Bajo", "pib": "Yine", "pic": "Pinji", "pid": "Piaroa", "pie": "Piro", "pif": "Pingelapese", "pig": "Pisabo", "pih": "Pitcairn-Norfolk", "pij": "Pijao", "pil": "Yom", "pim": "Powhatan", "pin": "Piame", "pio": "Piapoco", "pip": "Pero", "pir": "Piratapuyo", "pis": "Pijin", "pit": "Pitta Pitta", "piu": "Pintupi-Luritja", "piv": "Pileni; Vaeakau-Taumako", "piw": "Pimbwe", "pix": "Piu", "piy": "Piya-Kwonci", "piz": "Pije", "pjt": "Pitjantjatjara", "pka": "Ardhamāgadhī Prākrit", "pkb": "Pokomo; Kipfokomo", "pkc": "Paekche", "pkg": "Pak-Tong", "pkh": "Pankhu", "pkn": "Pakanha", "pko": "Pökoot", "pkp": "Pukapuka", "pkr": "Attapady Kurumba", "pks": "Pakistan Sign Language", "pkt": "Maleng", "pku": "Paku", "pl": "Polish", "pla": "Miani", "plb": "Polonombauk", "plc": "Central Palawano", "pld": "Polari", "ple": "Palu'e", "plf": "Central Malayo-Polynesian languages", "plg": "Pilagá", "plh": "Paulohi", "plj": "Polci", "plk": "Kohistani Shina", "pll": "Shwe Palaung", "pln": "Palenquero", "plo": "Oluta Popoluca", "plq": "Palaic", "plr": "Palaka Senoufo", "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", "plt": "Plateau Malagasy", "plu": "Palikúr", "plv": "Southwest Palawano", "plw": "Brooke's Point Palawano", "ply": "Bolyu", "plz": "Paluan", "pma": "Paama", "pmb": "Pambia", "pmd": "Pallanganmiddang", "pme": "Pwaamei", "pmf": "Pamona", "pmh": "Māhārāṣṭri Prākrit", "pmi": "Northern Pumi", "pmj": "Southern Pumi", "pmk": "Pamlico", "pml": "Lingua Franca", "pmm": "Pomo", "pmn": "Pam", "pmo": "Pom", "pmq": "Northern Pame", "pmr": "Paynamar", "pms": "Piemontese", "pmt": "Tuamotuan", "pmw": "Plains Miwok", "pmx": "Poumei Naga", "pmy": "Papuan Malay", "pmz": "Southern Pame", "pna": "Punan Bah-Biau", "pnb": "Western Panjabi", "pnc": "Pannei", "pnd": "Mpinda", "pne": "Western Penan", "png": "Pangu; Pongu", "pnh": "Penrhyn", "pni": "Aoheng", "pnj": "Pinjarup", "pnk": "Paunaka", "pnl": "Paleni", "pnm": "Punan Batu 1", "pnn": "Pinai-Hagahai", "pno": "Panobo", "pnp": "Pancana", "pnq": "Pana (Burkina Faso)", "pnr": "Panim", "pns": "Ponosakan", "pnt": "Pontic", "pnu": "Jiongnai Bunu", "pnv": "Pinigura", "pnw": "Banyjima; Panytyima", "pnx": "Phong-Kniang", "pny": "Pinyin", "pnz": "Pana (Central African Republic)", "poc": "Poqomam", "poe": "San Juan Atzingo Popoloca", "pof": "Poke", "pog": "Potiguára", "poh": "Poqomchi'", "poi": "Highland Popoluca", "pok": "Pokangá", "pom": "Southeastern Pomo", "pon": "Pohnpeian", "poo": "Central Pomo", "pop": "Pwapwâ", "poq": "Texistepec Popoluca", "pos": "Sayula Popoluca", "pot": "Potawatomi", "pov": "Upper Guinea Crioulo", "pow": "San Felipe Otlaltepec Popoloca", "pox": "Polabian", "poy": "Pogolo", "poz": "Malayo-Polynesian languages", "ppe": "Papi", "ppi": "Paipai", "ppk": "Uma", 
"ppl": "Pipil; Nicarao", "ppm": "Papuma", "ppn": "Papapana", "ppo": "Folopa", "ppp": "Pelende", "ppq": "Pei", "pps": "San Luís Temalacayuca Popoloca", "ppt": "Pare", "ppu": "Papora", "pqa": "Pa'a", "pqe": "Eastern Malayo-Polynesian languages", "pqm": "Malecite-Passamaquoddy", "pqw": "Western Malayo-Polynesian languages", "pra": "Prakrit languages", "prc": "Parachi", "prd": "Parsi-Dari", "pre": "Principense", "prf": "Paranan", "prg": "Prussian", "prh": "Porohanon", "pri": "Paicî", "prk": "Parauk", "prl": "Peruvian Sign Language", "prm": "Kibiri", "prn": "Prasuni", "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", "prp": "Parsi", "prq": "Ashéninka Perené", "prr": "Puri", "prs": "Dari; Afghan Persian", "prt": "Phai", "pru": "Puragi", "prw": "Parawen", "prx": "Purik", "prz": "Providencia Sign Language", "ps": "Pushto; Pashto", "psa": "Asue Awyu", "psc": "Iranian Sign Language; Persian Sign Language", "psd": "Plains Indian Sign Language", "pse": "Central Malay", "psg": "Penang Sign Language", "psh": "Southwest Pashai; Southwest Pashayi", "psi": "Southeast Pashai; Southeast Pashayi", "psl": "Puerto Rican Sign Language", "psm": "Pauserna", "psn": "Panasuan", "pso": "Polish Sign Language", "psp": "Philippine Sign Language", "psq": "Pasi", "psr": "Portuguese Sign Language", "pss": "Kaulong", "pst": "Central Pashto", "psu": "Sauraseni Prākrit", "psw": "Port Sandwich", "psy": "Piscataway", "pt": "Portuguese", "pta": "Pai Tavytera", "pth": "Pataxó Hã-Ha-Hãe", "pti": "Pindiini; Wangkatha", "ptn": "Patani", "pto": "Zo'é", "ptp": "Patep", "ptq": "Pattapu", "ptr": "Piamatsina", "ptt": "Enrekang", "ptu": "Bambam", "ptv": "Port Vato", "ptw": "Pentlatch", "pty": "Pathiya", "pua": "Western Highland Purepecha", "pub": "Purum", "puc": "Punan Merap", "pud": "Punan Aput", "pue": "Puelche", "puf": "Punan Merah", "pug": "Phuie", "pui": "Puinave", "puj": "Punan Tubu", "pum": "Puma", "puo": "Puoc", "pup": "Pulabu", "puq": "Puquina", "pur": "Puruborá", "put": "Putoh", "puu": "Punu", "puw": "Puluwatese", "pux": "Puare", "puy": "Purisimeño", "pwa": "Pawaia", "pwb": "Panawa", "pwg": "Gapapaiwa", "pwi": "Patwin", "pwm": "Molbog", "pwn": "Paiwan", "pwo": "Pwo Western Karen", "pwr": "Powari", "pww": "Pwo Northern Karen", "pxm": "Quetzaltepec Mixe", "pye": "Pye Krumen", "pym": "Fyam", "pyn": "Poyanáwa", "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", "pyu": "Puyuma", "pyx": "Pyu (Myanmar)", "pyy": "Pyen", "pzh": "Pazeh", "pzn": "Jejara Naga; Para Naga", "qu": "Quechua", "qua": "Quapaw", "qub": "Huallaga Huánuco Quechua", "quc": "K'iche'; Quiché", "qud": "Calderón Highland Quichua", "quf": "Lambayeque Quechua", "qug": "Chimborazo Highland Quichua", "quh": "South Bolivian Quechua", "qui": "Quileute", "quk": "Chachapoyas Quechua", "qul": "North Bolivian Quechua", "qum": "Sipacapense", "qun": "Quinault", "qup": "Southern Pastaza Quechua", "quq": "Quinqui", "qur": "Yanahuanca Pasco Quechua", "qus": "Santiago del Estero Quichua", "quv": "Sacapulteco", "quw": "Tena Lowland Quichua", "qux": "Yauyos Quechua", "quy": "Ayacucho Quechua", "quz": "Cusco Quechua", "qva": "Ambo-Pasco Quechua", "qvc": "Cajamarca Quechua", "qve": "Eastern Apurímac Quechua", "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", "qvi": "Imbabura Highland Quichua", "qvj": "Loja Highland Quichua", "qvl": "Cajatambo North Lima Quechua", "qvm": "Margos-Yarowilca-Lauricocha Quechua", "qvn": "North Junín Quechua", "qvo": "Napo Lowland Quechua", "qvp": "Pacaraos Quechua", "qvs": "San Martín Quechua", "qvw": "Huaylla Wanca Quechua", "qvy": "Queyu", 
"qvz": "Northern Pastaza Quichua", "qwa": "Corongo Ancash Quechua", "qwc": "Classical Quechua", "qwe": "Quechuan (family)", "qwh": "Huaylas Ancash Quechua", "qwm": "Kuman (Russia)", "qws": "Sihuas Ancash Quechua", "qwt": "Kwalhioqua-Tlatskanai", "qxa": "Chiquián Ancash Quechua", "qxc": "Chincha Quechua", "qxh": "Panao Huánuco Quechua", "qxl": "Salasaca Highland Quichua", "qxn": "Northern Conchucos Ancash Quechua", "qxo": "Southern Conchucos Ancash Quechua", "qxp": "Puno Quechua", "qxq": "Qashqa'i", "qxr": "Cañar Highland Quichua", "qxs": "Southern Qiang", "qxt": "Santa Ana de Tusi Pasco Quechua", "qxu": "Arequipa-La Unión Quechua", "qxw": "Jauja Wanca Quechua", "qya": "Quenya", "qyp": "Quiripi", "raa": "Dungmali", "rab": "Camling", "rac": "Rasawa", "rad": "Rade", "raf": "Western Meohang", "rag": "Logooli; Lulogooli", "rah": "Rabha", "rai": "Ramoaaina", "raj": "Rajasthani", "rak": "Tulu-Bohuai", "ral": "Ralte", "ram": "Canela", "ran": "Riantana", "rao": "Rao", "rap": "Rapanui", "raq": "Saam", "rar": "Rarotongan; Cook Islands Maori", "ras": "Tegali", "rat": "Razajerdi", "rau": "Raute", "rav": "Sampang", "raw": "Rawang", "rax": "Rang", "ray": "Rapa", "raz": "Rahambuu", "rbb": "Rumai Palaung", "rbk": "Northern Bontok", "rbl": "Miraya Bikol", "rbp": "Barababaraba", "rcf": "Réunion Creole French", "rdb": "Rudbari", "rea": "Rerau", "reb": "Rembong", "ree": "Rejang Kayan", "reg": "Kara (Tanzania)", "rei": "Reli", "rej": "Rejang", "rel": "Rendille", "rem": "Remo", "ren": "Rengao", "rer": "Rer Bare", "res": "Reshe", "ret": "Retta", "rey": "Reyesano", "rga": "Roria", "rge": "Romano-Greek", "rgk": "Rangkas", "rgn": "Romagnol", "rgr": "Resígaro", "rgs": "Southern Roglai", "rgu": "Ringgou", "rhg": "Rohingya", "rhp": "Yahang", "ria": "Riang (India)", "rib": "Bribri Sign Language", "rif": "Tarifit", "ril": "Riang Lang; Riang (Myanmar)", "rim": "Nyaturu", "rin": "Nungu", "rir": "Ribun", "rit": "Ritharrngu", "riu": "Riung", "rjg": "Rajong", "rji": "Raji", "rjs": "Rajbanshi", "rka": "Kraol", "rkb": "Rikbaktsa", "rkh": "Rakahanga-Manihiki", "rki": "Rakhine", "rkm": "Marka", "rkt": "Rangpuri; Kamta", "rkw": "Arakwal", "rm": "Romansh", "rma": "Rama", "rmb": "Rembarrnga", "rmc": "Carpathian Romani", "rmd": "Traveller Danish", "rme": "Angloromani", "rmf": "Kalo Finnish Romani", "rmg": "Traveller Norwegian", "rmh": "Murkim", "rmi": "Lomavren", "rmk": "Romkun", "rml": "Baltic Romani", "rmm": "Roma", "rmn": "Balkan Romani", "rmo": "Sinte Romani", "rmp": "Rempi", "rmq": "Caló", "rms": "Romanian Sign Language", "rmt": "Domari", "rmu": "Tavringer Romani", "rmv": "Romanova", "rmw": "Welsh Romani", "rmx": "Romam", "rmy": "Vlax Romani", "rmz": "Marma", "rn": "Rundi", "rnb": "Brunca Sign Language", "rnd": "Ruund", "rng": "Ronga", "rnl": "Ranglong", "rnn": "Roon", "rnp": "Rongpo", "rnr": "Nari Nari", "rnw": "Rungwa", "ro": "Romanian; Moldavian; Moldovan", "roa": "Romance languages", "rob": "Tae'", "roc": "Cacgia Roglai", "rod": "Rogo", "roe": "Ronji", "rof": "Rombo", "rog": "Northern Roglai", "rol": "Romblomanon", "rom": "Romany", "roo": "Rotokas", "rop": "Kriol", "ror": "Rongga", "rou": "Runga", "row": "Dela-Oenale", "rpn": "Repanbitip", "rpt": "Rapting", "rri": "Ririo", "rro": "Waima", "rrt": "Arritinngithigh", "rsb": "Romano-Serbian", "rsk": "Ruthenian; Rusyn", "rsl": "Russian Sign Language", "rsm": "Miriwoong Sign Language", "rsn": "Rwandan Sign Language", "rtc": "Rungtu Chin", "rth": "Ratahan", "rtm": "Rotuman", "rts": "Yurats", "rtw": "Rathawi", "ru": "Russian", "rub": "Gungu", "ruc": "Ruuli", "rue": "Rusyn", "ruf": 
"Luguru", "rug": "Roviana", "ruh": "Ruga", "rui": "Rufiji", "ruk": "Che", "ruo": "Istro Romanian", "rup": "Macedo-Romanian; Aromanian; Arumanian", "ruq": "Megleno Romanian", "rut": "Rutul", "ruu": "Lanas Lobu", "ruy": "Mala (Nigeria)", "ruz": "Ruma", "rw": "Kinyarwanda", "rwa": "Rawo", "rwk": "Rwa", "rwl": "Ruwila", "rwm": "Amba (Uganda)", "rwo": "Rawa", "rwr": "Marwari (India)", "rxd": "Ngardi", "rxw": "Karuwali; Garuwali", "ryn": "Northern Amami-Oshima", "rys": "Yaeyama", "ryu": "Central Okinawan", "rzh": "Rāziḥī", "sa": "Sanskrit", "saa": "Saba", "sab": "Buglere", "sac": "Meskwaki", "sad": "Sandawe", "sae": "Sabanê", "saf": "Safaliba", "sah": "Yakut", "sai": "South American Indian languages", "saj": "Sahu", "sak": "Sake", "sal": "Salishan languages", "sam": "Samaritan Aramaic", "sao": "Sause", "saq": "Samburu", "sar": "Saraveca", "sas": "Sasak", "sat": "Santali", "sau": "Saleman", "sav": "Saafi-Saafi", "saw": "Sawi", "sax": "Sa", "say": "Saya", "saz": "Saurashtra", "sba": "Ngambay", "sbb": "Simbo", "sbc": "Kele (Papua New Guinea)", "sbd": "Southern Samo", "sbe": "Saliba", "sbf": "Chabu; Shabo", "sbg": "Seget", "sbh": "Sori-Harengan", "sbi": "Seti", "sbj": "Surbakhal", "sbk": "Safwa", "sbl": "Botolan Sambal", "sbm": "Sagala", "sbn": "Sindhi Bhil", "sbo": "Sabüm", "sbp": "Sangu (Tanzania)", "sbq": "Sileibi", "sbr": "Sembakung Murut", "sbs": "Subiya", "sbt": "Kimki", "sbu": "Stod Bhoti", "sbv": "Sabine", "sbw": "Simba", "sbx": "Seberuang", "sby": "Soli", "sbz": "Sara Kaba", "sc": "Sardinian", "scb": "Chut", "sce": "Dongxiang", "scf": "San Miguel Creole French", "scg": "Sanggau", "sch": "Sakachep", "sci": "Sri Lankan Creole Malay", "sck": "Sadri", "scl": "Shina", "scn": "Sicilian", "sco": "Scots", "scp": "Hyolmo; Helambu Sherpa", "scq": "Sa'och", "scs": "North Slavey", "sct": "Southern Katang", "scu": "Shumcho", "scv": "Sheni", "scw": "Sha", "scx": "Sicel", "sd": "Sindhi", "sda": "Toraja-Sa'dan", "sdb": "Shabak", "sdc": "Sassarese Sardinian", "sde": "Surubu", "sdf": "Sarli", "sdg": "Savi", "sdh": "Southern Kurdish", "sdj": "Suundi", "sdk": "Sos Kundi", "sdl": "Saudi Arabian Sign Language", "sdn": "Gallurese Sardinian", "sdo": "Bukar-Sadung Bidayuh", "sdp": "Sherdukpen", "sdq": "Semandang", "sdr": "Oraon Sadri", "sds": "Sened", "sdt": "Shuadit", "sdu": "Sarudu", "sdv": "Eastern Sudanic languages", "sdx": "Sibu Melanau", "sdz": "Sallands", "se": "Northern Sami", "sea": "Semai", "seb": "Shempire Senoufo", "sec": "Sechelt", "sed": "Sedang", "see": "Seneca", "sef": "Cebaara Senoufo", "seg": "Segeju", "seh": "Sena", "sei": "Seri", "sej": "Sene", "sek": "Sekani", "sel": "Selkup", "sem": "Semitic languages", "sen": "Nanerigé Sénoufo", "seo": "Suarmin", "sep": "Sìcìté Sénoufo", "seq": "Senara Sénoufo", "ser": "Serrano", "ses": "Koyraboro Senni Songhai", "set": "Sentani", "seu": "Serui-Laut", "sev": "Nyarafolo Senoufo", "sew": "Sewa Bay", "sey": "Secoya", "sez": "Senthang Chin", "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", "sfe": "Eastern Subanen", "sfm": "Small Flowery Miao", "sfs": "South African Sign Language", "sfw": "Sehwi", "sg": "Sango", "sga": "Old Irish (to 900)", "sgb": "Mag-antsi Ayta", "sgc": "Kipsigis", "sgd": "Surigaonon", "sge": "Segai", "sgg": "Swiss-German Sign Language", "sgh": "Shughni", "sgi": "Suga", "sgj": "Surgujia", "sgk": "Sangkong", "sgm": "Singa", "sgn": "Sign languages", "sgp": "Singpho", "sgr": "Sangisari", "sgs": "Samogitian", "sgt": "Brokpake", "sgu": "Salas", "sgw": "Sebat Bet Gurage", "sgx": "Sierra Leone Sign Language", "sgy": 
"Sanglechi", "sgz": "Sursurunga", "sh": "Serbo-Croatian", "sha": "Shall-Zwall", "shb": "Ninam", "shc": "Sonde", "shd": "Kundal Shahi", "she": "Sheko", "shg": "Shua", "shh": "Shoshoni", "shi": "Tachelhit", "shj": "Shatt", "shk": "Shilluk", "shl": "Shendu", "shm": "Shahrudi", "shn": "Shan", "sho": "Shanga", "shp": "Shipibo-Conibo", "shq": "Sala", "shr": "Shi", "shs": "Shuswap", "sht": "Shasta", "shu": "Chadian Arabic", "shv": "Shehri", "shw": "Shwai", "shx": "She", "shy": "Tachawit", "shz": "Syenara Senoufo", "si": "Sinhala; Sinhalese", "sia": "Akkala Sami", "sib": "Sebop", "sid": "Sidamo", "sie": "Simaa", "sif": "Siamou", "sig": "Paasaal", "sih": "Zire; Sîshëë", "sii": "Shom Peng", "sij": "Numbami", "sik": "Sikiana", "sil": "Tumulung Sisaala", "sim": "Mende (Papua New Guinea)", "sio": "Siouan languages", "sip": "Sikkimese", "siq": "Sonia", "sir": "Siri", "sis": "Siuslaw", "sit": "Sino-Tibetan languages", "siu": "Sinagen", "siv": "Sumariup", "siw": "Siwai", "six": "Sumau", "siy": "Sivandi", "siz": "Siwi", "sja": "Epena", "sjb": "Sajau Basap", "sjd": "Kildin Sami", "sje": "Pite Sami", "sjg": "Assangori", "sjk": "Kemi Sami", "sjl": "Sajalong; Miji", "sjm": "Mapun", "sjn": "Sindarin", "sjo": "Xibe", "sjp": "Surjapuri", "sjr": "Siar-Lak", "sjs": "Senhaja De Srair", "sjt": "Ter Sami", "sju": "Ume Sami", "sjw": "Shawnee", "sk": "Slovak", "ska": "Skagit", "skb": "Saek", "skc": "Ma Manda", "skd": "Southern Sierra Miwok", "ske": "Seke (Vanuatu)", "skf": "Sakirabiá", "skg": "Sakalava Malagasy", "skh": "Sikule", "ski": "Sika", "skj": "Seke (Nepal)", "skm": "Kutong", "skn": "Kolibugan Subanon", "sko": "Seko Tengah", "skp": "Sekapan", "skq": "Sininkere", "skr": "Saraiki; Seraiki", "sks": "Maia", "skt": "Sakata", "sku": "Sakao", "skv": "Skou", "skw": "Skepi Creole Dutch", "skx": "Seko Padang", "sky": "Sikaiana", "skz": "Sekar", "sl": "Slovenian", "sla": "Slavic languages", "slc": "Sáliba", "sld": "Sissala", "sle": "Sholaga", "slf": "Swiss-Italian Sign Language", "slg": "Selungai Murut", "slh": "Southern Puget Sound Salish", "sli": "Lower Silesian", "slj": "Salumá", "sll": "Salt-Yui", "slm": "Pangutaran Sama", "sln": "Salinan", "slp": "Lamaholot", "slq": "Salchuq", "slr": "Salar", "sls": "Singapore Sign Language", "slt": "Sila", "slu": "Selaru", "slw": "Sialum", "slx": "Salampasu", "sly": "Selayar", "slz": "Ma'ya", "sm": "Samoan", "sma": "Southern Sami", "smb": "Simbari", "smc": "Som", "smf": "Auwe", "smg": "Simbali", "smh": "Samei", "smi": "Sami languages", "smj": "Lule Sami", "smk": "Bolinao", "sml": "Central Sama", "smm": "Musasa", "smn": "Inari Sami", "smp": "Samaritan", "smq": "Samo", "smr": "Simeulue", "sms": "Skolt Sami", "smt": "Simte", "smu": "Somray", "smv": "Samvedi", "smw": "Sumbawa", "smx": "Samba", "smy": "Semnani", "smz": "Simeku", "sn": "Shona", "snc": "Sinaugoro", "sne": "Bau Bidayuh", "snf": "Noon", "sng": "Sanga (Democratic Republic of Congo)", "sni": "Sensi", "snj": "Riverain Sango", "snk": "Soninke", "snl": "Sangil", "snm": "Southern Ma'di", "snn": "Siona", "sno": "Snohomish", "snp": "Siane", "snq": "Sangu (Gabon)", "snr": "Sihan", "sns": "South West Bay; Nahavaq", "snu": "Senggi; Viid", "snv": "Sa'ban", "snw": "Selee", "snx": "Sam", "sny": "Saniyo-Hiyewe", "snz": "Kou", "so": "Somali", "soa": "Thai Song", "sob": "Sobei", "soc": "So (Democratic Republic of Congo)", "sod": "Songoora", "soe": "Songomeno", "sog": "Sogdian", "soh": "Aka", "soi": "Sonha", "soj": "Soi", "sok": "Sokoro", "sol": "Solos", "son": "Songhai languages", "soo": "Songo", "sop": "Songe", "soq": "Kanasi", "sor": 
"Somrai", "sos": "Seeku", "sou": "Southern Thai", "sov": "Sonsorol", "sow": "Sowanda", "sox": "Swo", "soy": "Miyobe", "soz": "Temi", "spb": "Sepa (Indonesia)", "spc": "Sapé", "spd": "Saep", "spe": "Sepa (Papua New Guinea)", "spg": "Sian", "spi": "Saponi", "spk": "Sengo", "spl": "Selepet", "spm": "Akukem", "spn": "Sanapaná", "spo": "Spokane", "spp": "Supyire Senoufo", "spq": "Loreto-Ucayali Spanish", "spr": "Saparua", "sps": "Saposa", "spt": "Spiti Bhoti", "spu": "Sapuan", "spv": "Sambalpuri; Kosli", "spx": "South Picene", "spy": "Sabaot", "sq": "Albanian", "sqa": "Shama-Sambuga", "sqh": "Shau", "sqj": "Albanian languages", "sqk": "Albanian Sign Language", "sqm": "Suma", "sqn": "Susquehannock", "sqo": "Sorkhei", "sqq": "Sou", "sqr": "Siculo Arabic", "sqs": "Sri Lankan Sign Language", "sqt": "Soqotri", "squ": "Squamish", "sqx": "Kufr Qassem Sign Language (KQSL)", "sr": "Serbian", "sra": "Saruga", "srb": "Sora", "src": "Logudorese Sardinian", "sre": "Sara", "srf": "Nafi", "srg": "Sulod", "srh": "Sarikoli", "sri": "Siriano", "srk": "Serudung Murut", "srl": "Isirawa", "srm": "Saramaccan", "srn": "Sranan Tongo", "sro": "Campidanese Sardinian", "srq": "Sirionó", "srr": "Serer", "srs": "Sarsi", "srt": "Sauri", "sru": "Suruí", "srv": "Southern Sorsoganon", "srw": "Serua", "srx": "Sirmauri", "sry": "Sera", "srz": "Shahmirzadi", "ss": "Swati", "ssa": "Nilo-Saharan languages", "ssb": "Southern Sama", "ssc": "Suba-Simbiti", "ssd": "Siroi", "sse": "Balangingi; Bangingih Sama", "ssf": "Thao", "ssg": "Seimat", "ssh": "Shihhi Arabic", "ssi": "Sansi", "ssj": "Sausi", "ssk": "Sunam", "ssl": "Western Sisaala", "ssm": "Semnam", "ssn": "Waata", "sso": "Sissano", "ssp": "Spanish Sign Language", "ssq": "So'a", "ssr": "Swiss-French Sign Language", "sss": "Sô", "sst": "Sinasina", "ssu": "Susuami", "ssv": "Shark Bay", "ssx": "Samberigi", "ssy": "Saho", "ssz": "Sengseng", "st": "Southern Sotho", "sta": "Settla", "stb": "Northern Subanen", "std": "Sentinel", "ste": "Liana-Seti", "stf": "Seta", "stg": "Trieng", "sth": "Shelta", "sti": "Bulo Stieng", "stj": "Matya Samo", "stk": "Arammba", "stl": "Stellingwerfs", "stm": "Setaman", "stn": "Owa", "sto": "Stoney", "stp": "Southeastern Tepehuan", "stq": "Saterfriesisch", "str": "Straits Salish", "sts": "Shumashti", "stt": "Budeh Stieng", "stu": "Samtao", "stv": "Silt'e", "stw": "Satawalese", "sty": "Siberian Tatar", "su": "Sundanese", "sua": "Sulka", "sub": "Suku", "suc": "Western Subanon", "sue": "Suena", "sug": "Suganga", "sui": "Suki", "suj": "Shubi", "suk": "Sukuma", "suo": "Bouni", "suq": "Tirmaga-Chai Suri; Suri", "sur": "Mwaghavul", "sus": "Susu", "sut": "Subtiaba", "suv": "Puroik", "suw": "Sumbwa", "sux": "Sumerian", "suy": "Suyá", "suz": "Sunwar", "sv": "Swedish", "sva": "Svan", "svb": "Ulau-Suain", "svc": "Vincentian Creole English", "sve": "Serili", "svk": "Slovakian Sign Language", "svm": "Slavomolisano", "svs": "Savosavo", "svx": "Skalvian", "sw": "Swahili (macrolanguage)", "swb": "Maore Comorian", "swc": "Congo Swahili", "swf": "Sere", "swg": "Swabian", "swh": "Swahili (individual language); Kiswahili", "swi": "Sui", "swj": "Sira", "swk": "Malawi Sena", "swl": "Swedish Sign Language", "swm": "Samosa", "swn": "Sawknah", "swo": "Shanenawa", "swp": "Suau", "swq": "Sharwa", "swr": "Saweru", "sws": "Seluwasan", "swt": "Sawila", "swu": "Suwawa", "swv": "Shekhawati", "sww": "Sowa", "swx": "Suruahá", "swy": "Sarua", "sxb": "Suba", "sxc": "Sicanian", "sxe": "Sighu", "sxg": "Shuhi; Shixing", "sxk": "Southern Kalapuya", "sxl": "Selian", "sxm": "Samre", "sxn": "Sangir", 
"sxo": "Sorothaptic", "sxr": "Saaroa", "sxs": "Sasaru", "sxu": "Upper Saxon", "sxw": "Saxwe Gbe", "sya": "Siang", "syb": "Central Subanen", "syc": "Classical Syriac", "syd": "Samoyedic languages", "syi": "Seki", "syk": "Sukur", "syl": "Sylheti", "sym": "Maya Samo", "syn": "Senaya", "syo": "Suoy", "syr": "Syriac", "sys": "Sinyar", "syw": "Kagate", "syx": "Samay", "syy": "Al-Sayyid Bedouin Sign Language", "sza": "Semelai", "szb": "Ngalum", "szc": "Semaq Beri", "szd": "Seru", "sze": "Seze", "szg": "Sengele", "szl": "Silesian", "szn": "Sula", "szp": "Suabo", "szs": "Solomon Islands Sign Language", "szv": "Isu (Fako Division)", "szw": "Sawai", "szy": "Sakizaya", "ta": "Tamil", "taa": "Lower Tanana", "tab": "Tabassaran", "tac": "Lowland Tarahumara", "tad": "Tause", "tae": "Tariana", "taf": "Tapirapé", "tag": "Tagoi", "tai": "Tai languages", "taj": "Eastern Tamang", "tak": "Tala", "tal": "Tal", "tan": "Tangale", "tao": "Yami", "tap": "Taabwa", "taq": "Tamasheq", "tar": "Central Tarahumara", "tas": "Tay Boi", "tau": "Upper Tanana", "tav": "Tatuyo", "taw": "Tai", "tax": "Tamki", "tay": "Atayal", "taz": "Tocho", "tba": "Aikanã", "tbc": "Takia", "tbd": "Kaki Ae", "tbe": "Tanimbili", "tbf": "Mandara", "tbg": "North Tairora", "tbh": "Dharawal; Thurawal", "tbi": "Gaam", "tbj": "Tiang", "tbk": "Calamian Tagbanwa", "tbl": "Tboli", "tbm": "Tagbu", "tbn": "Barro Negro Tunebo", "tbo": "Tawala", "tbp": "Taworta; Diebroud", "tbq": "Tibeto-Burman languages", "tbr": "Tumtum", "tbs": "Tanguat", "tbt": "Tembo (Kitembo)", "tbu": "Tubar", "tbv": "Tobo", "tbw": "Tagbanwa", "tbx": "Kapin", "tby": "Tabaru", "tbz": "Ditammari", "tca": "Ticuna", "tcb": "Tanacross", "tcc": "Datooga", "tcd": "Tafi", "tce": "Southern Tutchone", "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", "tcg": "Tamagario", "tch": "Turks And Caicos Creole English", "tci": "Wára", "tck": "Tchitchege", "tcl": "Taman (Myanmar)", "tcm": "Tanahmerah", "tcn": "Tichurong", "tco": "Taungyo", "tcp": "Tawr Chin", "tcq": "Kaiy", "tcs": "Torres Strait Creole; Yumplatok", "tct": "T'en", "tcu": "Southeastern Tarahumara", "tcw": "Tecpatlán Totonac", "tcx": "Toda", "tcy": "Tulu", "tcz": "Thado Chin", "tda": "Tagdal", "tdb": "Panchpargania", "tdc": "Emberá-Tadó", "tdd": "Tai Nüa", "tde": "Tiranige Diga Dogon", "tdf": "Talieng", "tdg": "Western Tamang", "tdh": "Thulung", "tdi": "Tomadino", "tdj": "Tajio", "tdk": "Tambas", "tdl": "Sur", "tdm": "Taruma", "tdn": "Tondano", "tdo": "Teme", "tdq": "Tita", "tdr": "Todrah", "tds": "Doutai", "tdt": "Tetun Dili", "tdv": "Toro", "tdx": "Tandroy-Mahafaly Malagasy", "tdy": "Tadyawan", "te": "Telugu", "tea": "Temiar", "teb": "Tetete", "tec": "Terik", "ted": "Tepo Krumen", "tee": "Huehuetla Tepehua", "tef": "Teressa", "teg": "Teke-Tege", "teh": "Tehuelche", "tei": "Torricelli", "tek": "Ibali Teke", "tem": "Timne", "ten": "Tama (Colombia)", "teo": "Teso", "tep": "Tepecano", "teq": "Temein", "ter": "Tereno", "tes": "Tengger", "tet": "Tetum", "teu": "Soo", "tev": "Teor", "tew": "Tewa (USA)", "tex": "Tennet", "tey": "Tulishi", "tez": "Tetserret", "tfi": "Tofin Gbe", "tfn": "Tanaina", "tfo": "Tefaro", "tfr": "Teribe", "tft": "Ternate", "tg": "Tajik", "tga": "Sagalla", "tgb": "Tobilung", "tgc": "Tigak", "tgd": "Ciwogai", "tge": "Eastern Gorkha Tamang", "tgf": "Chalikha", "tgh": "Tobagonian Creole English", "tgi": "Lawunuia", "tgj": "Tagin", "tgn": "Tandaganon", "tgo": "Sudest", "tgp": "Tangoa", "tgq": "Tring", "tgr": "Tareng", "tgs": "Nume", "tgt": "Central Tagbanwa", "tgu": "Tanggu", "tgv": "Tingui-Boto", "tgw": "Tagwana Senoufo", 
"tgx": "Tagish", "tgy": "Togoyo", "tgz": "Tagalaka", "th": "Thai", "thd": "Kuuk Thaayorre; Thayore", "the": "Chitwania Tharu", "thf": "Thangmi", "thh": "Northern Tarahumara", "thi": "Tai Long", "thk": "Tharaka; Kitharaka", "thl": "Dangaura Tharu", "thm": "Aheu", "thn": "Thachanadan", "thp": "Thompson", "thq": "Kochila Tharu", "thr": "Rana Tharu", "ths": "Thakali", "tht": "Tahltan", "thu": "Thuri", "thv": "Tahaggart Tamahaq", "thy": "Tha", "thz": "Tayart Tamajeq", "ti": "Tigrinya", "tia": "Tidikelt Tamazight", "tic": "Tira", "tif": "Tifal", "tig": "Tigre", "tih": "Timugon Murut", "tii": "Tiene", "tij": "Tilung", "tik": "Tikar", "til": "Tillamook", "tim": "Timbe", "tin": "Tindi", "tio": "Teop", "tip": "Trimuris", "tiq": "Tiéfo", "tis": "Masadiit Itneg", "tit": "Tinigua", "tiu": "Adasen", "tiv": "Tiv", "tiw": "Tiwi", "tix": "Southern Tiwa", "tiy": "Tiruray", "tiz": "Tai Hongjin", "tja": "Tajuasohn", "tjg": "Tunjung", "tji": "Northern Tujia", "tjj": "Tjungundji", "tjl": "Tai Laing", "tjm": "Timucua", "tjn": "Tonjon", "tjo": "Temacine Tamazight", "tjp": "Tjupany", "tjs": "Southern Tujia", "tju": "Tjurruru", "tjw": "Djabwurrung", "tk": "Turkmen", "tka": "Truká", "tkb": "Buksa", "tkd": "Tukudede", "tke": "Takwane", "tkf": "Tukumanféd", "tkg": "Tesaka Malagasy", "tkl": "Tokelau", "tkm": "Takelma", "tkn": "Toku-No-Shima", "tkp": "Tikopia", "tkq": "Tee", "tkr": "Tsakhur", "tks": "Takestani", "tkt": "Kathoriya Tharu", "tku": "Upper Necaxa Totonac", "tkv": "Mur Pano", "tkw": "Teanu", "tkx": "Tangko", "tkz": "Takua", "tl": "Tagalog", "tla": "Southwestern Tepehuan", "tlb": "Tobelo", "tlc": "Yecuatla Totonac", "tld": "Talaud", "tlf": "Telefol", "tlg": "Tofanma", "tlh": "Klingon; tlhIngan Hol", "tli": "Tlingit", "tlj": "Talinga-Bwisi", "tlk": "Taloki", "tll": "Tetela", "tlm": "Tolomako", "tln": "Talondo'", "tlo": "Talodi", "tlp": "Filomena Mata-Coahuitlán Totonac", "tlq": "Tai Loi", "tlr": "Talise", "tls": "Tambotalo", "tlt": "Sou Nama; Teluti", "tlu": "Tulehu", "tlv": "Taliabu", "tlx": "Khehek", "tly": "Talysh", "tma": "Tama (Chad)", "tmb": "Katbol; Avava", "tmc": "Tumak", "tmd": "Haruai", "tme": "Tremembé", "tmf": "Toba-Maskoy", "tmg": "Ternateño", "tmh": "Tamashek", "tmi": "Tutuba", "tmj": "Samarokena", "tmk": "Northwestern Tamang", "tml": "Tamnim Citak", "tmm": "Tai Thanh", "tmn": "Taman (Indonesia)", "tmo": "Temoq", "tmq": "Tumleo", "tmr": "Jewish Babylonian Aramaic (ca. 
200-1200 CE)", "tms": "Tima", "tmt": "Tasmate", "tmu": "Iau", "tmv": "Tembo (Motembo)", "tmw": "Temuan", "tmy": "Tami", "tmz": "Tamanaku", "tn": "Tswana", "tna": "Tacana", "tnb": "Western Tunebo", "tnc": "Tanimuca-Retuarã", "tnd": "Angosturas Tunebo", "tng": "Tobanga", "tnh": "Maiani", "tni": "Tandia", "tnk": "Kwamera", "tnl": "Lenakel", "tnm": "Tabla", "tnn": "North Tanna", "tno": "Toromono", "tnp": "Whitesands", "tnq": "Taino", "tnr": "Ménik", "tns": "Tenis", "tnt": "Tontemboan", "tnu": "Tay Khang", "tnv": "Tangchangya", "tnw": "Tonsawang", "tnx": "Tanema", "tny": "Tongwe", "tnz": "Ten'edn", "to": "Tonga (Tonga Islands)", "tob": "Toba", "toc": "Coyutla Totonac", "tod": "Toma", "tof": "Gizrra", "tog": "Tonga (Nyasa)", "toh": "Gitonga", "toi": "Tonga (Zambia)", "toj": "Tojolabal", "tok": "Toki Pona", "tol": "Tolowa", "tom": "Tombulu", "too": "Xicotepec De Juárez Totonac", "top": "Papantla Totonac", "toq": "Toposa", "tor": "Togbo-Vara Banda", "tos": "Highland Totonac", "tou": "Tho", "tov": "Upper Taromi", "tow": "Jemez", "tox": "Tobian", "toy": "Topoiyo", "toz": "To", "tpa": "Taupota", "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", "tpe": "Tippera", "tpf": "Tarpia", "tpg": "Kula", "tpi": "Tok Pisin", "tpj": "Tapieté", "tpk": "Tupinikin", "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", "tpm": "Tampulma", "tpn": "Tupinambá", "tpo": "Tai Pao", "tpp": "Pisaflores Tepehua", "tpq": "Tukpa", "tpr": "Tuparí", "tpt": "Tlachichilco Tepehua", "tpu": "Tampuan", "tpv": "Tanapag", "tpw": "Tupí", "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", "tpy": "Trumai", "tpz": "Tinputz", "tqb": "Tembé", "tql": "Lehali", "tqm": "Turumsa", "tqn": "Tenino", "tqo": "Toaripi", "tqp": "Tomoip", "tqq": "Tunni", "tqr": "Torona", "tqt": "Western Totonac", "tqu": "Touo", "tqw": "Tonkawa", "tr": "Turkish", "tra": "Tirahi", "trb": "Terebu", "trc": "Copala Triqui", "trd": "Turi", "tre": "East Tarangan", "trf": "Trinidadian Creole English", "trg": "Lishán Didán", "trh": "Turaka", "tri": "Trió", "trj": "Toram", "trk": "Turkic languages", "trl": "Traveller Scottish", "trm": "Tregami", "trn": "Trinitario", "tro": "Tarao Naga", "trp": "Kok Borok", "trq": "San Martín Itunyoso Triqui", "trr": "Taushiro", "trs": "Chicahuaxtla Triqui", "trt": "Tunggare", "tru": "Turoyo; Surayt", "trv": "Sediq; Seediq; Taroko", "trw": "Torwali", "trx": "Tringgus-Sembaan Bidayuh", "try": "Turung", "trz": "Torá", "ts": "Tsonga", "tsa": "Tsaangi", "tsb": "Tsamai", "tsc": "Tswa", "tsd": "Tsakonian", "tse": "Tunisian Sign Language", "tsg": "Tausug", "tsh": "Tsuvan", "tsi": "Tsimshian", "tsj": "Tshangla", "tsk": "Tseku", "tsl": "Ts'ün-Lao", "tsm": "Turkish Sign Language; Türk İşaret Dili", "tsp": "Northern Toussian", "tsq": "Thai Sign Language", "tsr": "Akei", "tss": "Taiwan Sign Language", "tst": "Tondi Songway Kiini", "tsu": "Tsou", "tsv": "Tsogo", "tsw": "Tsishingini", "tsx": "Mubami", "tsy": "Tebul Sign Language", "tsz": "Purepecha", "tt": "Tatar", "tta": "Tutelo", "ttb": "Gaa", "ttc": "Tektiteko", "ttd": "Tauade", "tte": "Bwanabwana", "ttf": "Tuotomb", "ttg": "Tutong", "tth": "Upper Ta'oih", "tti": "Tobati", "ttj": "Tooro", "ttk": "Totoro", "ttl": "Totela", "ttm": "Northern Tutchone", "ttn": "Towei", "tto": "Lower Ta'oih", "ttp": "Tombelala", "ttq": "Tawallammat Tamajaq", "ttr": "Tera", "tts": "Northeastern Thai", "ttt": "Muslim Tat", "ttu": "Torau", "ttv": "Titan", "ttw": "Long Wat", "tty": "Sikaritai", "ttz": "Tsum", "tua": "Wiarumus", "tub": "Tübatulabal", "tuc": "Mutu", "tud": "Tuxá", "tue": "Tuyuca", "tuf": "Central Tunebo", "tug": "Tunia", "tuh": "Taulil", 
"tui": "Tupuri", "tuj": "Tugutil", "tul": "Tula", "tum": "Tumbuka", "tun": "Tunica", "tuo": "Tucano", "tup": "Tupi languages", "tuq": "Tedaga", "tus": "Tuscarora", "tut": "Altaic languages", "tuu": "Tututni", "tuv": "Turkana", "tuw": "Tungus languages", "tux": "Tuxináwa", "tuy": "Tugen", "tuz": "Turka", "tva": "Vaghua", "tvd": "Tsuvadi", "tve": "Te'un", "tvk": "Southeast Ambrym", "tvl": "Tuvalu", "tvm": "Tela-Masbuar", "tvn": "Tavoyan", "tvo": "Tidore", "tvs": "Taveta", "tvt": "Tutsa Naga", "tvu": "Tunen", "tvw": "Sedoa", "tvx": "Taivoan", "tvy": "Timor Pidgin", "tw": "Twi", "twa": "Twana", "twb": "Western Tawbuid", "twc": "Teshenawa", "twd": "Twents", "twe": "Tewa (Indonesia)", "twf": "Northern Tiwa", "twg": "Tereweng", "twh": "Tai Dón", "twl": "Tawara", "twm": "Tawang Monpa", "twn": "Twendi", "two": "Tswapong", "twp": "Ere", "twq": "Tasawaq", "twr": "Southwestern Tarahumara", "twt": "Turiwára", "twu": "Termanu", "tww": "Tuwari", "twx": "Tewe", "twy": "Tawoyan", "txa": "Tombonuo", "txb": "Tokharian B", "txc": "Tsetsaut", "txe": "Totoli", "txg": "Tangut", "txh": "Thracian", "txi": "Ikpeng", "txj": "Tarjumo", "txm": "Tomini", "txn": "West Tarangan", "txo": "Toto", "txq": "Tii", "txr": "Tartessian", "txs": "Tonsea", "txt": "Citak", "txu": "Kayapó", "txx": "Tatana", "txy": "Tanosy Malagasy", "ty": "Tahitian", "tya": "Tauya", "tye": "Kyanga", "tyh": "O'du", "tyi": "Teke-Tsaayi", "tyj": "Tai Do; Tai Yo", "tyl": "Thu Lao", "tyn": "Kombai", "typ": "Thaypan", "tyr": "Tai Daeng", "tys": "Tày Sa Pa", "tyt": "Tày Tac", "tyu": "Kua", "tyv": "Tuvinian", "tyx": "Teke-Tyee", "tyy": "Tiyaa", "tyz": "Tày", "tza": "Tanzanian Sign Language", "tzh": "Tzeltal", "tzj": "Tz'utujil", "tzl": "Talossan", "tzm": "Central Atlas Tamazight", "tzn": "Tugun", "tzo": "Tzotzil", "tzx": "Tabriak", "uam": "Uamué", "uan": "Kuan", "uar": "Tairuma", "uba": "Ubang", "ubi": "Ubi", "ubl": "Buhi'non Bikol", "ubr": "Ubir", "ubu": "Umbu-Ungu", "uby": "Ubykh", "uda": "Uda", "ude": "Udihe", "udg": "Muduga", "udi": "Udi", "udj": "Ujir", "udl": "Wuzlam", "udm": "Udmurt", "udu": "Uduk", "ues": "Kioko", "ufi": "Ufim", "ug": "Uighur; Uyghur", "uga": "Ugaritic", "ugb": "Kuku-Ugbanh", "uge": "Ughele", "ugh": "Kubachi", "ugn": "Ugandan Sign Language", "ugo": "Ugong", "ugy": "Uruguayan Sign Language", "uha": "Uhami", "uhn": "Damal", "uis": "Uisai", "uiv": "Iyive", "uji": "Tanjijili", "uk": "Ukrainian", "uka": "Kaburi", "ukg": "Ukuriguma", "ukh": "Ukhwejo", "uki": "Kui (India)", "ukk": "Muak Sa-aak", "ukl": "Ukrainian Sign Language", "ukp": "Ukpe-Bayobiri", "ukq": "Ukwa", "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", "uku": "Ukue", "ukv": "Kuku", "ukw": "Ukwuani-Aboh-Ndoni", "uky": "Kuuk-Yak", "ula": "Fungwa", "ulb": "Ulukwumi", "ulc": "Ulch", "ule": "Lule", "ulf": "Usku; Afra", "uli": "Ulithian", "ulk": "Meriam Mir", "ull": "Ullatan", "ulm": "Ulumanda'", "uln": "Unserdeutsch", "ulu": "Uma' Lung", "ulw": "Ulwa", "uma": "Umatilla", "umb": "Umbundu", "umc": "Marrucinian", "umd": "Umbindhamu", "umg": "Morrobalama; Umbuygamu", "umi": "Ukit", "umm": "Umon", "umn": "Makyan Naga", "umo": "Umotína", "ump": "Umpila", "umr": "Umbugarla", "ums": "Pendau", "umu": "Munsee", "una": "North Watut", "und": "Undetermined", "une": "Uneme", "ung": "Ngarinyin", "uni": "Uni", "unk": "Enawené-Nawé", "unm": "Unami", "unn": "Kurnai", "unr": "Mundari", "unu": "Unubahe", "unx": "Munda", "unz": "Unde Kaili", "uon": "Kulon", "upi": "Umeda", "upv": "Uripiv-Wala-Rano-Atchin", "ur": "Urdu", "ura": "Urarina", "urb": "Urubú-Kaapor; Kaapor", "urc": "Urningangg", 
"ure": "Uru", "urf": "Uradhi", "urg": "Urigina", "urh": "Urhobo", "uri": "Urim", "urj": "Uralic languages", "urk": "Urak Lawoi'", "url": "Urali", "urm": "Urapmin", "urn": "Uruangnirin", "uro": "Ura (Papua New Guinea)", "urp": "Uru-Pa-In", "urr": "Lehalurup; Löyöp", "urt": "Urat", "uru": "Urumi", "urv": "Uruava", "urw": "Sop", "urx": "Urimo", "ury": "Orya", "urz": "Uru-Eu-Wau-Wau", "usa": "Usarufa", "ush": "Ushojo", "usi": "Usui", "usk": "Usaghade", "usp": "Uspanteco", "uss": "us-Saare", "usu": "Uya", "uta": "Otank", "ute": "Ute-Southern Paiute", "uth": "ut-Hun", "utp": "Amba (Solomon Islands)", "utr": "Etulo", "utu": "Utu", "uum": "Urum", "uur": "Ura (Vanuatu)", "uuu": "U", "uve": "West Uvean; Fagauvea", "uvh": "Uri", "uvl": "Lote", "uwa": "Kuku-Uwanh", "uya": "Doko-Uyanga", "uz": "Uzbek", "uzn": "Northern Uzbek", "uzs": "Southern Uzbek", "vaa": "Vaagri Booli", "vae": "Vale", "vaf": "Vafsi", "vag": "Vagla", "vah": "Varhadi-Nagpuri", "vai": "Vai", "vaj": "Sekele; Northwestern ǃKung; Vasekele", "val": "Vehes", "vam": "Vanimo", "van": "Valman", "vao": "Vao", "vap": "Vaiphei", "var": "Huarijio", "vas": "Vasavi", "vau": "Vanuma", "vav": "Varli", "vay": "Wayu", "vbb": "Southeast Babar", "vbk": "Southwestern Bontok", "ve": "Venda", "vec": "Venetian", "ved": "Veddah", "vel": "Veluws", "vem": "Vemgo-Mabas", "veo": "Ventureño", "vep": "Veps", "ver": "Mom Jango", "vgr": "Vaghri", "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", "vi": "Vietnamese", "vic": "Virgin Islands Creole English", "vid": "Vidunda", "vif": "Vili", "vig": "Viemo", "vil": "Vilela", "vin": "Vinza", "vis": "Vishavan", "vit": "Viti", "viv": "Iduna", "vka": "Kariyarra", "vkj": "Kujarge", "vkk": "Kaur", "vkl": "Kulisusu", "vkm": "Kamakan", "vkn": "Koro Nulu", "vko": "Kodeoha", "vkp": "Korlai Creole Portuguese", "vkt": "Tenggarong Kutai Malay", "vku": "Kurrama", "vkz": "Koro Zuba", "vlp": "Valpei", "vls": "Vlaams", "vma": "Martuyhunira", "vmb": "Barbaram", "vmc": "Juxtlahuaca Mixtec", "vmd": "Mudu Koraga", "vme": "East Masela", "vmf": "Mainfränkisch", "vmg": "Lungalunga", "vmh": "Maraghei", "vmi": "Miwa", "vmj": "Ixtayutla Mixtec", "vmk": "Makhuwa-Shirima", "vml": "Malgana", "vmm": "Mitlatongo Mixtec", "vmp": "Soyaltepec Mazatec", "vmq": "Soyaltepec Mixtec", "vmr": "Marenje", "vms": "Moksela", "vmu": "Muluridyi", "vmv": "Valley Maidu", "vmw": "Makhuwa", "vmx": "Tamazola Mixtec", "vmy": "Ayautla Mazatec", "vmz": "Mazatlán Mazatec", "vnk": "Vano; Lovono", "vnm": "Vinmavis; Neve'ei", "vnp": "Vunapu", "vo": "Volapük", "vor": "Voro", "vot": "Votic", "vra": "Vera'a", "vro": "Võro", "vrs": "Varisi", "vrt": "Burmbar; Banam Bay", "vsi": "Moldova Sign Language", "vsl": "Venezuelan Sign Language", "vsv": "Valencian Sign Language; Llengua de signes valenciana", "vto": "Vitou", "vum": "Vumbu", "vun": "Vunjo", "vut": "Vute", "vwa": "Awa (China)", "wa": "Walloon", "waa": "Walla Walla", "wab": "Wab", "wac": "Wasco-Wishram", "wad": "Wamesa; Wondama", "wae": "Walser", "waf": "Wakoná", "wag": "Wa'ema", "wah": "Watubela", "wai": "Wares", "waj": "Waffa", "wak": "Wakashan languages", "wal": "Wolaytta; Wolaitta", "wam": "Wampanoag", "wan": "Wan", "wao": "Wappo", "wap": "Wapishana", "waq": "Wagiman", "war": "Waray (Philippines)", "was": "Washo", "wat": "Kaninuwa", "wau": "Waurá", "wav": "Waka", "waw": "Waiwai", "wax": "Watam; Marangis", "way": "Wayana", "waz": "Wampur", "wba": "Warao", "wbb": "Wabo", "wbe": "Waritai", "wbf": "Wara", "wbh": "Wanda", "wbi": "Vwanji", "wbj": "Alagwa", "wbk": "Waigali", "wbl": "Wakhi", "wbm": "Wa", "wbp": "Warlpiri", "wbq": 
"Waddar", "wbr": "Wagdi", "wbs": "West Bengal Sign Language", "wbt": "Warnman", "wbv": "Wajarri", "wbw": "Woi", "wca": "Yanomámi", "wci": "Waci Gbe", "wdd": "Wandji", "wdg": "Wadaginam", "wdj": "Wadjiginy", "wdk": "Wadikali", "wdt": "Wendat", "wdu": "Wadjigu", "wdy": "Wadjabangayi", "wea": "Wewaw", "wec": "Wè Western", "wed": "Wedau", "weg": "Wergaia", "weh": "Weh", "wei": "Kiunum", "wem": "Weme Gbe", "wen": "Sorbian languages", "weo": "Wemale", "wep": "Westphalien", "wer": "Weri", "wes": "Cameroon Pidgin", "wet": "Perai", "weu": "Rawngtu Chin", "wew": "Wejewa", "wfg": "Yafi; Zorop", "wga": "Wagaya", "wgb": "Wagawaga", "wgg": "Wangkangurru; Wangganguru", "wgi": "Wahgi", "wgo": "Waigeo", "wgu": "Wirangu", "wgy": "Warrgamay", "wha": "Sou Upaa; Manusela", "whg": "North Wahgi", "whk": "Wahau Kenyah", "whu": "Wahau Kayan", "wib": "Southern Toussian", "wic": "Wichita", "wie": "Wik-Epa", "wif": "Wik-Keyangan", "wig": "Wik Ngathan", "wih": "Wik-Me'anha", "wii": "Minidien", "wij": "Wik-Iiyanh", "wik": "Wikalkan", "wil": "Wilawila", "wim": "Wik-Mungkan", "win": "Ho-Chunk", "wir": "Wiraféd", "wiu": "Wiru", "wiv": "Vitu", "wiy": "Wiyot", "wja": "Waja", "wji": "Warji", "wka": "Kw'adza", "wkb": "Kumbaran", "wkd": "Wakde; Mo", "wkl": "Kalanadi", "wkr": "Keerray-Woorroong", "wku": "Kunduvadi", "wkw": "Wakawaka", "wky": "Wangkayutyuru", "wla": "Walio", "wlc": "Mwali Comorian", "wle": "Wolane", "wlg": "Kunbarlang", "wlh": "Welaun", "wli": "Waioli", "wlk": "Wailaki", "wll": "Wali (Sudan)", "wlm": "Middle Welsh", "wlo": "Wolio", "wlr": "Wailapa", "wls": "Wallisian", "wlu": "Wuliwuli", "wlv": "Wichí Lhamtés Vejoz", "wlw": "Walak", "wlx": "Wali (Ghana)", "wly": "Waling", "wma": "Mawa (Nigeria)", "wmb": "Wambaya", "wmc": "Wamas", "wmd": "Mamaindé", "wme": "Wambule", "wmg": "Western Minyag", "wmh": "Waima'a", "wmi": "Wamin", "wmm": "Maiwa (Indonesia)", "wmn": "Waamwang", "wmo": "Wom (Papua New Guinea)", "wms": "Wambon", "wmt": "Walmajarri", "wmw": "Mwani", "wmx": "Womo", "wnb": "Wanambre", "wnc": "Wantoat", "wnd": "Wandarang", "wne": "Waneci", "wng": "Wanggom", "wni": "Ndzwani Comorian", "wnk": "Wanukaka", "wnm": "Wanggamala", "wnn": "Wunumara", "wno": "Wano", "wnp": "Wanap", "wnu": "Usan", "wnw": "Wintu", "wny": "Wanyi; Waanyi", "wo": "Wolof", "woa": "Kuwema; Tyaraity", "wob": "Wè Northern", "woc": "Wogeo", "wod": "Wolani", "woe": "Woleaian", "wof": "Gambian Wolof", "wog": "Wogamusin", "woi": "Kamang", "wok": "Longto", "wom": "Wom (Nigeria)", "won": "Wongo", "woo": "Manombai", "wor": "Woria", "wos": "Hanga Hundi", "wow": "Wawonii", "woy": "Weyto", "wpc": "Maco", "wrb": "Waluwarra; Warluwara", "wrg": "Warungu; Gudjal", "wrh": "Wiradjuri", "wri": "Wariyangga", "wrk": "Garrwa", "wrl": "Warlmanpa", "wrm": "Warumungu", "wrn": "Warnang", "wro": "Worrorra", "wrp": "Waropen", "wrr": "Wardaman", "wrs": "Waris", "wru": "Waru", "wrv": "Waruna", "wrw": "Gugu Warra", "wrx": "Wae Rana", "wry": "Merwari", "wrz": "Waray (Australia)", "wsa": "Warembori", "wsg": "Adilabad Gondi", "wsi": "Wusi", "wsk": "Waskia", "wsr": "Owenia", "wss": "Wasa", "wsu": "Wasu", "wsv": "Wotapuri-Katarqalai", "wtf": "Watiwa", "wth": "Wathawurrung", "wti": "Berta", "wtk": "Watakataui", "wtm": "Mewati", "wtw": "Wotu", "wua": "Wikngenchera", "wub": "Wunambal", "wud": "Wudu", "wuh": "Wutunhua", "wul": "Silimo", "wum": "Wumbvu", "wun": "Bungu", "wur": "Wurrugu", "wut": "Wutung", "wuu": "Wu Chinese", "wuv": "Wuvulu-Aua", "wux": "Wulna", "wuy": "Wauyai", "wwa": "Waama", "wwb": "Wakabunga", "wwo": "Wetamut; Dorig", "wwr": "Warrwa", "www": "Wawa", "wxa": 
"Waxianghua", "wxw": "Wardandi", "wyb": "Wangaaybuwan-Ngiyambaa", "wyi": "Woiwurrung", "wym": "Wymysorys", "wyn": "Wyandot", "wyr": "Wayoró", "wyy": "Western Fijian", "xaa": "Andalusian Arabic", "xab": "Sambe", "xac": "Kachari", "xad": "Adai", "xae": "Aequian", "xag": "Aghwan", "xai": "Kaimbé", "xaj": "Ararandewára", "xak": "Máku", "xal": "Kalmyk; Oirat", "xam": "ǀXam", "xan": "Xamtanga", "xao": "Khao", "xap": "Apalachee", "xaq": "Aquitanian", "xar": "Karami", "xas": "Kamas", "xat": "Katawixi", "xau": "Kauwera", "xav": "Xavánte", "xaw": "Kawaiisu", "xay": "Kayan Mahakam", "xbb": "Lower Burdekin", "xbc": "Bactrian", "xbd": "Bindal", "xbe": "Bigambal", "xbg": "Bunganditj", "xbi": "Kombio", "xbj": "Birrpayi", "xbm": "Middle Breton", "xbn": "Kenaboi", "xbo": "Bolgarian", "xbp": "Bibbulman", "xbr": "Kambera", "xbw": "Kambiwá", "xby": "Batjala; Batyala", "xcb": "Cumbric", "xcc": "Camunic", "xce": "Celtiberian", "xcg": "Cisalpine Gaulish", "xch": "Chemakum; Chimakum", "xcl": "Classical Armenian", "xcm": "Comecrudo", "xcn": "Cotoname", "xco": "Chorasmian", "xcr": "Carian", "xct": "Classical Tibetan", "xcu": "Curonian", "xcv": "Chuvantsy", "xcw": "Coahuilteco", "xcy": "Cayuse", "xda": "Darkinyung", "xdc": "Dacian", "xdk": "Dharuk", "xdm": "Edomite", "xdo": "Kwandu", "xdq": "Kaitag", "xdy": "Malayic Dayak", "xeb": "Eblan", "xed": "Hdi", "xeg": "ǁXegwi", "xel": "Kelo", "xem": "Kembayan", "xep": "Epi-Olmec", "xer": "Xerénte", "xes": "Kesawai", "xet": "Xetá", "xeu": "Keoru-Ahia", "xfa": "Faliscan", "xga": "Galatian", "xgb": "Gbin", "xgd": "Gudang", "xgf": "Gabrielino-Fernandeño", "xgg": "Goreng", "xgi": "Garingbal", "xgl": "Galindan", "xgm": "Dharumbal; Guwinmal", "xgn": "Mongolian languages", "xgr": "Garza", "xgu": "Unggumi", "xgw": "Guwa", "xh": "Xhosa", "xha": "Harami", "xhc": "Hunnic", "xhd": "Hadrami", "xhe": "Khetrani", "xhm": "Middle Khmer (1400 to 1850 CE)", "xhr": "Hernican", "xht": "Hattic", "xhu": "Hurrian", "xhv": "Khua", "xib": "Iberian", "xii": "Xiri", "xil": "Illyrian", "xin": "Xinca", "xir": "Xiriâna", "xis": "Kisan", "xiv": "Indus Valley Language", "xiy": "Xipaya", "xjb": "Minjungbal", "xjt": "Jaitmatang", "xka": "Kalkoti", "xkb": "Northern Nago", "xkc": "Kho'ini", "xkd": "Mendalam Kayan", "xke": "Kereho", "xkf": "Khengkha", "xkg": "Kagoro", "xki": "Kenyan Sign Language", "xkj": "Kajali", "xkk": "Kachok; Kaco'", "xkl": "Mainstream Kenyah", "xkn": "Kayan River Kayan", "xko": "Kiorr", "xkp": "Kabatei", "xkq": "Koroni", "xkr": "Xakriabá", "xks": "Kumbewaha", "xkt": "Kantosi", "xku": "Kaamba", "xkv": "Kgalagadi", "xkw": "Kembra", "xkx": "Karore", "xky": "Uma' Lasan", "xkz": "Kurtokha", "xla": "Kamula", "xlb": "Loup B", "xlc": "Lycian", "xld": "Lydian", "xle": "Lemnian", "xlg": "Ligurian (Ancient)", "xli": "Liburnian", "xln": "Alanic", "xlo": "Loup A", "xlp": "Lepontic", "xls": "Lusitanian", "xlu": "Cuneiform Luwian", "xly": "Elymian", "xma": "Mushungulu", "xmb": "Mbonga", "xmc": "Makhuwa-Marrevone", "xmd": "Mbudum", "xme": "Median", "xmf": "Mingrelian", "xmg": "Mengaka", "xmh": "Kugu-Muminh", "xmj": "Majera", "xmk": "Ancient Macedonian", "xml": "Malaysian Sign Language", "xmm": "Manado Malay", "xmn": "Manichaean Middle Persian", "xmo": "Morerebi", "xmp": "Kuku-Mu'inh", "xmq": "Kuku-Mangk", "xmr": "Meroitic", "xms": "Moroccan Sign Language", "xmt": "Matbat", "xmu": "Kamu", "xmv": "Antankarana Malagasy; Tankarana Malagasy", "xmw": "Tsimihety Malagasy", "xmx": "Salawati; Maden", "xmy": "Mayaguduna", "xmz": "Mori Bawah", "xna": "Ancient North Arabian", "xnb": "Kanakanabu", "xnd": "Na-Dene 
languages", "xng": "Middle Mongolian", "xnh": "Kuanhua", "xni": "Ngarigu", "xnj": "Ngoni (Tanzania)", "xnk": "Nganakarti", "xnm": "Ngumbarl", "xnn": "Northern Kankanay", "xno": "Anglo-Norman", "xnq": "Ngoni (Mozambique)", "xnr": "Kangri", "xns": "Kanashi", "xnt": "Narragansett", "xnu": "Nukunul", "xny": "Nyiyaparli", "xnz": "Kenzi; Mattoki", "xoc": "O'chi'chi'", "xod": "Kokoda", "xog": "Soga", "xoi": "Kominimung", "xok": "Xokleng", "xom": "Komo (Sudan)", "xon": "Konkomba", "xoo": "Xukurú", "xop": "Kopar", "xor": "Korubo", "xow": "Kowaki", "xpa": "Pirriya", "xpb": "Northeastern Tasmanian; Pyemmairrener", "xpc": "Pecheneg", "xpd": "Oyster Bay Tasmanian", "xpe": "Liberia Kpelle", "xpf": "Southeast Tasmanian; Nuenonne", "xpg": "Phrygian", "xph": "North Midlands Tasmanian; Tyerrenoterpanner", "xpi": "Pictish", "xpj": "Mpalitjanh", "xpk": "Kulina Pano", "xpl": "Port Sorell Tasmanian", "xpm": "Pumpokol", "xpn": "Kapinawá", "xpo": "Pochutec", "xpp": "Puyo-Paekche", "xpq": "Mohegan-Pequot", "xpr": "Parthian", "xps": "Pisidian", "xpt": "Punthamara", "xpu": "Punic", "xpv": "Northern Tasmanian; Tommeginne", "xpw": "Northwestern Tasmanian; Peerapper", "xpx": "Southwestern Tasmanian; Toogee", "xpy": "Puyo", "xpz": "Bruny Island Tasmanian", "xqa": "Karakhanid", "xqt": "Qatabanian", "xra": "Krahô", "xrb": "Eastern Karaboro", "xrd": "Gundungurra", "xre": "Kreye", "xrg": "Minang", "xri": "Krikati-Timbira", "xrm": "Armazic", "xrn": "Arin", "xrr": "Raetic", "xrt": "Aranama-Tamique", "xru": "Marriammu", "xrw": "Karawa", "xsa": "Sabaean", "xsb": "Sambal", "xsc": "Scythian", "xsd": "Sidetic", "xse": "Sempan", "xsh": "Shamang", "xsi": "Sio", "xsj": "Subi", "xsl": "South Slavey", "xsm": "Kasem", "xsn": "Sanga (Nigeria)", "xso": "Solano", "xsp": "Silopi", "xsq": "Makhuwa-Saka", "xsr": "Sherpa", "xss": "Assan", "xsu": "Sanumá", "xsv": "Sudovian", "xsy": "Saisiyat", "xta": "Alcozauca Mixtec", "xtb": "Chazumba Mixtec", "xtc": "Katcha-Kadugli-Miri", "xtd": "Diuxi-Tilantongo Mixtec", "xte": "Ketengban", "xtg": "Transalpine Gaulish", "xth": "Yitha Yitha", "xti": "Sinicahua Mixtec", "xtj": "San Juan Teita Mixtec", "xtl": "Tijaltepec Mixtec", "xtm": "Magdalena Peñasco Mixtec", "xtn": "Northern Tlaxiaco Mixtec", "xto": "Tokharian A", "xtp": "San Miguel Piedras Mixtec", "xtq": "Tumshuqese", "xtr": "Early Tripuri", "xts": "Sindihui Mixtec", "xtt": "Tacahua Mixtec", "xtu": "Cuyamecalco Mixtec", "xtv": "Thawa", "xtw": "Tawandê", "xty": "Yoloxochitl Mixtec", "xua": "Alu Kurumba", "xub": "Betta Kurumba", "xud": "Umiida", "xug": "Kunigami", "xuj": "Jennu Kurumba", "xul": "Ngunawal; Nunukul", "xum": "Umbrian", "xun": "Unggaranggu", "xuo": "Kuo", "xup": "Upper Umpqua", "xur": "Urartian", "xut": "Kuthant", "xuu": "Kxoe; Khwedam", "xve": "Venetic", "xvi": "Kamviri", "xvn": "Vandalic", "xvo": "Volscian", "xvs": "Vestinian", "xwa": "Kwaza", "xwc": "Woccon", "xwd": "Wadi Wadi", "xwe": "Xwela Gbe", "xwg": "Kwegu", "xwj": "Wajuk", "xwk": "Wangkumara", "xwl": "Western Xwla Gbe", "xwo": "Written Oirat", "xwr": "Kwerba Mamberamo", "xwt": "Wotjobaluk", "xww": "Wemba Wemba", "xxb": "Boro (Ghana)", "xxk": "Ke'o", "xxm": "Minkin", "xxr": "Koropó", "xxt": "Tambora", "xya": "Yaygir", "xyb": "Yandjibara", "xyj": "Mayi-Yapi", "xyk": "Mayi-Kulan", "xyl": "Yalakalore", "xyt": "Mayi-Thakurti", "xyy": "Yorta Yorta", "xzh": "Zhang-Zhung", "xzm": "Zemgalian", "xzp": "Ancient Zapotec", "yaa": "Yaminahua", "yab": "Yuhup", "yac": "Pass Valley Yali", "yad": "Yagua", "yae": "Pumé", "yaf": "Yaka (Democratic Republic of Congo)", "yag": "Yámana", "yah": 
"Yazgulyam", "yai": "Yagnobi", "yaj": "Banda-Yangere", "yak": "Yakama", "yal": "Yalunka", "yam": "Yamba", "yan": "Mayangna", "yao": "Yao", "yap": "Yapese", "yaq": "Yaqui", "yar": "Yabarana", "yas": "Nugunu (Cameroon)", "yat": "Yambeta", "yau": "Yuwana", "yav": "Yangben", "yaw": "Yawalapití", "yax": "Yauma", "yay": "Agwagwune", "yaz": "Lokaa", "yba": "Yala", "ybb": "Yemba", "ybe": "West Yugur", "ybh": "Yakha", "ybi": "Yamphu", "ybj": "Hasha", "ybk": "Bokha", "ybl": "Yukuben", "ybm": "Yaben", "ybn": "Yabaâna", "ybo": "Yabong", "ybx": "Yawiyo", "yby": "Yaweyuha", "ych": "Chesu", "ycl": "Lolopo", "ycn": "Yucuna", "ycp": "Chepya", "yda": "Yanda", "ydd": "Eastern Yiddish", "yde": "Yangum Dey", "ydg": "Yidgha", "ydk": "Yoidik", "yea": "Ravula", "yec": "Yeniche", "yee": "Yimas", "yei": "Yeni", "yej": "Yevanic", "yel": "Yela", "yer": "Tarok", "yes": "Nyankpa", "yet": "Yetfa", "yeu": "Yerukula", "yev": "Yapunda", "yey": "Yeyi", "yga": "Malyangapa", "ygi": "Yiningayi", "ygl": "Yangum Gel", "ygm": "Yagomi", "ygp": "Gepo", "ygr": "Yagaria", "ygs": "Yolŋu Sign Language", "ygu": "Yugul", "ygw": "Yagwoia", "yha": "Baha Buyang", "yhd": "Judeo-Iraqi Arabic", "yhl": "Hlepho Phowa", "yhs": "Yan-nhaŋu Sign Language", "yi": "Yiddish", "yia": "Yinggarda", "yif": "Ache", "yig": "Wusa Nasu", "yih": "Western Yiddish", "yii": "Yidiny", "yij": "Yindjibarndi", "yik": "Dongshanba Lalo", "yil": "Yindjilandji", "yim": "Yimchungru Naga", "yin": "Riang Lai; Yinchia", "yip": "Pholo", "yiq": "Miqie", "yir": "North Awyu", "yis": "Yis", "yit": "Eastern Lalu", "yiu": "Awu", "yiv": "Northern Nisu", "yix": "Axi Yi", "yiz": "Azhe", "yka": "Yakan", "ykg": "Northern Yukaghir", "yki": "Yoke", "ykk": "Yakaikeke", "ykl": "Khlula", "ykm": "Kap", "ykn": "Kua-nsi", "yko": "Yasa", "ykr": "Yekora", "ykt": "Kathu", "yku": "Kuamasi", "yky": "Yakoma", "yla": "Yaul", "ylb": "Yaleba", "yle": "Yele", "ylg": "Yelogu", "yli": "Angguruk Yali", "yll": "Yil", "ylm": "Limi", "yln": "Langnian Buyang", "ylo": "Naluo Yi", "ylr": "Yalarnnga", "ylu": "Aribwaung", "yly": "Nyâlayu; Nyelâyu", "ymb": "Yambes", "ymc": "Southern Muji", "ymd": "Muda", "yme": "Yameo", "ymg": "Yamongeri", "ymh": "Mili", "ymi": "Moji", "ymk": "Makwe", "yml": "Iamalele", "ymm": "Maay", "ymn": "Yamna; Sunum", "ymo": "Yangum Mon", "ymp": "Yamap", "ymq": "Qila Muji", "ymr": "Malasar", "yms": "Mysian", "ymx": "Northern Muji", "ymz": "Muzi", "yna": "Aluo", "ynd": "Yandruwandha", "yne": "Lang'e", "yng": "Yango", "ynk": "Naukan Yupik", "ynl": "Yangulam", "ynn": "Yana", "yno": "Yong", "ynq": "Yendang", "yns": "Yansi", "ynu": "Yahuna", "yo": "Yoruba", "yob": "Yoba", "yog": "Yogad", "yoi": "Yonaguni", "yok": "Yokuts", "yol": "Yola", "yom": "Yombe", "yon": "Yongkom", "yot": "Yotti", "yox": "Yoron", "yoy": "Yoy", "ypa": "Phala", "ypb": "Labo Phowa", "ypg": "Phola", "yph": "Phupha", "ypk": "Yupik languages", "ypm": "Phuma", "ypn": "Ani Phowa", "ypo": "Alo Phola", "ypp": "Phupa", "ypz": "Phuza", "yra": "Yerakai", "yrb": "Yareba", "yre": "Yaouré", "yrk": "Nenets", "yrl": "Nhengatu", "yrm": "Yirrk-Mel", "yrn": "Yerong", "yro": "Yaroamë", "yrs": "Yarsun", "yrw": "Yarawata", "yry": "Yarluyandi", "ysc": "Yassic", "ysd": "Samatao", "ysg": "Sonaga", "ysl": "Yugoslavian Sign Language", "ysm": "Myanmar Sign Language", "ysn": "Sani", "yso": "Nisi (China)", "ysp": "Southern Lolopo", "ysr": "Sirenik Yupik", "yss": "Yessan-Mayo", "ysy": "Sanie", "yta": "Talu", "ytl": "Tanglang", "ytp": "Thopho", "ytw": "Yout Wam", "yty": "Yatay", "yua": "Yucateco; Yucatec Maya", "yub": "Yugambal", "yuc": "Yuchi", "yud": 
"Judeo-Tripolitanian Arabic", "yue": "Yue Chinese; Cantonese", "yuf": "Havasupai-Walapai-Yavapai", "yug": "Yug", "yui": "Yurutí", "yuj": "Karkar-Yuri", "yuk": "Yuki", "yul": "Yulu", "yum": "Quechan", "yun": "Bena (Nigeria)", "yup": "Yukpa", "yuq": "Yuqui", "yur": "Yurok", "yut": "Yopno", "yuw": "Yau (Morobe Province)", "yux": "Southern Yukaghir", "yuy": "East Yugur", "yuz": "Yuracare", "yva": "Yawa", "yvt": "Yavitero", "ywa": "Kalou", "ywg": "Yinhawangka", "ywl": "Western Lalu", "ywn": "Yawanawa", "ywq": "Wuding-Luquan Yi", "ywr": "Yawuru", "ywt": "Xishanba Lalo; Central Lalo", "ywu": "Wumeng Nasu", "yww": "Yawarawarga", "yxa": "Mayawali", "yxg": "Yagara", "yxl": "Yardliyawarra", "yxm": "Yinwum", "yxu": "Yuyu", "yxy": "Yabula Yabula", "yyr": "Yir Yoront", "yyu": "Yau (Sandaun Province)", "yyz": "Ayizi", "yzg": "E'ma Buyang", "yzk": "Zokhuo", "za": "Zhuang; Chuang", "zaa": "Sierra de Juárez Zapotec", "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", "zac": "Ocotlán Zapotec", "zad": "Cajonos Zapotec", "zae": "Yareni Zapotec", "zaf": "Ayoquesco Zapotec", "zag": "Zaghawa", "zah": "Zangwal", "zai": "Isthmus Zapotec", "zaj": "Zaramo", "zak": "Zanaki", "zal": "Zauzou", "zam": "Miahuatlán Zapotec", "zao": "Ozolotepec Zapotec", "zap": "Zapotec", "zaq": "Aloápam Zapotec", "zar": "Rincón Zapotec", "zas": "Santo Domingo Albarradas Zapotec", "zat": "Tabaa Zapotec", "zau": "Zangskari", "zav": "Yatzachi Zapotec", "zaw": "Mitla Zapotec", "zax": "Xadani Zapotec", "zay": "Zayse-Zergulla; Zaysete", "zaz": "Zari", "zba": "Balaibalan", "zbc": "Central Berawan", "zbe": "East Berawan", "zbl": "Blissymbols; Bliss; Blissymbolics", "zbt": "Batui", "zbu": "Bu (Bauchi State)", "zbw": "West Berawan", "zca": "Coatecas Altas Zapotec", "zcd": "Las Delicias Zapotec", "zch": "Central Hongshuihe Zhuang", "zdj": "Ngazidja Comorian", "zea": "Zeeuws", "zeg": "Zenag", "zeh": "Eastern Hongshuihe Zhuang", "zen": "Zenaga", "zga": "Kinga", "zgb": "Guibei Zhuang", "zgh": "Standard Moroccan Tamazight", "zgm": "Minz Zhuang", "zgn": "Guibian Zhuang", "zgr": "Magori", "zh": "Chinese", "zhb": "Zhaba", "zhd": "Dai Zhuang", "zhi": "Zhire", "zhn": "Nong Zhuang", "zhw": "Zhoa", "zhx": "Chinese (family)", "zia": "Zia", "zib": "Zimbabwe Sign Language", "zik": "Zimakani", "zil": "Zialo", "zim": "Mesme", "zin": "Zinza", "ziw": "Zigula", "ziz": "Zizilivakan", "zka": "Kaimbulawa", "zkb": "Koibal", "zkd": "Kadu", "zkg": "Koguryo", "zkh": "Khorezmian", "zkk": "Karankawa", "zkn": "Kanan", "zko": "Kott", "zkp": "São Paulo Kaingáng", "zkr": "Zakhring", "zkt": "Kitan", "zku": "Kaurna", "zkv": "Krevinian", "zkz": "Khazar", "zla": "Zula", "zle": "East Slavic languages", "zlj": "Liujiang Zhuang", "zlm": "Malay (individual language)", "zln": "Lianshan Zhuang", "zlq": "Liuqian Zhuang", "zls": "South Slavic languages", "zlw": "West Slavic languages", "zma": "Manda (Australia)", "zmb": "Zimba", "zmc": "Margany", "zmd": "Maridan", "zme": "Mangerr", "zmf": "Mfinu", "zmg": "Marti Ke", "zmh": "Makolkol", "zmi": "Negeri Sembilan Malay", "zmj": "Maridjabin", "zmk": "Mandandanyi", "zml": "Matngala", "zmm": "Marimanindji; Marramaninyshi", "zmn": "Mbangwe", "zmo": "Molo", "zmp": "Mpuono", "zmq": "Mituku", "zmr": "Maranunggu", "zms": "Mbesa", "zmt": "Maringarr", "zmu": "Muruwari", "zmv": "Mbariman-Gudhinma", "zmw": "Mbo (Democratic Republic of Congo)", "zmx": "Bomitaba", "zmy": "Mariyedi", "zmz": "Mbandja", "zna": "Zan Gula", "znd": "Zande languages", "zne": "Zande (individual language)", "zng": "Mang", "znk": "Manangkari", "zns": "Mangas", "zoc": 
"Copainalá Zoque", "zoh": "Chimalapa Zoque", "zom": "Zou", "zoo": "Asunción Mixtepec Zapotec", "zoq": "Tabasco Zoque", "zor": "Rayón Zoque", "zos": "Francisco León Zoque", "zpa": "Lachiguiri Zapotec", "zpb": "Yautepec Zapotec", "zpc": "Choapan Zapotec", "zpd": "Southeastern Ixtlán Zapotec", "zpe": "Petapa Zapotec", "zpf": "San Pedro Quiatoni Zapotec", "zpg": "Guevea De Humboldt Zapotec", "zph": "Totomachapan Zapotec", "zpi": "Santa María Quiegolani Zapotec", "zpj": "Quiavicuzas Zapotec", "zpk": "Tlacolulita Zapotec", "zpl": "Lachixío Zapotec", "zpm": "Mixtepec Zapotec", "zpn": "Santa Inés Yatzechi Zapotec", "zpo": "Amatlán Zapotec", "zpp": "El Alto Zapotec", "zpq": "Zoogocho Zapotec", "zpr": "Santiago Xanica Zapotec", "zps": "Coatlán Zapotec", "zpt": "San Vicente Coatlán Zapotec", "zpu": "Yalálag Zapotec", "zpv": "Chichicapan Zapotec", "zpw": "Zaniza Zapotec", "zpx": "San Baltazar Loxicha Zapotec", "zpy": "Mazaltepec Zapotec", "zpz": "Texmelucan Zapotec", "zqe": "Qiubei Zhuang", "zra": "Kara (Korea)", "zrg": "Mirgan", "zrn": "Zerenkel", "zro": "Záparo", "zrp": "Zarphatic", "zrs": "Mairasi", "zsa": "Sarasira", "zsk": "Kaskean", "zsl": "Zambian Sign Language", "zsm": "Standard Malay", "zsr": "Southern Rincon Zapotec", "zsu": "Sukurum", "zte": "Elotepec Zapotec", "ztg": "Xanaguía Zapotec", "ztl": "Lapaguía-Guivini Zapotec", "ztm": "San Agustín Mixtepec Zapotec", "ztn": "Santa Catarina Albarradas Zapotec", "ztp": "Loxicha Zapotec", "ztq": "Quioquitani-Quierí Zapotec", "zts": "Tilquiapan Zapotec", "ztt": "Tejalapan Zapotec", "ztu": "Güilá Zapotec", "ztx": "Zaachila Zapotec", "zty": "Yatee Zapotec", "zu": "Zulu", "zua": "Zeem", "zuh": "Tokano", "zum": "Kumzari", "zun": "Zuni", "zuy": "Zumaya", "zwa": "Zay", "zyb": "Yongbei Zhuang", "zyg": "Yang Zhuang", "zyj": "Youjiang Zhuang", "zyn": "Yongnan Zhuang", "zyp": "Zyphe Chin", "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", "zzj": "Zuojiang Zhuang" }
datasets/src/datasets/utils/resources/languages.json/0
{ "file_path": "datasets/src/datasets/utils/resources/languages.json", "repo_id": "datasets", "token_count": 111198 }
84
# ruff: noqa: F401

# This is the module that test_patching.py uses to test patch_submodule()

import os
import os as renamed_os
from os import path
from os import path as renamed_path
from os.path import join
from os.path import join as renamed_join


open = open  # we just need to have a builtin inside this module to test it properly
datasets/tests/_test_patching.py/0
{ "file_path": "datasets/tests/_test_patching.py", "repo_id": "datasets", "token_count": 95 }
85
import datetime
from typing import List, Tuple
from unittest import TestCase
from unittest.mock import MagicMock, patch

import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

from datasets import Array2D
from datasets.arrow_dataset import Dataset
from datasets.features import Audio, ClassLabel, Features, Image, LargeList, Sequence, Value
from datasets.features.features import (
    _align_features,
    _arrow_to_datasets_dtype,
    _cast_to_python_objects,
    _check_if_features_can_be_aligned,
    _check_non_null_non_empty_recursive,
    _visit,
    cast_to_python_objects,
    decode_nested_example,
    encode_nested_example,
    generate_from_arrow_type,
    generate_from_dict,
    get_nested_type,
    require_decoding,
    require_storage_cast,
    require_storage_embed,
    string_to_arrow,
)
from datasets.features.translation import Translation, TranslationVariableLanguages
from datasets.info import DatasetInfo
from datasets.utils.py_utils import asdict

from ..utils import require_jax, require_numpy1_on_windows, require_tf, require_torch


def list_with(item):
    return [item]


class FeaturesTest(TestCase):
    def test_from_arrow_schema_simple(self):
        data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10}
        original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")})
        dset = Dataset.from_dict(data, features=original_features)
        new_features = dset.features
        new_dset = Dataset.from_dict(data, features=new_features)
        self.assertEqual(original_features.type, new_features.type)
        self.assertDictEqual(dset[0], new_dset[0])
        self.assertDictEqual(dset[:], new_dset[:])

    def test_from_arrow_schema_with_sequence(self):
        data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}
        original_features = Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")})
        dset = Dataset.from_dict(data, features=original_features)
        new_features = dset.features
        new_dset = Dataset.from_dict(data, features=new_features)
        self.assertEqual(original_features.type, new_features.type)
        self.assertDictEqual(dset[0], new_dset[0])
        self.assertDictEqual(dset[:], new_dset[:])

    def test_string_to_arrow_bijection_for_primitive_types(self):
        supported_pyarrow_datatypes = [
            pa.time32("s"),
            pa.time64("us"),
            pa.timestamp("s"),
            pa.timestamp("ns", tz="America/New_York"),
            pa.date32(),
            pa.date64(),
            pa.duration("s"),
            pa.decimal128(10, 2),
            pa.decimal256(40, -3),
            pa.string(),
            pa.int32(),
            pa.float64(),
            pa.array([datetime.time(1, 1, 1)]).type,  # arrow type: DataType(time64[us])
        ]
        for dt in supported_pyarrow_datatypes:
            self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt)))

        unsupported_pyarrow_datatypes = [pa.list_(pa.float64())]
        for dt in unsupported_pyarrow_datatypes:
            with self.assertRaises(ValueError):
                string_to_arrow(_arrow_to_datasets_dtype(dt))

        supported_datasets_dtypes = [
            "time32[s]",
            "timestamp[ns]",
            "timestamp[ns, tz=+07:30]",
            "duration[us]",
            "decimal128(30, -4)",
            "int32",
            "float64",
        ]
        for sdt in supported_datasets_dtypes:
            self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt)))

        unsupported_datasets_dtypes = [
            "time32[ns]",
            "timestamp[blob]",
            "timestamp[[ns]]",
            "timestamp[ns, tz=[ns]]",
            "duration[[us]]",
            "decimal20(30, -4)",
            "int",
        ]
        for sdt in unsupported_datasets_dtypes:
            with self.assertRaises(ValueError):
                string_to_arrow(sdt)

    def test_categorical_one_way(self):
        # Categorical types (aka dictionary types) need special handling as there isn't a bijection
        categorical_type = pa.dictionary(pa.int32(), pa.string())
        self.assertEqual("string", _arrow_to_datasets_dtype(categorical_type))

    def test_feature_named_type(self):
        """reference: issue #1110"""
        features = Features({"_type": Value("string")})
        ds_info = DatasetInfo(features=features)
        reloaded_features = Features.from_dict(asdict(ds_info)["features"])
        assert features == reloaded_features

    def test_feature_named_self_as_kwarg(self):
        """reference: issue #5641"""
        features = Features(self=Value("string"))
        ds_info = DatasetInfo(features=features)
        reloaded_features = Features.from_dict(asdict(ds_info)["features"])
        assert features == reloaded_features

    def test_class_label_feature_with_no_labels(self):
        """reference: issue #4681"""
        features = Features({"label": ClassLabel(names=[])})
        ds_info = DatasetInfo(features=features)
        reloaded_features = Features.from_dict(asdict(ds_info)["features"])
        assert features == reloaded_features

    def test_reorder_fields_as(self):
        features = Features(
            {
                "id": Value("string"),
                "document": {
                    "title": Value("string"),
                    "url": Value("string"),
                    "html": Value("string"),
                    "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
                },
                "question": {
                    "text": Value("string"),
                    "tokens": Sequence(Value("string")),
                },
                "annotations": Sequence(
                    {
                        "id": Value("string"),
                        "long_answer": {
                            "start_token": Value("int64"),
                            "end_token": Value("int64"),
                            "start_byte": Value("int64"),
                            "end_byte": Value("int64"),
                        },
                        "short_answers": Sequence(
                            {
                                "start_token": Value("int64"),
                                "end_token": Value("int64"),
                                "start_byte": Value("int64"),
                                "end_byte": Value("int64"),
                                "text": Value("string"),
                            }
                        ),
                        "yes_no_answer": ClassLabel(names=["NO", "YES"]),
                    }
                ),
            }
        )
        other = Features(  # same but with [] instead of sequences, and with a shuffled fields order
            {
                "id": Value("string"),
                "document": {
                    "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
                    "title": Value("string"),
                    "url": Value("string"),
                    "html": Value("string"),
                },
                "question": {
                    "text": Value("string"),
                    "tokens": [Value("string")],
                },
                "annotations": {
                    "yes_no_answer": [ClassLabel(names=["NO", "YES"])],
                    "id": [Value("string")],
                    "long_answer": [
                        {
                            "end_byte": Value("int64"),
                            "start_token": Value("int64"),
                            "end_token": Value("int64"),
                            "start_byte": Value("int64"),
                        }
                    ],
                    "short_answers": [
                        Sequence(
                            {
                                "text": Value("string"),
                                "start_token": Value("int64"),
                                "end_token": Value("int64"),
                                "start_byte": Value("int64"),
                                "end_byte": Value("int64"),
                            }
                        )
                    ],
                },
            }
        )
        expected = Features(
            {
                "id": Value("string"),
                "document": {
                    "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}),
                    "title": Value("string"),
                    "url": Value("string"),
                    "html": Value("string"),
                },
                "question": {
                    "text": Value("string"),
                    "tokens": Sequence(Value("string")),
                },
                "annotations": Sequence(
                    {
                        "yes_no_answer": ClassLabel(names=["NO", "YES"]),
                        "id": Value("string"),
                        "long_answer": {
                            "end_byte": Value("int64"),
                            "start_token": Value("int64"),
                            "end_token": Value("int64"),
                            "start_byte": Value("int64"),
                        },
                        "short_answers": Sequence(
                            {
                                "text": Value("string"),
                                "start_token": Value("int64"),
                                "end_token": Value("int64"),
                                "start_byte": Value("int64"),
                                "end_byte": Value("int64"),
                            }
                        ),
                    }
                ),
            }
        )
        reordered_features = features.reorder_fields_as(other)
        self.assertDictEqual(reordered_features, expected)
        self.assertEqual(reordered_features.type, other.type)
        self.assertEqual(reordered_features.type, expected.type)
        self.assertNotEqual(reordered_features.type, features.type)

    def test_flatten(self):
        features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}})
        _features = features.copy()
        flattened_features = features.flatten()
        assert flattened_features == {"foo.bar1": Value("int32"), "foo.bar2.foobar": Value("string")}
        assert features == _features, "calling flatten shouldn't alter the current features"

    def test_flatten_with_sequence(self):
        features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
        _features = features.copy()
        flattened_features = features.flatten()
        assert flattened_features == {"foo.bar": [{"my_value": Value("int32")}]}
        assert features == _features, "calling flatten shouldn't alter the current features"

    def test_features_dicts_are_synced(self):
        def assert_features_dicts_are_synced(features: Features):
            assert (
                hasattr(features, "_column_requires_decoding")
                and features.keys() == features._column_requires_decoding.keys()
            )

        features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})})
        assert_features_dicts_are_synced(features)
        features["barfoo"] = Image()
        assert_features_dicts_are_synced(features)
        del features["barfoo"]
        assert_features_dicts_are_synced(features)
        features.update({"foobar": Value("string")})
        assert_features_dicts_are_synced(features)
        features.pop("foobar")
        assert_features_dicts_are_synced(features)
        features.popitem()
        assert_features_dicts_are_synced(features)
        features.setdefault("xyz", Value("bool"))
        assert_features_dicts_are_synced(features)
        features.clear()
        assert_features_dicts_are_synced(features)


def test_classlabel_init(tmp_path_factory):
    names = ["negative", "positive"]
    names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
    with open(names_file, "w", encoding="utf-8") as f:
        f.write("\n".join(names))
    classlabel = ClassLabel(names=names)
    assert classlabel.names == names and classlabel.num_classes == len(names)
    classlabel = ClassLabel(names_file=names_file)
    assert classlabel.names == names and classlabel.num_classes == len(names)
    classlabel = ClassLabel(num_classes=len(names), names=names)
    assert classlabel.names == names and classlabel.num_classes == len(names)
    classlabel = ClassLabel(num_classes=len(names))
    assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names)
    with pytest.raises(ValueError):
        classlabel = ClassLabel(num_classes=len(names) + 1, names=names)
    with pytest.raises(ValueError):
        classlabel = ClassLabel(names=names, names_file=names_file)
    with pytest.raises(ValueError):
        classlabel = ClassLabel()
    with pytest.raises(TypeError):
        classlabel = ClassLabel(names=np.array(names))


def test_classlabel_str2int():
    names = ["negative", "positive"]
    classlabel = ClassLabel(names=names)
    for label in names:
        assert classlabel.str2int(label) == names.index(label)
    with pytest.raises(ValueError):
        classlabel.str2int("__bad_label_name__")
    with pytest.raises(ValueError):
        classlabel.str2int(1)
    with pytest.raises(ValueError):
        classlabel.str2int(None)


def test_classlabel_int2str():
    names = ["negative", "positive"]
    classlabel = ClassLabel(names=names)
    for i in range(len(names)):
        assert classlabel.int2str(i) == names[i]
    with pytest.raises(ValueError):
        classlabel.int2str(len(names))
    with pytest.raises(ValueError):
        classlabel.int2str(-1)
    with pytest.raises(ValueError):
        classlabel.int2str(None)


def test_classlabel_cast_storage():
    names = ["negative", "positive"]
    classlabel = ClassLabel(names=names)
    # from integers
    arr = pa.array([0, 1, -1, -100], type=pa.int64())
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == [0, 1, -1, -100]
    arr = pa.array([0, 1, -1, -100], type=pa.int32())
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == [0, 1, -1, -100]
    arr = pa.array([3])
    with pytest.raises(ValueError):
        classlabel.cast_storage(arr)
    # from strings
    arr = pa.array(["negative", "positive"])
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == [0, 1]
    arr = pa.array(["__label_that_doesnt_exist__"])
    with pytest.raises(ValueError):
        classlabel.cast_storage(arr)
    # from nulls
    arr = pa.array([None])
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == [None]
    # from empty
    arr = pa.array([], pa.int64())
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == []
    arr = pa.array([], pa.string())
    result = classlabel.cast_storage(arr)
    assert result.type == pa.int64()
    assert result.to_pylist() == []


@pytest.mark.parametrize("class_label_arg", ["names", "names_file"])
def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory):
    names = ["negative", "positive"]
    names_file = str(tmp_path_factory.mktemp("features") / "labels.txt")
    with open(names_file, "w", encoding="utf-8") as f:
        f.write("\n".join(names))
    if class_label_arg == "names":
        class_label = ClassLabel(names=names)
    elif class_label_arg == "names_file":
        class_label = ClassLabel(names_file=names_file)
    generated_class_label = generate_from_dict(asdict(class_label))
    assert generated_class_label == class_label


@pytest.mark.parametrize(
    "schema",
    [[Audio()], LargeList(Audio()), Sequence(Audio())],
)
def test_decode_nested_example_with_list_types(schema, monkeypatch):
    mock_decode_example = MagicMock()
    monkeypatch.setattr(Audio, "decode_example", mock_decode_example)
    audio_example = {"path": "dummy_audio_path"}
    _ = decode_nested_example(schema, [audio_example])
    assert mock_decode_example.called
    assert mock_decode_example.call_args.args[0] == audio_example


@pytest.mark.parametrize(
    "schema",
    [[ClassLabel(names=["a", "b"])], LargeList(ClassLabel(names=["a", "b"])), Sequence(ClassLabel(names=["a", "b"]))],
)
def test_encode_nested_example_with_list_types(schema):
    result = encode_nested_example(schema, ["b"])
    assert result == [1]


@pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}])
def test_encode_nested_example_sequence_with_none(inner_type):
    schema = Sequence(inner_type)
    obj = None
    result = encode_nested_example(schema, obj)
    assert result is None


@pytest.mark.parametrize(
    "features_dict, example, expected_encoded_example",
    [
        ({"col_1": ClassLabel(names=["a", "b"])}, {"col_1": "b"}, {"col_1": 1}),
        ({"col_1": [ClassLabel(names=["a", "b"])]}, {"col_1": ["b"]}, {"col_1": [1]}),
        ({"col_1": LargeList(ClassLabel(names=["a", "b"]))}, {"col_1": ["b"]}, {"col_1": [1]}),
        ({"col_1": Sequence(ClassLabel(names=["a", "b"]))}, {"col_1": ["b"]}, {"col_1": [1]}),
    ],
)
def test_encode_example(features_dict, example, expected_encoded_example):
    features = Features(features_dict)
    encoded_example = features.encode_example(example)
    assert encoded_example == expected_encoded_example


def test_encode_batch_with_example_with_empty_first_elem():
    features = Features(
        {
            "x": Sequence(Sequence(ClassLabel(names=["a", "b"]))),
        }
    )
    encoded_batch = features.encode_batch(
        {
            "x": [
                [["a"], ["b"]],
                [[], ["b"]],
            ]
        }
    )
    assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]}


def test_encode_column_dict_with_none():
    features = Features(
        {
            "x": {"a": ClassLabel(names=["a", "b"]), "b": Value("int32")},
        }
    )
    encoded_column = features.encode_column([{"a": "a", "b": 1}, None], "x")
    assert encoded_column == [{"a": 0, "b": 1}, None]


@pytest.mark.parametrize(
    "feature",
    [
        Value("int32"),
        ClassLabel(num_classes=2),
        Translation(languages=["en", "fr"]),
        TranslationVariableLanguages(languages=["en", "fr"]),
    ],
)
def test_dataset_feature_with_none(feature):
    data = {"col": [None]}
    features = Features({"col": feature})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"col"}
    assert item["col"] is None
    batch = dset[:1]
    assert len(batch) == 1
    assert batch.keys() == {"col"}
    assert isinstance(batch["col"], list) and all(item is None for item in batch["col"])
    column = dset["col"]
    assert len(column) == 1
    assert isinstance(column, list) and all(item is None for item in column)

    # nested tests

    data = {"col": [[None]]}
    features = Features({"col": Sequence(feature)})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"col"}
    assert all(i is None for i in item["col"])

    data = {"nested": [{"col": None}]}
    features = Features({"nested": {"col": feature}})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"nested"}
    assert item["nested"].keys() == {"col"}
    assert item["nested"]["col"] is None


def iternumpy(key1, value1, value2):
    if value1.dtype != value2.dtype:  # check only for dtype
        raise AssertionError(
            f"dtype of '{key1}' key for casted object: {value1.dtype} and expected object: {value2.dtype} not matching"
        )


def dict_diff(d1: dict, d2: dict):  # check if 2 dictionaries are equal
    np.testing.assert_equal(d1, d2)  # sanity check if dict values are equal or not

    for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()):  # check if their values have same dtype or not
        if isinstance(v1, dict):  # nested dictionary case
            dict_diff(v1, v2)
        elif isinstance(v1, np.ndarray):  # checks if dtype and value of np.ndarray is equal
            iternumpy(k1, v1, v2)
        elif isinstance(v1, list):
            for element1, element2 in zip(v1, v2):  # iterates over all elements of list
                if isinstance(element1, dict):
                    dict_diff(element1, element2)
                elif isinstance(element1, np.ndarray):
                    iternumpy(k1, element1, element2)


class CastToPythonObjectsTest(TestCase):
    def test_cast_to_python_objects_list(self):
        obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
        expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
        casted_obj = cast_to_python_objects(obj)
        self.assertDictEqual(casted_obj, expected_obj)

    def test_cast_to_python_objects_tuple(self):
        obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
        expected_obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]}
        casted_obj = cast_to_python_objects(obj)
        self.assertDictEqual(casted_obj, expected_obj)

    def test_cast_to_python_or_numpy(self):
        obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)}
        expected_obj = {
            "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
            "col_2": np.array([[1, 2], [3, 4], [5, 6]]),
        }
        casted_obj = cast_to_python_objects(obj)
        dict_diff(casted_obj, expected_obj)

    def test_cast_to_python_objects_series(self):
        obj = {
            "col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3),
            "col_2": pd.Series([[1, 2], [3, 4], [5, 6]]),
        }
        expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
        casted_obj = cast_to_python_objects(obj)
        self.assertDictEqual(casted_obj, expected_obj)

    def test_cast_to_python_objects_dataframe(self):
        obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]})
        expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}
        casted_obj = cast_to_python_objects(obj)
        self.assertDictEqual(casted_obj, expected_obj)

    def test_cast_to_python_objects_pandas_timestamp(self):
        obj = pd.Timestamp(2020, 1, 1)
        expected_obj = obj.to_pydatetime()
        casted_obj = cast_to_python_objects(obj)
        self.assertEqual(casted_obj, expected_obj)
        casted_obj = cast_to_python_objects(pd.Series([obj]))
        self.assertListEqual(casted_obj, [expected_obj])
        casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
        self.assertDictEqual(casted_obj, {"a": [expected_obj]})

    def test_cast_to_python_objects_pandas_timedelta(self):
        obj = pd.Timedelta(seconds=1)
        expected_obj = obj.to_pytimedelta()
        casted_obj = cast_to_python_objects(obj)
        self.assertEqual(casted_obj, expected_obj)
        casted_obj = cast_to_python_objects(pd.Series([obj]))
        self.assertListEqual(casted_obj, [expected_obj])
        casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]}))
        self.assertDictEqual(casted_obj, {"a": [expected_obj]})

    @require_numpy1_on_windows
    @require_torch
    def test_cast_to_python_objects_torch(self):
        import torch

        obj = {
            "col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3,
            "col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)),
        }
        expected_obj = {
            "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
            "col_2": np.array([[1, 2], [3, 4], [5, 6]]),
        }
        casted_obj = cast_to_python_objects(obj)
        dict_diff(casted_obj, expected_obj)

    @require_tf
    def test_cast_to_python_objects_tf(self):
        import tensorflow as tf

        obj = {
            "col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3,
            "col_2": tf.constant(np.arange(1, 7).reshape(3, 2)),
        }
        expected_obj = {
            "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3,
            "col_2": np.array([[1, 2], [3, 4], [5, 6]]),
        }
        casted_obj = cast_to_python_objects(obj)
        dict_diff(casted_obj, expected_obj)

    @require_jax
    def test_cast_to_python_objects_jax(self):
        import jax.numpy as jnp

        obj = {
            "col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3,
            "col_2": jnp.array(np.arange(1, 7).reshape(3, 2)),
        }
        assert obj["col_2"].dtype == jnp.int32
        expected_obj = {
            "col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3,
            "col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32),
        }
        casted_obj = cast_to_python_objects(obj)
        dict_diff(casted_obj, expected_obj)

    @patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects)
    def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast):
        obj = {"col_1": [[1, 2], [3, 4], [5, 6]]}
        cast_to_python_objects(obj)
        self.assertEqual(mocked_cast.call_count, 4)  # 4 = depth of obj


SIMPLE_FEATURES = [
    Features(),
    Features({"a": Value("int32")}),
    Features({"a": Value("int32", id="my feature")}),
    Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}),
]
CUSTOM_FEATURES = [
    Features({"label": ClassLabel(names=["negative", "positive"])}),
    Features({"array": Array2D(dtype="float32", shape=(4, 4))}),
    Features({"image": Image()}),
    Features({"audio": Audio()}),
    Features({"image": Image(decode=False)}),
    Features({"audio": Audio(decode=False)}),
    Features({"translation": Translation(["en", "fr"])}),
    Features({"translation": TranslationVariableLanguages(["en", "fr"])}),
]
NESTED_FEATURES = [
    Features({"foo": {}}),
    Features({"foo": {"bar": Value("int32")}}),
    Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}),
    Features({"foo": Sequence(Value("int32"))}),
    Features({"foo": Sequence({})}),
    Features({"foo": Sequence({"bar": Value("int32")})}),
    Features({"foo": [Value("int32")]}),
    Features({"foo": [{"bar": Value("int32")}]}),
    Features({"foo": LargeList(Value("int32"))}),
    Features({"foo": LargeList({"bar": Value("int32")})}),
]
NESTED_CUSTOM_FEATURES = [
    Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}),
    Features({"foo": Sequence(ClassLabel(names=["negative", "positive"]))}),
    Features({"foo": Sequence({"bar": ClassLabel(names=["negative", "positive"])})}),
    Features({"foo": [ClassLabel(names=["negative", "positive"])]}),
    Features({"foo": [{"bar": ClassLabel(names=["negative", "positive"])}]}),
    Features({"foo": LargeList(ClassLabel(names=["negative", "positive"]))}),
    Features({"foo": LargeList({"bar": ClassLabel(names=["negative", "positive"])})}),
]


@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_dict_and_from_dict_round_trip(features: Features):
    features_dict = features.to_dict()
    assert isinstance(features_dict, dict)
    reloaded = Features.from_dict(features_dict)
    assert features == reloaded


@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_yaml_list(features: Features):
    features_yaml_list = features._to_yaml_list()
    assert isinstance(features_yaml_list, list)
    reloaded = Features._from_yaml_list(features_yaml_list)
    assert features == reloaded


@pytest.mark.parametrize(
    "features_dict, expected_features_dict",
    [
        ({"col": [{"sub_col": Value("int32")}]}, {"col": [{"sub_col": Value("int32")}]}),
        ({"col": LargeList({"sub_col": Value("int32")})}, {"col": LargeList({"sub_col": Value("int32")})}),
        ({"col": Sequence({"sub_col": Value("int32")})}, {"col.sub_col": Sequence(Value("int32"))}),
    ],
)
def test_features_flatten_with_list_types(features_dict, expected_features_dict):
    features = Features(features_dict)
    flattened_features = features.flatten()
    assert flattened_features == Features(expected_features_dict)


@pytest.mark.parametrize(
    "deserialized_features_dict, expected_features_dict",
    [
        (
            {"col": [{"dtype": "int32", "_type": "Value"}]},
            {"col": [Value("int32")]},
        ),
        (
            {"col": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "LargeList"}},
            {"col": LargeList(Value("int32"))},
        ),
        (
            {"col": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "Sequence"}},
            {"col": Sequence(Value("int32"))},
        ),
        (
            {"col": [{"sub_col": {"dtype": "int32", "_type": "Value"}}]},
            {"col": [{"sub_col": Value("int32")}]},
        ),
        (
            {"col": {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "LargeList"}},
            {"col": LargeList({"sub_col": Value("int32")})},
        ),
        (
            {"col": {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "Sequence"}},
            {"col": Sequence({"sub_col": Value("int32")})},
        ),
    ],
)
def test_features_from_dict_with_list_types(deserialized_features_dict, expected_features_dict):
    features = Features.from_dict(deserialized_features_dict)
    assert features == Features(expected_features_dict)


@pytest.mark.parametrize(
    "deserialized_feature_dict, expected_feature",
    [
        (
            [{"dtype": "int32", "_type": "Value"}],
            [Value("int32")],
        ),
        (
            {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "LargeList"},
            LargeList(Value("int32")),
        ),
        (
            {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "Sequence"},
            Sequence(Value("int32")),
        ),
        (
            [{"sub_col": {"dtype": "int32", "_type": "Value"}}],
            [{"sub_col": Value("int32")}],
        ),
        (
            {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "LargeList"},
            LargeList({"sub_col": Value("int32")}),
        ),
        (
            {"feature": {"sub_col": {"dtype": "int32", "_type": "Value"}}, "_type": "Sequence"},
            Sequence({"sub_col": Value("int32")}),
        ),
    ],
)
def test_generate_from_dict_with_list_types(deserialized_feature_dict, expected_feature):
    feature = generate_from_dict(deserialized_feature_dict)
    assert feature == expected_feature


@pytest.mark.parametrize(
    "features_dict, expected_features_yaml_list",
    [
        ({"col": LargeList(Value("int32"))}, [{"name": "col", "large_list": "int32"}]),
        (
            {"col": LargeList({"sub_col": Value("int32")})},
            [{"name": "col", "large_list": [{"dtype": "int32", "name": "sub_col"}]}],
        ),
    ],
)
def test_features_to_yaml_list_with_large_list(features_dict, expected_features_yaml_list):
    features = Features(features_dict)
    features_yaml_list = features._to_yaml_list()
    assert features_yaml_list == expected_features_yaml_list


@pytest.mark.parametrize(
    "features_yaml_list, expected_features_dict",
    [
        ([{"name": "col", "large_list": "int32"}], {"col": LargeList(Value("int32"))}),
        (
            [{"name": "col", "large_list": [{"dtype": "int32", "name": "sub_col"}]}],
            {"col": LargeList({"sub_col": Value("int32")})},
        ),
    ],
)
def test_features_from_yaml_list_with_large_list(features_yaml_list, expected_features_dict):
    features = Features._from_yaml_list(features_yaml_list)
    assert features == Features(expected_features_dict)


@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES)
def test_features_to_arrow_schema(features: Features):
    arrow_schema = features.arrow_schema
    assert isinstance(arrow_schema, pa.Schema)
    reloaded = Features.from_arrow_schema(arrow_schema)
    assert features == reloaded


NESTED_COMPARISON = [
    [
        [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
        [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
    ],
    [
        [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="null", id=None)})],
        [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
    ],
    [
        [
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
        ],
        [
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
        ],
    ],
    [
        [
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
            Features({"speaker": {"email": Value(dtype="null", id=None)}}),
        ],
        [
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
            Features({"speaker": {"email": Value(dtype="string", id=None)}}),
        ],
    ],
]


@pytest.mark.parametrize("features", NESTED_COMPARISON)
def test_features_alignment(features: Tuple[List[Features], Features]):
    inputs, expected = features
    _check_if_features_can_be_aligned(inputs)  # Check that we can align, will raise otherwise.
    assert _align_features(inputs) == expected


@pytest.mark.parametrize("dtype", [pa.int32, pa.string])
def test_features_from_arrow_schema_primitive_data_type(dtype):
    schema = pa.schema([("column_name", dtype())])
    assert schema == Features.from_arrow_schema(schema).arrow_schema


@pytest.mark.parametrize("scalar_dtype", [pa.int32, pa.string])
@pytest.mark.parametrize("list_dtype", [pa.list_, pa.large_list])
def test_features_from_arrow_schema_list_data_type(list_dtype, scalar_dtype):
    schema = pa.schema([("column_name", list_dtype(scalar_dtype()))])
    assert schema == Features.from_arrow_schema(schema).arrow_schema


@pytest.mark.parametrize(
    "feature, other_feature",
    [
        ([Value("int64")], [Value("int64")]),
        (LargeList(Value("int64")), LargeList(Value("int64"))),
        (Sequence(Value("int64")), Sequence(Value("int64"))),
        (
            [{"sub_col_1": Value("int64"), "sub_col_2": Value("int64")}],
            [{"sub_col_2": Value("int64"), "sub_col_1": Value("int64")}],
        ),
        (
            LargeList({"sub_col_1": Value("int64"), "sub_col_2": Value("int64")}),
            LargeList({"sub_col_2": Value("int64"), "sub_col_1": Value("int64")}),
        ),
        (
            Sequence({"sub_col_1": Value("int64"), "sub_col_2": Value("int64")}),
            Sequence({"sub_col_2": Value("int64"), "sub_col_1": Value("int64")}),
        ),
    ],
)
def test_features_reorder_fields_as_with_list_types(feature, other_feature):
    features = Features({"col": feature})
    other_features = Features({"col": other_feature})
    new_features = features.reorder_fields_as(other_features)
    assert new_features.type == other_features.type


@pytest.mark.parametrize(
    "feature, expected_arrow_data_type", [(Value("int64"), pa.int64), (Value("string"), pa.string)]
)
def test_get_nested_type_with_scalar_feature(feature, expected_arrow_data_type):
    arrow_data_type = get_nested_type(feature)
    assert arrow_data_type == expected_arrow_data_type()


@pytest.mark.parametrize(
    "scalar_feature, expected_arrow_primitive_data_type", [(Value("int64"), pa.int64), (Value("string"), pa.string)]
)
@pytest.mark.parametrize(
    "list_feature, expected_arrow_nested_data_type",
    [(list_with, pa.list_), (LargeList, pa.large_list), (Sequence, pa.list_)],
)
def test_get_nested_type_with_list_feature(
    list_feature, expected_arrow_nested_data_type, scalar_feature, expected_arrow_primitive_data_type
):
    feature = list_feature(scalar_feature)
    arrow_data_type = get_nested_type(feature)
    assert arrow_data_type == expected_arrow_nested_data_type(expected_arrow_primitive_data_type())


@pytest.mark.parametrize(
    "arrow_primitive_data_type, expected_feature", [(pa.int32, Value("int32")), (pa.string, Value("string"))]
)
def test_generate_from_arrow_type_with_arrow_primitive_data_type(arrow_primitive_data_type, expected_feature):
    arrow_data_type = arrow_primitive_data_type()
    feature = generate_from_arrow_type(arrow_data_type)
    assert feature == expected_feature


@pytest.mark.parametrize(
    "arrow_primitive_data_type, expected_scalar_feature", [(pa.int32, Value("int32")), (pa.string, Value("string"))]
)
@pytest.mark.parametrize(
    "arrow_nested_data_type, expected_list_feature", [(pa.list_, Sequence), (pa.large_list, LargeList)]
)
def test_generate_from_arrow_type_with_arrow_nested_data_type(
    arrow_nested_data_type, expected_list_feature, arrow_primitive_data_type, expected_scalar_feature
):
    arrow_data_type = arrow_nested_data_type(arrow_primitive_data_type())
    feature = generate_from_arrow_type(arrow_data_type)
    expected_feature = expected_list_feature(expected_scalar_feature)
    assert feature == expected_feature


@pytest.mark.parametrize(
    "schema", [[ClassLabel(names=["a", "b"])],
LargeList(ClassLabel(names=["a", "b"])), Sequence(ClassLabel(names=["a", "b"]))], ) def test_check_non_null_non_empty_recursive_with_list_types(schema): assert _check_non_null_non_empty_recursive([], schema) is False @pytest.mark.parametrize( "schema", [ [[ClassLabel(names=["a", "b"])]], LargeList(LargeList(ClassLabel(names=["a", "b"]))), Sequence(Sequence(ClassLabel(names=["a", "b"]))), ], ) def test_check_non_null_non_empty_recursive_with_nested_list_types(schema): assert _check_non_null_non_empty_recursive([[]], schema) is False @pytest.mark.parametrize("feature", [[Audio()], LargeList(Audio()), Sequence(Audio())]) def test_require_decoding_with_list_types(feature): assert require_decoding(feature) @pytest.mark.parametrize("feature", [[Audio()], LargeList(Audio()), Sequence(Audio())]) def test_require_storage_cast_with_list_types(feature): assert require_storage_cast(feature) @pytest.mark.parametrize("feature", [[Audio()], LargeList(Audio()), Sequence(Audio())]) def test_require_storage_embed_with_list_types(feature): assert require_storage_embed(feature) @pytest.mark.parametrize( "feature, expected", [([Value("int32")], [1]), (LargeList(Value("int32")), LargeList(1)), (Sequence(Value("int32")), Sequence(1))], ) def test_visit_with_list_types(feature, expected): def func(x): return 1 if isinstance(x, Value) else x result = _visit(feature, func) assert result == expected
datasets/tests/features/test_features.py/0
{ "file_path": "datasets/tests/features/test_features.py", "repo_id": "datasets", "token_count": 18204 }
86
import pyarrow as pa
import pytest

from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.arrow.arrow import Arrow, ArrowConfig


@pytest.fixture
def arrow_file_streaming_format(tmp_path):
    filename = tmp_path / "stream.arrow"
    testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]]

    schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))])
    array = pa.array(testdata, type=pa.list_(pa.int32()))
    table = pa.Table.from_arrays([array], schema=schema)
    with open(filename, "wb") as f:
        with pa.ipc.new_stream(f, schema) as writer:
            writer.write_table(table)
    return str(filename)


@pytest.fixture
def arrow_file_file_format(tmp_path):
    filename = tmp_path / "file.arrow"
    testdata = [[1, 1, 1], [0, 100, 6], [1, 90, 900]]

    schema = pa.schema([pa.field("input_ids", pa.list_(pa.int32()))])
    array = pa.array(testdata, type=pa.list_(pa.int32()))
    table = pa.Table.from_arrays([array], schema=schema)
    with open(filename, "wb") as f:
        with pa.ipc.new_file(f, schema) as writer:
            writer.write_table(table)
    return str(filename)


@pytest.mark.parametrize(
    "file_fixture, config_kwargs",
    [
        ("arrow_file_streaming_format", {}),
        ("arrow_file_file_format", {}),
    ],
)
def test_arrow_generate_tables(file_fixture, config_kwargs, request):
    arrow = Arrow(**config_kwargs)

    generator = arrow._generate_tables([[request.getfixturevalue(file_fixture)]])
    pa_table = pa.concat_tables([table for _, table in generator])

    expected = {"input_ids": [[1, 1, 1], [0, 100, 6], [1, 90, 900]]}
    assert pa_table.to_pydict() == expected


def test_config_raises_when_invalid_name() -> None:
    with pytest.raises(InvalidConfigName, match="Bad characters"):
        _ = ArrowConfig(name="name-with-*-invalid-character")


@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
    with pytest.raises(ValueError, match="Expected a DataFilesDict"):
        _ = ArrowConfig(name="name", data_files=data_files)
datasets/tests/packaged_modules/test_arrow.py/0
{ "file_path": "datasets/tests/packaged_modules/test_arrow.py", "repo_id": "datasets", "token_count": 873 }
87
import importlib
import os
import tempfile
import types
from contextlib import nullcontext as does_not_raise
from multiprocessing import Process
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from multiprocess.pool import Pool

from datasets.arrow_dataset import Dataset
from datasets.arrow_writer import ArrowWriter
from datasets.builder import (
    ArrowBasedBuilder,
    BuilderConfig,
    DatasetBuilder,
    GeneratorBasedBuilder,
    InvalidConfigName,
)
from datasets.data_files import DataFilesList
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.download.download_manager import DownloadMode
from datasets.features import Features, Value
from datasets.info import DatasetInfo, PostProcessedInfo
from datasets.iterable_dataset import IterableDataset
from datasets.load import configure_builder_class
from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo
from datasets.streaming import xjoin
from datasets.utils.file_utils import is_local_path
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import INFO, get_logger

from .utils import (
    assert_arrow_memory_doesnt_increase,
    assert_arrow_memory_increases,
    require_faiss,
    set_current_working_directory_to_temp_dir,
)


class DummyBuilder(DatasetBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN)]

    def _prepare_split(self, split_generator, **kwargs):
        fname = f"{self.dataset_name}-{split_generator.name}.arrow"
        with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer:
            writer.write_batch({"text": ["foo"] * 100})
            num_examples, num_bytes = writer.finalize()
        split_generator.split_info.num_examples = num_examples
        split_generator.split_info.num_bytes = num_bytes


class DummyGeneratorBasedBuilder(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN)]

    def _generate_examples(self):
        for i in range(100):
            yield i, {"text": "foo"}


class DummyArrowBasedBuilder(ArrowBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN)]

    def _generate_tables(self):
        for i in range(10):
            yield i, pa.table({"text": ["foo"] * 10})


class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"id": Value("int8")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN)]

    def _generate_examples(self):
        for i in range(100):
            yield i, {"id": i}


class DummyGeneratorBasedBuilderConfig(BuilderConfig):
    def __init__(self, content="foo", times=2, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.content = content
        self.times = times


class DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig

    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN)]

    def _generate_examples(self):
        for i in range(100):
            yield i, {"text": self.config.content * self.config.times}


class DummyBuilderWithMultipleConfigs(DummyBuilder):
    BUILDER_CONFIGS = [
        DummyGeneratorBasedBuilderConfig(name="a"),
        DummyGeneratorBasedBuilderConfig(name="b"),
    ]


class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs):
    DEFAULT_CONFIG_NAME = "a"


class DummyBuilderWithDownload(DummyBuilder):
    def __init__(self, *args, rel_path=None, abs_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._rel_path = rel_path
        self._abs_path = abs_path

    def _split_generators(self, dl_manager):
        if self._rel_path is not None:
            assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths"
        if self._abs_path is not None:
            assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths"
        return [SplitGenerator(name=Split.TRAIN)]


class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs):
    @property
    def manual_download_instructions(self):
        return "To use the dataset you have to download some stuff manually and pass the data path to data_dir"

    def _split_generators(self, dl_manager):
        if not os.path.exists(self.config.data_dir):
            raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.")
        return [SplitGenerator(name=Split.TRAIN)]


class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]

    def _generate_tables(self, filepaths):
        idx = 0
        for filepath in filepaths:
            for i in range(10):
                yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
                idx += 1


class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))

    def _split_generators(self, dl_manager):
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]

    def _generate_examples(self, filepaths):
        idx = 0
        for filepath in filepaths:
            for i in range(100):
                yield idx, {"id": i, "filepath": filepath}
                idx += 1


class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))

    def _split_generators(self, dl_manager):
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "filepaths": [f"data{i}.txt" for i in range(4)],
                    "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
                },
            )
        ]

    def _generate_tables(self, filepaths, dummy_kwarg_with_different_length):
        idx = 0
        for filepath in filepaths:
            for i in range(10):
                yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
                idx += 1


class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder):
    def _info(self):
        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))

    def _split_generators(self, dl_manager):
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "filepaths": [f"data{i}.txt" for i in range(4)],
                    "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
                },
            )
        ]

    def _generate_examples(self, filepaths, dummy_kwarg_with_different_length):
        idx = 0
        for filepath in filepaths:
            for i in range(100):
                yield idx, {"id": i, "filepath": filepath}
                idx += 1


def _run_concurrent_download_and_prepare(tmp_dir):
    builder = DummyBuilder(cache_dir=tmp_dir)
    builder.download_and_prepare(download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
    return builder


def check_streaming(builder):
    builders_module = importlib.import_module(builder.__module__)
    assert builders_module._patched_for_streaming
    assert builders_module.os.path.join is xjoin


class BuilderTest(TestCase):
    def test_download_and_prepare(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
            self.assertEqual(builder.info.splits["train"].num_examples, 100)
            self.assertTrue(
                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
            )

    def test_download_and_prepare_checksum_computation(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            builder_no_verification = DummyBuilder(cache_dir=tmp_dir)
            builder_no_verification.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items())
            )
            builder_with_verification = DummyBuilder(cache_dir=tmp_dir)
            builder_with_verification.download_and_prepare(
                download_mode=DownloadMode.FORCE_REDOWNLOAD,
                verification_mode=VerificationMode.ALL_CHECKS,
            )
            self.assertTrue(
                all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items())
            )

    def test_concurrent_download_and_prepare(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            processes = 2
            with Pool(processes=processes) as pool:
                jobs = [
                    pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir})
                    for _ in range(processes)
                ]
                builders = [job.get() for job in jobs]
                for builder in builders:
                    self.assertTrue(
                        os.path.exists(
                            os.path.join(
                                tmp_dir,
                                builder.dataset_name,
                                "default",
                                "0.0.0",
                                f"{builder.dataset_name}-train.arrow",
                            )
                        )
                    )
                    self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
                    self.assertEqual(builder.info.splits["train"].num_examples, 100)
                    self.assertTrue(
                        os.path.exists(
                            os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")
                        )
                    )

    def test_download_and_prepare_with_base_path(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            rel_path = "dummy1.data"
            abs_path = os.path.join(tmp_dir, "dummy2.data")
            # test relative path is missing
            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path)
            with self.assertRaises(FileNotFoundError):
                builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir)
            # test absolute path is missing
            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path)
            with self.assertRaises(FileNotFoundError):
                builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir)
            # test that they are both properly loaded when they exist
            open(os.path.join(tmp_dir, rel_path), "w")
            open(abs_path, "w")
            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir,
                        builder.dataset_name,
                        "default",
                        "0.0.0",
                        f"{builder.dataset_name}-train.arrow",
                    )
                )
            )

    def test_as_dataset_with_post_process(self):
        def _post_process(self, dataset, resources_paths):
            def char_tokenize(example):
                return {"tokens": list(example["text"])}

            return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])

        def _post_processing_resources(self, split):
            return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder.info.post_processed = PostProcessedInfo(
                features=Features({"text": Value("string"), "tokens": [Value("string")]})
            )
            builder._post_process = types.MethodType(_post_process, builder)
            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
            os.makedirs(builder.cache_dir)

            builder.info.splits = SplitDict()
            builder.info.splits.add(SplitInfo("train", num_examples=10))
            builder.info.splits.add(SplitInfo("test", num_examples=10))

            for split in builder.info.splits:
                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
                    features=Features({"text": Value("string")}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 10})
                    writer.finalize()

                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"),
                    features=Features({"text": Value("string"), "tokens": [Value("string")]}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10})
                    writer.finalize()

            dsets = builder.as_dataset()
            self.assertIsInstance(dsets, DatasetDict)
            self.assertListEqual(list(dsets.keys()), ["train", "test"])
            self.assertEqual(len(dsets["train"]), 10)
            self.assertEqual(len(dsets["test"]), 10)
            self.assertDictEqual(
                dsets["train"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
            )
            self.assertDictEqual(
                dsets["test"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
            )
            self.assertListEqual(dsets["train"].column_names, ["text", "tokens"])
            self.assertListEqual(dsets["test"].column_names, ["text", "tokens"])
            del dsets

            dset = builder.as_dataset("train")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train")
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
            self.assertListEqual(dset.column_names, ["text", "tokens"])
            self.assertGreater(builder.info.post_processing_size, 0)
            self.assertGreater(
                builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0
            )
            del dset

            dset = builder.as_dataset("train+test[:30%]")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train+test[:30%]")
            self.assertEqual(len(dset), 13)
            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
            self.assertListEqual(dset.column_names, ["text", "tokens"])
            del dset

            dset = builder.as_dataset("all")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train+test")
            self.assertEqual(len(dset), 20)
            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
            self.assertListEqual(dset.column_names, ["text", "tokens"])
            del dset

        def _post_process(self, dataset, resources_paths):
            return dataset.select([0, 1], keep_in_memory=True)

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder._post_process = types.MethodType(_post_process, builder)
            os.makedirs(builder.cache_dir)

            builder.info.splits = SplitDict()
            builder.info.splits.add(SplitInfo("train", num_examples=10))
            builder.info.splits.add(SplitInfo("test", num_examples=10))

            for split in builder.info.splits:
                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
                    features=Features({"text": Value("string")}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 10})
                    writer.finalize()

                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
                    features=Features({"text": Value("string")}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 2})
                    writer.finalize()

            dsets = builder.as_dataset()
            self.assertIsInstance(dsets, DatasetDict)
            self.assertListEqual(list(dsets.keys()), ["train", "test"])
            self.assertEqual(len(dsets["train"]), 2)
            self.assertEqual(len(dsets["test"]), 2)
            self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
            self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
            self.assertListEqual(dsets["train"].column_names, ["text"])
            self.assertListEqual(dsets["test"].column_names, ["text"])
            del dsets

            dset = builder.as_dataset("train")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train")
            self.assertEqual(len(dset), 2)
            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
            self.assertListEqual(dset.column_names, ["text"])
            del dset

            dset = builder.as_dataset("train+test[:30%]")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train+test[:30%]")
            self.assertEqual(len(dset), 2)
            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
            self.assertListEqual(dset.column_names, ["text"])
            del dset

    @require_faiss
    def test_as_dataset_with_post_process_with_index(self):
        def _post_process(self, dataset, resources_paths):
            if os.path.exists(resources_paths["index"]):
                dataset.load_faiss_index("my_index", resources_paths["index"])
                return dataset
            else:
                dataset.add_faiss_index_from_external_arrays(
                    external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
                )
                dataset.save_faiss_index("my_index", resources_paths["index"])
                return dataset

        def _post_processing_resources(self, split):
            return {"index": f"Flat-{split}.faiss"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder._post_process = types.MethodType(_post_process, builder)
            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
            os.makedirs(builder.cache_dir)

            builder.info.splits = SplitDict()
            builder.info.splits.add(SplitInfo("train", num_examples=10))
            builder.info.splits.add(SplitInfo("test", num_examples=10))

            for split in builder.info.splits:
                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
                    features=Features({"text": Value("string")}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 10})
                    writer.finalize()

                with ArrowWriter(
                    path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
                    features=Features({"text": Value("string")}),
                ) as writer:
                    writer.write_batch({"text": ["foo"] * 2})
                    writer.finalize()

            dsets = builder.as_dataset()
            self.assertIsInstance(dsets, DatasetDict)
            self.assertListEqual(list(dsets.keys()), ["train", "test"])
            self.assertEqual(len(dsets["train"]), 10)
            self.assertEqual(len(dsets["test"]), 10)
            self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
            self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
            self.assertListEqual(dsets["train"].column_names, ["text"])
            self.assertListEqual(dsets["test"].column_names, ["text"])
            self.assertListEqual(dsets["train"].list_indexes(), ["my_index"])
            self.assertListEqual(dsets["test"].list_indexes(), ["my_index"])
            self.assertGreater(builder.info.post_processing_size, 0)
            self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0)
            del dsets

            dset = builder.as_dataset("train")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train")
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
            self.assertListEqual(dset.column_names, ["text"])
            self.assertListEqual(dset.list_indexes(), ["my_index"])
            del dset

            dset = builder.as_dataset("train+test[:30%]")
            self.assertIsInstance(dset, Dataset)
            self.assertEqual(dset.split, "train+test[:30%]")
            self.assertEqual(len(dset), 13)
            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
            self.assertListEqual(dset.column_names, ["text"])
            self.assertListEqual(dset.list_indexes(), ["my_index"])
            del dset

    def test_download_and_prepare_with_post_process(self):
        def _post_process(self, dataset, resources_paths):
            def char_tokenize(example):
                return {"tokens": list(example["text"])}

            return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])

        def _post_processing_resources(self, split):
            return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder.info.post_processed = PostProcessedInfo(
                features=Features({"text": Value("string"), "tokens": [Value("string")]})
            )
            builder._post_process = types.MethodType(_post_process, builder)
            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
            self.assertDictEqual(
                builder.info.post_processed.features,
                Features({"text": Value("string"), "tokens": [Value("string")]}),
            )
            self.assertEqual(builder.info.splits["train"].num_examples, 100)
            self.assertTrue(
                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
            )

        def _post_process(self, dataset, resources_paths):
            return dataset.select([0, 1], keep_in_memory=True)

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder._post_process = types.MethodType(_post_process, builder)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
            self.assertIsNone(builder.info.post_processed)
            self.assertEqual(builder.info.splits["train"].num_examples, 100)
            self.assertTrue(
                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
            )

        def _post_process(self, dataset, resources_paths):
            if os.path.exists(resources_paths["index"]):
                dataset.load_faiss_index("my_index", resources_paths["index"])
                return dataset
            else:
                dataset = dataset.add_faiss_index_from_external_arrays(
                    external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
                )
                dataset.save_faiss_index("my_index", resources_paths["index"])
                return dataset

        def _post_processing_resources(self, split):
            return {"index": f"Flat-{split}.faiss"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder._post_process = types.MethodType(_post_process, builder)
            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
            self.assertIsNone(builder.info.post_processed)
            self.assertEqual(builder.info.splits["train"].num_examples, 100)
            self.assertTrue(
                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
            )

    def test_error_download_and_prepare(self):
        def _prepare_split(self, split_generator, **kwargs):
            raise ValueError()

        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyBuilder(cache_dir=tmp_dir)
            builder._prepare_split = types.MethodType(_prepare_split, builder)
            self.assertRaises(
                ValueError,
                builder.download_and_prepare,
                download_mode=DownloadMode.FORCE_REDOWNLOAD,
            )
            self.assertRaises(FileNotFoundError, builder.as_dataset)

    def test_generator_based_download_and_prepare(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
            builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_dir,
                        builder.dataset_name,
                        "default",
                        "0.0.0",
                        f"{builder.dataset_name}-train.arrow",
                    )
                )
            )
            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
            self.assertEqual(builder.info.splits["train"].num_examples, 100)
            self.assertTrue(
                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
            )

        # Test that duplicated keys are ignored if verification_mode is "no_checks"
        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
            with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer:
                builder.download_and_prepare(
                    download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS
                )
                mock_arrow_writer.assert_called_once()
                args, kwargs = mock_arrow_writer.call_args_list[0]
                self.assertFalse(kwargs["check_duplicates"])

                mock_arrow_writer.reset_mock()
                builder.download_and_prepare(
                    download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS
                )
                mock_arrow_writer.assert_called_once()
                args, kwargs = mock_arrow_writer.call_args_list[0]
                self.assertTrue(kwargs["check_duplicates"])

    def test_cache_dir_no_args(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None)
            relative_cache_dir_parts = Path(builder._relative_data_dir()).parts
            self.assertTupleEqual(relative_cache_dir_parts, (builder.dataset_name, "default", "0.0.0"))

    def test_cache_dir_for_data_files(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt")
            with open(dummy_data1, "w", encoding="utf-8") as f:
                f.writelines("foo bar")
            dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt")
            with open(dummy_data2, "w", encoding="utf-8") as f:
                f.writelines("foo bar\n")

            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1])
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1})
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1})
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]})
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1})
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2)
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2])
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1])
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

            builder = DummyGeneratorBasedBuilder(
                cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
            )
            other_builder = DummyGeneratorBasedBuilder(
                cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
            )
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(
                cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2}
            )
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(
                cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2}
            )
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilder(
                cache_dir=tmp_dir,
                data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2},
            )
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

    def test_cache_dir_for_features(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            f1 = Features({"id": Value("int8")})
            f2 = Features({"id": Value("int32")})
            builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
            other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2)
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

    def test_cache_dir_for_config_kwargs(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # create config on the fly
            builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2)
            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo")
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            self.assertIn("content=foo", builder.cache_dir)
            self.assertIn("times=2", builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2)
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo")
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

        with tempfile.TemporaryDirectory() as tmp_dir:
            # overwrite an existing config
            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2)
            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo")
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            self.assertIn("content=foo", builder.cache_dir)
            self.assertIn("times=2", builder.cache_dir)
            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2)
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo")
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

    def test_config_names(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self.assertRaises(ValueError) as error_context:
                DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None)
            self.assertIn("Please pick one among the available configs", str(error_context.exception))

            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a")
            self.assertEqual(builder.config.name, "a")

            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b")
            self.assertEqual(builder.config.name, "b")

            with self.assertRaises(ValueError):
                DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir)

            builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir)
            self.assertEqual(builder.config.name, "a")

    def test_cache_dir_for_data_dir(self):
        with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
            builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
            other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir)
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)

    def test_cache_dir_for_configured_builder(self):
        with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
            builder_cls = configure_builder_class(
                DummyBuilderWithManualDownload,
                builder_configs=[BuilderConfig(data_dir=data_dir)],
                default_config_name=None,
                dataset_name="dummy",
            )
            builder = builder_cls(cache_dir=tmp_dir, hash="abc")
            other_builder = builder_cls(cache_dir=tmp_dir, hash="abc")
            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
            other_builder = builder_cls(cache_dir=tmp_dir, hash="def")
            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)


def test_config_raises_when_invalid_name() -> None:
    with pytest.raises(InvalidConfigName, match="Bad characters"):
        _ = BuilderConfig(name="name-with-*-invalid-character")


@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
    with pytest.raises(ValueError, match="Expected a DataFilesDict"):
        _ = BuilderConfig(name="name", data_files=data_files)


def test_arrow_based_download_and_prepare(tmp_path):
    builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
    builder.download_and_prepare()
    assert os.path.exists(
        os.path.join(
            tmp_path,
            builder.dataset_name,
            "default",
            "0.0.0",
            f"{builder.dataset_name}-train.arrow",
        )
    )
    assert builder.info.features == Features({"text": Value("string")})
    assert builder.info.splits["train"].num_examples == 100
    assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))


@pytest.mark.parametrize(
    "split, expected_dataset_class, expected_dataset_length",
    [
        (None, DatasetDict, 10),
        ("train", Dataset, 10),
        ("train+test[:30%]", Dataset, 13),
    ],
)
@pytest.mark.parametrize("in_memory", [False, True])
def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path):
    cache_dir = str(tmp_path)
    builder = DummyBuilder(cache_dir=cache_dir)
    os.makedirs(builder.cache_dir)

    builder.info.splits = SplitDict()
    builder.info.splits.add(SplitInfo("train", num_examples=10))
    builder.info.splits.add(SplitInfo("test", num_examples=10))

    for info_split in builder.info.splits:
        with ArrowWriter(
            path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"),
            features=Features({"text": Value("string")}),
        ) as writer:
            writer.write_batch({"text": ["foo"] * 10})
            writer.finalize()

    with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
        dataset = builder.as_dataset(split=split, in_memory=in_memory)
    assert isinstance(dataset, expected_dataset_class)
    if isinstance(dataset, DatasetDict):
        assert list(dataset.keys()) == ["train", "test"]
        datasets = dataset.values()
        expected_splits = ["train", "test"]
    elif isinstance(dataset, Dataset):
        datasets = [dataset]
        expected_splits = [split]
    for dataset, expected_split in zip(datasets, expected_splits):
        assert dataset.split == expected_split
        assert len(dataset) == expected_dataset_length
        assert dataset.features == Features({"text": Value("string")})
        assert dataset.column_names == ["text"]


@pytest.mark.parametrize("in_memory", [False, True])
def test_generator_based_builder_as_dataset(in_memory, tmp_path):
    cache_dir = tmp_path / "data"
    cache_dir.mkdir()
    cache_dir = str(cache_dir)
    builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir)
    builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
    with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
        dataset = builder.as_dataset("train", in_memory=in_memory)
    assert dataset.data.to_pydict() == {"text": ["foo"] * 100}


@pytest.mark.parametrize(
    "writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)]
)
def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks):
    cache_dir = str(tmp_path)
    if default_writer_batch_size:
        DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size
    builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size)
    assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size)
    builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD)
    dataset = builder.as_dataset("train")
    assert len(dataset.data[0].chunks) == expected_chunks


def test_builder_as_streaming_dataset(tmp_path):
    dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
    check_streaming(dummy_builder)
    dsets = dummy_builder.as_streaming_dataset()
    assert isinstance(dsets, IterableDatasetDict)
    assert isinstance(dsets["train"], IterableDataset)
    assert len(list(dsets["train"])) == 100
    dset = dummy_builder.as_streaming_dataset(split="train")
    assert isinstance(dset, IterableDataset)
    assert len(list(dset)) == 100


def _run_test_builder_streaming_works_in_subprocesses(builder):
    check_streaming(builder)
    dset = builder.as_streaming_dataset(split="train")
    assert isinstance(dset, IterableDataset)
    assert len(list(dset)) == 100


def test_builder_streaming_works_in_subprocess(tmp_path):
    dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
    p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,))
    p.start()
    p.join()


class DummyBuilderWithVersion(GeneratorBasedBuilder):
    VERSION = "2.0.0"

    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        pass

    def _generate_examples(self):
        pass


class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")]

    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        pass

    def _generate_examples(self):
        pass


class CustomBuilderConfig(BuilderConfig):
    def __init__(self, date=None, language=None, version="2.0.0", **kwargs):
        name = f"{date}.{language}"
        super().__init__(name=name, version=version, **kwargs)
        self.date = date
        self.language = language


class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")]
    BUILDER_CONFIG_CLASS = CustomBuilderConfig

    def _info(self):
        return DatasetInfo(features=Features({"text": Value("string")}))

    def _split_generators(self, dl_manager):
        pass

    def _generate_examples(self):
        pass


@pytest.mark.parametrize(
    "builder_class, kwargs",
    [
        (DummyBuilderWithVersion, {}),
        (DummyBuilderWithBuilderConfigs, {"config_name": "custom"}),
        (DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}),
        (DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}),
    ],
)
def test_builder_config_version(builder_class, kwargs, tmp_path):
    cache_dir = str(tmp_path)
    builder = builder_class(cache_dir=cache_dir, **kwargs)
    assert builder.config.version == "2.0.0"


def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path):
    builder = DummyGeneratorBasedBuilder()
    output_dir = str(tmp_path)
    builder.download_and_prepare(output_dir)
    assert builder._output_dir.startswith(tmp_path.resolve().as_posix())
    assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
    assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
    assert not os.path.exists(os.path.join(output_dir + ".incomplete"))


def test_builder_download_and_prepare_with_relative_output_dir():
    with set_current_working_directory_to_temp_dir():
        builder = DummyGeneratorBasedBuilder()
        output_dir = "test-out"
        builder.download_and_prepare(output_dir)
        assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix())
        assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
        assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
        assert not os.path.exists(os.path.join(output_dir + ".incomplete"))


def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs):
    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
    builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
    assert builder._output_dir.startswith("mock://my_dataset")
    assert is_local_path(builder._cache_downloaded_dir)
    assert isinstance(builder._fs, type(mockfs))
    assert builder._fs.storage_options == mockfs.storage_options
    assert mockfs.exists("my_dataset/dataset_info.json")
    assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow")
    assert not mockfs.exists("my_dataset.incomplete")


def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog):
    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
    mockfs.makedirs("my_dataset")
    DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options)
    mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow")
    caplog.clear()
    with caplog.at_level(INFO, logger=get_logger().name):
        builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
    assert "Found cached dataset" in caplog.text


def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):
    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
    builder.download_and_prepare(file_format="parquet")
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
    )
    assert os.path.exists(parquet_path)
    assert pq.ParquetFile(parquet_path) is not None


def test_generator_based_builder_download_and_prepare_sharded(tmp_path):
    writer_batch_size = 25
    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
    with patch("datasets.config.MAX_SHARD_SIZE", 1):  # one batch per shard
        builder.download_and_prepare(file_format="parquet")
    expected_num_shards = 100 // writer_batch_size
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
    )
    assert os.path.exists(parquet_path)
    parquet_files = [
        pq.ParquetFile(parquet_path)
        for parquet_path in Path(tmp_path).rglob(
            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
        )
    ]
    assert len(parquet_files) == expected_num_shards
    assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100


def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
    writer_batch_size = 25
    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
    builder.download_and_prepare(file_format="parquet", max_shard_size=1)  # one batch per shard
    expected_num_shards = 100 // writer_batch_size
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
    )
    assert os.path.exists(parquet_path)
    parquet_files = [
        pq.ParquetFile(parquet_path)
        for parquet_path in Path(tmp_path).rglob(
            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
        )
    ]
    assert len(parquet_files) == expected_num_shards
    assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100


def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path):
    builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path)
    builder.download_and_prepare(num_proc=2)
    expected_num_shards = 2
    assert builder.info.splits["train"].num_examples == 400
    assert builder.info.splits["train"].shard_lengths == [200, 200]
    arrow_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
    )
    assert os.path.exists(arrow_path)
    ds = builder.as_dataset("train")
    assert len(ds) == 400
    assert ds.to_dict() == {
        "id": [i for _ in range(4) for i in range(100)],
        "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
    }


@pytest.mark.parametrize(
    "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
    builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
    with expectation:
        builder.download_and_prepare(num_proc=num_proc)


def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path):
    builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
    builder.download_and_prepare(file_format="parquet")
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
    )
    assert os.path.exists(parquet_path)
    assert pq.ParquetFile(parquet_path) is not None


def test_arrow_based_builder_download_and_prepare_sharded(tmp_path):
    builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
    with patch("datasets.config.MAX_SHARD_SIZE", 1):  # one batch per shard
        builder.download_and_prepare(file_format="parquet")
    expected_num_shards = 10
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
    )
    assert os.path.exists(parquet_path)
    parquet_files = [
        pq.ParquetFile(parquet_path)
        for parquet_path in Path(tmp_path).rglob(
            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
        )
    ]
    assert len(parquet_files) == expected_num_shards
    assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100


def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
    builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
    builder.download_and_prepare(file_format="parquet", max_shard_size=1)  # one table per shard
    expected_num_shards = 10
    assert builder.info.splits["train"].num_examples == 100
    parquet_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
    )
    assert os.path.exists(parquet_path)
    parquet_files = [
        pq.ParquetFile(parquet_path)
        for parquet_path in Path(tmp_path).rglob(
            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
        )
    ]
    assert len(parquet_files) == expected_num_shards
    assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100


def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path):
    builder = DummyArrowBasedBuilderWithShards(cache_dir=tmp_path)
    builder.download_and_prepare(num_proc=2)
    expected_num_shards = 2
    assert builder.info.splits["train"].num_examples == 400
    assert builder.info.splits["train"].shard_lengths == [200, 200]
    arrow_path = os.path.join(
        tmp_path,
        builder.dataset_name,
        "default",
        "0.0.0",
        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
    )
    assert os.path.exists(arrow_path)
    ds = builder.as_dataset("train")
    assert len(ds) == 400
    assert ds.to_dict() == {
        "id": [i for _ in range(4) for i in range(100)],
        "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
    }


@pytest.mark.parametrize(
    "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
    builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
    with expectation:
        builder.download_and_prepare(num_proc=num_proc)
datasets/tests/test_builder.py/0
{ "file_path": "datasets/tests/test_builder.py", "repo_id": "datasets", "token_count": 25355 }
88
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
datasets/tests/test_info_utils.py/0
{ "file_path": "datasets/tests/test_info_utils.py", "repo_id": "datasets", "token_count": 366 }
89
import pytest

from datasets.utils.version import Version


@pytest.mark.parametrize(
    "other, expected_equality",
    [
        (Version("1.0.0"), True),
        ("1.0.0", True),
        (Version("2.0.0"), False),
        ("2.0.0", False),
        ("1", False),
        ("a", False),
        (1, False),
        (None, False),
    ],
)
def test_version_equality_and_hash(other, expected_equality):
    version = Version("1.0.0")
    assert (version == other) is expected_equality
    assert (version != other) is not expected_equality
    assert (hash(version) == hash(other)) is expected_equality
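# Illustrative sketch (hypothetical, not the real datasets.utils.version.Version) of
# the equality/hash semantics these cases imply: equal to Version objects and to
# parseable "major.minor.patch" strings, False (rather than an exception) for
# anything else, and a hash derived from the canonical string so that
# hash(Version("1.0.0")) == hash("1.0.0").
class _VersionSketch:
    def __init__(self, version_str):
        self.tuple = tuple(int(x) for x in version_str.split("."))

    def __eq__(self, other):
        try:
            other_tuple = other.tuple if isinstance(other, _VersionSketch) else _VersionSketch(other).tuple
        except (AttributeError, TypeError, ValueError):
            return False  # "1", "a", 1 and None simply compare unequal
        return self.tuple == other_tuple

    def __hash__(self):
        return hash(".".join(str(x) for x in self.tuple))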
datasets/tests/test_version.py/0
{ "file_path": "datasets/tests/test_version.py", "repo_id": "datasets", "token_count": 254 }
90
<jupyter_start><jupyter_text>Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖

In this notebook, you'll learn to use A2C with [Panda-Gym](https://github.com/qgallouedec/panda-gym). You're going **to train a robotic arm** (Franka Emika Panda robot) to perform a task:

- `Reach`: the robot must place its end-effector at a target position.

After that, you'll be able **to train in other robotics tasks**.

🎮 Environments:
- [Panda-Gym](https://github.com/qgallouedec/panda-gym)

📚 RL-Library:
- [Stable-Baselines3](https://stable-baselines3.readthedocs.io/)

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

Objectives of this notebook 🏆

At the end of the notebook, you will:
- Be able to use **Panda-Gym**, the environment library.
- Be able to **train robots using A2C**.
- Understand why **we need to normalize the input**.
- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.

This notebook is from the Deep Reinforcement Learning Course

In this free course, you will:
- 📖 Study Deep Reinforcement Learning in **theory and practice**.
- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.
- 🤖 Train **agents in unique environments**.

And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course

Don’t forget to **sign up for the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates**).

The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5

Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗

Let's train our first robots 🤖

To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained model to the Hub and get the following results:

- `PandaReachDense-v3`: get a result of >= -3.5.

To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model; **the result = mean_reward - std of reward**.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

Set the GPU 💪

- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`
- `Hardware Accelerator > GPU`

Create a virtual display 🔽

During the notebook, we'll need to generate a replay video.
To do so, with Colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). Hence, the following cell will install the libraries and create and run a virtual screen 🖥<jupyter_code>%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip3 install pyvirtualdisplay

# Virtual display
from pyvirtualdisplay import Display

virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install dependencies 🔽

The first step is to install the dependencies; we’ll install multiple ones:
- `gymnasium`
- `panda-gym`: Contains the robotics arm environments.
- `stable-baselines3`: The SB3 deep reinforcement learning library.
- `huggingface_sb3`: Additional code for Stable-Baselines3 to load and upload models from the Hugging Face 🤗 Hub.
- `huggingface_hub`: Library allowing anyone to work with the Hub repositories.

⏲ The installation can **take 10 minutes**.<jupyter_code>!pip install stable-baselines3[extra]
!pip install gymnasium
!pip install huggingface_sb3
!pip install huggingface_hub
!pip install panda_gym<jupyter_output><empty_output><jupyter_text>Import the packages 📦<jupyter_code>import os

import gymnasium as gym
import panda_gym

from huggingface_sb3 import load_from_hub, package_to_hub
from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3.common.env_util import make_vec_env

from huggingface_hub import notebook_login<jupyter_output><empty_output><jupyter_text>PandaReachDense-v3 🦾

The agent we're going to train is a robotic arm that we need to control (moving the arm and using the end-effector).

In robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment.

In `PandaReach`, the robot must place its end-effector at a target position (green ball).

We're going to use the dense version of this environment. It means we'll get a *dense reward function* that **will provide a reward at each timestep** (the closer the agent is to completing the task, the higher the reward). This is contrary to a *sparse reward function*, where the environment **returns a reward if and only if the task is completed**.

Also, we're going to use the *End-effector displacement control*, which means the **action corresponds to the displacement of the end-effector**. We don't control the individual motion of each joint (joint control).

This way, **the training will be easier**.
Create the environment

The environment 🎮

In `PandaReachDense-v3`, the robotic arm must place its end-effector at a target position (green ball).<jupyter_code>env_id = "PandaReachDense-v3"

# Create the env
env = gym.make(env_id)

# Get the state space and action space
s_size = env.observation_space.shape
a_size = env.action_space

print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample())  # Get a random observation<jupyter_output><empty_output><jupyter_text>The observation space **is a dictionary with 3 different elements**:
- `achieved_goal`: (x,y,z) the current position of the end-effector.
- `desired_goal`: (x,y,z) the target position for the end-effector.
- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz).

Given that the observation is a dictionary, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**.<jupyter_code>print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample())  # Take a random action<jupyter_output><empty_output><jupyter_text>The action space is a vector with 3 values:
- Control x, y, z movement

Normalize observation and rewards

A good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html).

For that purpose, there is a wrapper that will compute a running average and standard deviation of input features. We also normalize rewards with this same wrapper by adding `norm_reward = True`.

[You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)<jupyter_code>env = make_vec_env(env_id, n_envs=4)

# Adding this wrapper to normalize the observation and the reward
env = # TODO: Add the wrapper<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>env = make_vec_env(env_id, n_envs=4)
env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)<jupyter_output><empty_output><jupyter_text>Create the A2C Model 🤖

For more information about A2C implementation with StableBaselines3 check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html#notes

To find the best parameters I checked the [official trained agents by the Stable-Baselines3 team](https://huggingface.co/sb3).<jupyter_code>model = # Create the A2C model and try to find the best parameters<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>model = A2C(policy = "MultiInputPolicy", env = env, verbose=1)<jupyter_output><empty_output><jupyter_text>Train the A2C agent 🏃

- Let's train our agent for 1,000,000 timesteps; don't forget to use GPU on Colab.
It will take approximately 25-40 min<jupyter_code>model.learn(1_000_000)

# Save the model and VecNormalize statistics when saving the agent
model.save("a2c-PandaReachDense-v3")
env.save("vec_normalize.pkl")<jupyter_output><empty_output><jupyter_text>Evaluate the agent 📈

- Now that our agent is trained, we need to **check its performance**.
- Stable-Baselines3 provides a method to do that: `evaluate_policy`<jupyter_code>from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Load the saved statistics
eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v3")])
eval_env = VecNormalize.load("vec_normalize.pkl", eval_env)

# We need to override the render_mode
eval_env.render_mode = "rgb_array"

# do not update them at test time
eval_env.training = False
# reward normalization is not needed at test time
eval_env.norm_reward = False

# Load the agent
model = A2C.load("a2c-PandaReachDense-v3")

mean_reward, std_reward = evaluate_policy(model, eval_env)

print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output><jupyter_text>Publish your trained model on the Hub 🔥

Now that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code.

📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/main#hugging-face--x-stable-baselines3-v20

By using `package_to_hub`, as we already mentioned in the former units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.

This way:
- You can **showcase your work** 🔥
- You can **visualize your agent playing** 👀
- You can **share with the community an agent that others can use** 💾
- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard

To be able to share your model with the community, there are three more steps to follow:

1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join

2️⃣ Sign in and then store your authentication token from the Hugging Face website.
- Create a new token (https://huggingface.co/settings/tokens) **with write role**
- Copy the token
- Run the cell below and paste the token<jupyter_code>notebook_login()
!git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`

3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `package_to_hub()` function.

For this environment, **running this cell can take approximately 10 min**<jupyter_code>from huggingface_sb3 import package_to_hub

package_to_hub(
    model=model,
    model_name=f"a2c-{env_id}",
    model_architecture="A2C",
    env_id=env_id,
    eval_env=eval_env,
    repo_id=f"ThomasSimonini/a2c-{env_id}",  # Change the username
    commit_message="Initial commit",
)<jupyter_output><empty_output><jupyter_text>Some additional challenges 🏆

The best way to learn **is to try things on your own**! Why not try `PandaPickAndPlace-v3`?

If you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks).
In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation, **if you move your robotic arm too much, you have a risk of breaking it**.

PandaPickAndPlace-v1 (this model uses the v1 version of the environment): https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1

And don't hesitate to check the panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.html

We provide you with the steps to train another agent (optional):

1. Define the environment called "PandaPickAndPlace-v3"
2. Make a vectorized environment
3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)
4. Create the A2C Model (don't forget verbose=1 to print the training logs).
5. Train it for 1M Timesteps
6. Save the model and VecNormalize statistics when saving the agent
7. Evaluate your agent
8. Publish your trained model on the Hub 🔥 with `package_to_hub`

Solution (optional)<jupyter_code># 1 - 2
env_id = "PandaPickAndPlace-v3"
env = make_vec_env(env_id, n_envs=4)

# 3
env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)

# 4
model = A2C(policy = "MultiInputPolicy", env = env, verbose=1)

# 5
model.learn(1_000_000)

# 6
model_name = "a2c-PandaPickAndPlace-v3"
model.save(model_name)
env.save("vec_normalize.pkl")

# 7
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Load the saved statistics
eval_env = DummyVecEnv([lambda: gym.make("PandaPickAndPlace-v3")])
eval_env = VecNormalize.load("vec_normalize.pkl", eval_env)

# do not update them at test time
eval_env.training = False
# reward normalization is not needed at test time
eval_env.norm_reward = False

# Load the agent
model = A2C.load(model_name)

mean_reward, std_reward = evaluate_policy(model, eval_env)

print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")

# 8
package_to_hub(
    model=model,
    model_name=f"a2c-{env_id}",
    model_architecture="A2C",
    env_id=env_id,
    eval_env=eval_env,
    repo_id=f"ThomasSimonini/a2c-{env_id}",  # TODO: Change the username
    commit_message="Initial commit",
)<jupyter_output><empty_output>
deep-rl-class/notebooks/unit6/unit6.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit6/unit6.ipynb", "repo_id": "deep-rl-class", "token_count": 4388 }
91
# Introduction to Deep Reinforcement Learning [[introduction-to-deep-reinforcement-learning]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Unit 1 thumbnail" width="100%">

Welcome to the most fascinating topic in Artificial Intelligence: **Deep Reinforcement Learning.**

Deep RL is a type of Machine Learning where an agent learns **how to behave** in an environment **by performing actions** and **seeing the results.**

In this first unit, **you'll learn the foundations of Deep Reinforcement Learning.**

Then, you'll **train your Deep Reinforcement Learning agent, a lunar lander, to land correctly on the Moon** using <a href="https://stable-baselines3.readthedocs.io/en/master/"> Stable-Baselines3 </a>, a Deep Reinforcement Learning library.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/lunarLander.gif" alt="LunarLander">

And finally, you'll **upload this trained agent to the Hugging Face Hub 🤗, a free, open platform where people can share ML models, datasets, and demos.**

It's essential **to master these elements** before diving into implementing Deep Reinforcement Learning agents. The goal of this chapter is to give you solid foundations.

After this unit, in a bonus unit, you'll be **able to train Huggy the Dog 🐶 to fetch the stick and play with him 🤗**.

<video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/huggy.mp4" type="video/mp4" controls autoplay loop mute />

So let's get started! 🚀
deep-rl-class/units/en/unit1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 477 }
92
# A Q-Learning example [[q-learning-example]]

To better understand Q-Learning, let's take a simple example:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-Example-2.jpg" alt="Maze-Example"/>

- You're a mouse in this tiny maze. You always **start at the same starting point.**
- The goal is **to eat the big pile of cheese at the bottom right-hand corner** and avoid the poison. After all, who doesn't like cheese?
- The episode ends if we eat the poison, **eat the big pile of cheese**, or if we take more than five steps.
- The learning rate is 0.1
- The discount rate (gamma) is 0.99

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-1.jpg" alt="Maze-Example"/>

The reward function goes like this:

- **+0:** Going to a state with no cheese in it.
- **+1:** Going to a state with a small cheese in it.
- **+10:** Going to the state with the big pile of cheese.
- **-10:** Going to the state with the poison and thus dying.
- **+0:** If we take more than five steps.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-2.jpg" alt="Maze-Example"/>

To train our agent to have an optimal policy (so a policy that goes right, right, down), **we will use the Q-Learning algorithm**.

## Step 1: Initialize the Q-table [[step1]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-1.jpg" alt="Maze-Example"/>

So, for now, **our Q-table is useless**; we need **to train our Q-function using the Q-Learning algorithm.**

Let's do it for 2 training timesteps:

Training timestep 1:

## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2]]

Because epsilon is big (= 1.0), I take a random action. In this case, I go right.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-3.jpg" alt="Maze-Example"/>

## Step 3: Perform action At, get Rt+1 and St+1 [[step3]]

By going right, I get a small cheese, so \\(R_{t+1} = 1\\) and I'm in a new state.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-4.jpg" alt="Maze-Example"/>

## Step 4: Update Q(St, At) [[step4]]

We can now update \\(Q(S_t, A_t)\\) using our formula.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-5.jpg" alt="Maze-Example"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-4.jpg" alt="Maze-Example"/>

Training timestep 2:

## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2-2]]

**I take a random action again, since epsilon=0.99 is big**. (Notice we decay epsilon a little bit because, as the training progresses, we want less and less exploration).

I took the action 'down'.
**This is not a good action since it leads me to the poison.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-6.jpg" alt="Maze-Example"/>

## Step 3: Perform action At, get Rt+1 and St+1 [[step3-3]]

Because I ate poison, **I get \\(R_{t+1} = -10\\), and I die.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-7.jpg" alt="Maze-Example"/>

## Step 4: Update Q(St, At) [[step4-4]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-8.jpg" alt="Maze-Example"/>

Because we're dead, we start a new episode. But what we see here is that, **with two exploration steps, my agent became smarter.**

As we continue exploring and exploiting the environment and updating Q-values using the TD target, the **Q-table will give us a better and better approximation. At the end of the training, we'll get an estimate of the optimal Q-function.**
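As a quick numeric sanity check (not in the original text), assume the Q-table is initialized to zeros. With the learning rate 0.1 and discount rate 0.99 given above, the two updates come out to:

```python
alpha, gamma = 0.1, 0.99  # learning rate and discount rate from the example

# Training timestep 1: going right earns a small cheese (R = +1)
q_right = 0.0 + alpha * ((1 + gamma * 0.0) - 0.0)  # TD target = 1 + 0.99 * 0, so Q = 0.1

# Training timestep 2: going down hits the poison (R = -10); the state is terminal,
# so there is no bootstrapped "max Q of next state" term
q_down = 0.0 + alpha * ((-10) - 0.0)               # Q = -1.0

print(q_right, q_down)  # 0.1 -1.0
```

So the Q-table now prefers "right" in the start state and penalizes "down" in the second state, which is exactly the "smarter" agent described above.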
deep-rl-class/units/en/unit2/q-learning-example.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning-example.mdx", "repo_id": "deep-rl-class", "token_count": 1402 }
93
# The advantages and disadvantages of policy-gradient methods

At this point, you might ask, "but Deep Q-Learning is excellent! Why use policy-gradient methods?". To answer this question, let's study the **advantages and disadvantages of policy-gradient methods**.

## Advantages

There are multiple advantages over value-based methods. Let's see some of them:

### The simplicity of integration

We can estimate the policy directly without storing additional data (action values).

### Policy-gradient methods can learn a stochastic policy

Policy-gradient methods can **learn a stochastic policy while value functions can't**. This has two consequences:

1. We **don't need to implement an exploration/exploitation trade-off by hand**. Since we output a probability distribution over actions, the agent explores **the state space without always taking the same trajectory.**

2. We also get rid of the problem of **perceptual aliasing**. Perceptual aliasing is when two states seem (or are) the same but need different actions.

Let's take an example: we have an intelligent vacuum cleaner whose goal is to suck the dust and avoid killing the hamsters.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster1.jpg" alt="Hamster 1"/>
</figure>

Our vacuum cleaner can only perceive where the walls are.

The problem is that the **two red (colored) states are aliased states because the agent perceives an upper and lower wall for each**.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster2.jpg" alt="Hamster 1"/>
</figure>

Under a deterministic policy, the policy will either always move right when in a red state or always move left. **Either case will cause our agent to get stuck and never suck the dust**.

Under a value-based Reinforcement learning algorithm, we learn a **quasi-deterministic policy** (an "epsilon-greedy strategy"). Consequently, our agent can **spend a lot of time before finding the dust**.

On the other hand, an optimal stochastic policy **will randomly move left or right in red (colored) states**. Consequently, **it will not be stuck and will reach the goal state with a high probability**.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster3.jpg" alt="Hamster 1"/>
</figure>

### Policy-gradient methods are more effective in high-dimensional action spaces and continuous action spaces

The problem with Deep Q-learning is that its **predictions assign a score (maximum expected future reward) for each possible action**, at each time step, given the current state.

But what if we have an infinite number of possible actions?

For instance, with a self-driving car, at each state, you can have a (near) infinite choice of actions (turning the wheel at 15°, 17.2°, 19.4°, honking, etc.). **We'll need to output a Q-value for each possible action**! And **taking the max action of a continuous output is an optimization problem itself**!

Instead, with policy-gradient methods, we output a **probability distribution over actions.**

### Policy-gradient methods have better convergence properties

In value-based methods, we use an aggressive operator to **change the value function: we take the maximum over Q-estimates**.
Consequently, the action probabilities may change dramatically for an arbitrarily small change in the estimated action values if that change results in a different action having the maximal value.

For instance, if, during training, the best action was left (with a Q-value of 0.22) and, in the training step after, it's right (since the right Q-value becomes 0.23), we have dramatically changed the policy, since it will now go right most of the time instead of left.

On the other hand, in policy-gradient methods, stochastic policy action preferences (the probability of taking each action) **change smoothly over time**.

## Disadvantages

Naturally, policy-gradient methods also have some disadvantages:

- **Frequently, policy-gradient methods converge to a local maximum instead of a global optimum.**
- Policy-gradient methods go slower, **step by step: they can take longer to train (inefficient).**
- Policy-gradient methods can have high variance. We'll see in the actor-critic unit why, and how we can solve this problem.

👉 If you want to go deeper into the advantages and disadvantages of policy-gradient methods, [you can check this video](https://youtu.be/y3oqOjHilio).
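To make the "probability distribution over actions" point concrete, here is a minimal sketch of a stochastic policy head in PyTorch (illustrative only; it is not taken from the course's code): the network outputs logits, a softmax turns them into action probabilities, and sampling from that distribution gives exploration for free, with no hand-tuned epsilon schedule.

```python
import torch
import torch.nn as nn


class StochasticPolicySketch(nn.Module):
    def __init__(self, state_dim: int, n_actions: int, hidden: int = 64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden), nn.ReLU(), nn.Linear(hidden, n_actions)
        )

    def act(self, state: torch.Tensor):
        probs = torch.softmax(self.net(state), dim=-1)  # distribution over actions
        dist = torch.distributions.Categorical(probs)
        action = dist.sample()                          # stochastic: exploration is built in
        return action, dist.log_prob(action)            # log-prob feeds the gradient update
```

Because the probabilities shift smoothly with the network weights, a small parameter change only nudges the action preferences, in contrast to the argmax flip described above.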
deep-rl-class/units/en/unit4/advantages-disadvantages.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/advantages-disadvantages.mdx", "repo_id": "deep-rl-class", "token_count": 1184 }
94
# Quiz

The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**.

### Q1: Which of the following tools are specifically designed for video games development?

<Question
  choices={[
    {
      text: "Unity (C#)",
      explain: "",
      correct: true,
    },
    {
      text: "Unreal Engine (C++)",
      explain: "",
      correct: true,
    },
    {
      text: "Godot (GDScript, C++, C#)",
      explain: "",
      correct: true,
    },
    {
      text: "JetBrains' Rider",
      explain: "Although useful for its support of C# for Unity, it's not a video games development IDE",
      correct: false,
    },
    {
      text: "JetBrains' CLion",
      explain: "Although useful for its support of C++ for Unreal Engine, it's not a video games development IDE",
      correct: false,
    },
    {
      text: "Microsoft Visual Studio and Visual Studio Code",
      explain: "Including support for both Unity and Unreal, they are generic IDEs, not video games oriented.",
      correct: false,
    },
  ]}
/>

### Q2: Which of the following statements are true about Unity ML-Agents?

<Question
  choices={[
    {
      text: "Unity `Scene` objects can be used to create learning environments",
      explain: "",
      correct: true,
    },
    {
      text: "Unity ML-Agents allows you to create and train your agents using Reinforcement Learning",
      explain: "",
      correct: true,
    },
    {
      text: "Its `Communicator` component manages the communication between Unity's C# Environments/Agents and a Python back-end",
      explain: "",
      correct: true,
    },
    {
      text: "The training process uses Reinforcement Learning algorithms, implemented in Pytorch",
      explain: "",
      correct: true,
    },
    {
      text: "Unity ML-Agents only supports Proximal Policy Optimization (PPO)",
      explain: "No, Unity ML-Agents supports several families of algorithms, including Actor-Critic which is going to be explained in the next section",
      correct: false,
    },
    {
      text: "It includes a Gym Wrapper and a multi-agent version of it called `PettingZoo`",
      explain: "",
      correct: true,
    },
  ]}
/>

### Q3: Fill the missing letters

- In Unity ML-Agents, the Policy of an Agent is called a b \_ \_ \_ n
- The component in charge of orchestrating the agents is called the \_ c \_ \_ \_ m \_

<details>
<summary>Solution</summary>
<ul>
<li>b r a i n</li>
<li>a c a d e m y</li>
</ul>
</details>

### Q4: Define in your own words what a `raycast` is

<details>
<summary>Solution</summary>
A raycast is (most of the time) a linear projection, like a `laser`, which aims to detect collisions with objects.
</details>

### Q5: What are the differences between capturing the environment using `frames` or `raycasts`?

<Question
  choices={[
    {
      text: "By using `frames`, the environment is defined by each of the pixels of the screen. By using `raycasts`, we only send a sample of those pixels.",
      explain: "`Raycasts` don't have anything to do with pixels. They are linear projections (lasers) that we spawn to look for collisions.",
      correct: false,
    },
    {
      text: "By using `raycasts`, the environment is defined by each of the pixels of the screen. By using `frames`, we spawn a (usually) line to check what objects it collides with",
      explain: "It's the other way around - `frames` collect pixels, `raycasts` check for collisions.",
      correct: false,
    },
    {
      text: "By using `frames`, we collect all the pixels of the screen, which define the environment.
By using `raycast`, we don't use pixels; we spawn (normally) lines and check their collisions",
      explain: "",
      correct: true,
    },
  ]}
/>

### Q6: Name several environment and agent input variables used to train the agent in the Snowball or Pyramid environments

<details>
<summary>Solution</summary>

- Collisions of the raycasts spawned from the agent detecting blocks, (invisible) walls, stones, our target, switches, etc.
- Traditional inputs describing agent features, such as its speed
- Boolean vars, such as the switch (on/off) in Pyramids or the `can I shoot?` in SnowballTarget.

</details>

Congrats on finishing this Quiz 🥳! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
deep-rl-class/units/en/unit5/quiz.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/quiz.mdx", "repo_id": "deep-rl-class", "token_count": 1539 }
95
# Self-Play: a classic technique to train competitive agents in adversarial games

Now that we've studied the basics of multi-agents, we're ready to go deeper. As mentioned in the introduction, we're going **to train agents in an adversarial game with SoccerTwos, a 2vs2 game**.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/>
<figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption>
</figure>

## What is Self-Play?

Training agents correctly in an adversarial game can be **quite complex**.

On the one hand, we need to find a way to get a well-trained opponent to play against our training agent. And on the other hand, if we find a very well-trained opponent, how will our agent improve its policy when the opponent is too strong?

Think of a child that just started to learn soccer. Playing against a very good soccer player will be useless since it will be too hard to win or at least get the ball from time to time. So the child will continuously lose without having time to learn a good policy.

The best solution would be **to have an opponent that is on the same level as the agent and will upgrade its level as the agent upgrades its own**. Because if the opponent is too strong, we’ll learn nothing; if it is too weak, we’ll overlearn behavior that is useless against stronger opponents.

This solution is called *self-play*. In self-play, **the agent uses former copies of itself (of its policy) as an opponent**. This way, the agent will play against an agent of the same level (challenging but not too much), have opportunities to gradually improve its policy, and then update its opponent as it becomes better. It’s a way to bootstrap an opponent and progressively increase the opponent's complexity.

It’s the same way humans learn in competition:
- We start to train against an opponent of a similar level
- Then we learn from it, and when we acquire some skills, we can move further with stronger opponents.

We do the same with self-play:
- We **start with a copy of our agent as an opponent**; this way, this opponent is on a similar level.
- We **learn from it** and, when we acquire some skills, we **update our opponent with a more recent copy of our training policy**.

The theory behind self-play is not something new. It was already used by Arthur Samuel’s checkers player system in the fifties and by Gerald Tesauro’s TD-Gammon in 1995. If you want to learn more about the history of self-play, [check out this very good blogpost by Andrew Cohen](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents)

## Self-Play in MLAgents

Self-Play is integrated into the MLAgents library and is managed by multiple hyperparameters that we’re going to study. But the main focus, as explained in the documentation, is the **tradeoff between the skill level and generality of the final policy and the stability of learning**.

Training against a set of slowly changing or unchanging adversaries with low diversity **results in more stable training, but risks overfitting if the change is too slow.**

So we need to control:
- How **often we change opponents** with the `swap_steps` and `team_change` parameters.
- The **number of opponents saved** with the `window` parameter.
A larger value of `window` means that an agent's pool of opponents will contain a larger diversity of behaviors since it will contain policies from earlier in the training run.
- The **probability of playing against the current self vs opponent** sampled from the pool with `play_against_latest_model_ratio`. A larger value of `play_against_latest_model_ratio` indicates that an agent will be playing against the current opponent more often.
- The **number of training steps before saving a new opponent** with the `save_steps` parameter. A larger value of `save_steps` will yield a set of opponents that cover a wider range of skill levels and possibly play styles since the policy receives more training.

To get more details about these hyperparameters, you definitely need [to check out this part of the documentation](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Training-Configuration-File.md#self-play)

## The ELO Score to evaluate our agent

### What is ELO Score?

In adversarial games, tracking the **cumulative reward is not always a meaningful metric to track the learning progress**, because this metric is **dependent only on the skill of the opponent.**

Instead, we’re using an ***ELO rating system*** (named after Arpad Elo) that calculates the **relative skill level** between 2 players from a given population in a zero-sum game. In a zero-sum game: one agent wins, and the other agent loses. It’s a mathematical representation of a situation in which each participant’s gain or loss of utility **is exactly balanced by the gain or loss of the utility of the other participants.** We talk about zero-sum games because the sum of utility is equal to zero.

This ELO (starting at a specific score: frequently 1200) can decrease initially but should increase progressively during the training.

The Elo system is **inferred from the wins, losses, and draws against other players.** It means that player ratings depend **on the ratings of their opponents and the results scored against them.**

Elo defines an Elo score that represents the relative skill of a player in a zero-sum game. **We say relative because it depends on the performance of opponents.**

The central idea is to think of the performance of a player **as a random variable that is normally distributed.**

The difference in rating between 2 players serves as **the predictor of the outcomes of a match.** If a player wins while its probability of winning was already high, it will only take a few points from its opponent, since the result was expected.

After every game:
- The winning player takes **points from the losing one.**
- The number of points is determined **by the difference in the 2 players' ratings (hence relative).**
- If the higher-rated player wins → few points will be taken from the lower-rated player.
- If the lower-rated player wins → a lot of points will be taken from the higher-rated player.
- If it’s a draw → the lower-rated player gains a few points from the higher.

So if A and B have ratings Ra and Rb, then the **expected scores are** given by:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo1.png" alt="ELO Score"/>

Then, at the end of the game, we need to update the player’s actual Elo score. We use a linear adjustment **proportional to the amount by which the player over-performed or under-performed.**

We also define a maximum adjustment rating per game: the K-factor.
- K=16 for masters.
- K=32 for weaker players.
If Player A has an expected score of Ea but actually scored Sa, the player’s rating is updated using the formula:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo2.png" alt="ELO Score"/>

### Example

Let's take an example:
- Player A has a rating of 2600
- Player B has a rating of 2300

We first calculate the expected scores:

\\(E_{A} = \frac{1}{1+10^{(2300-2600)/400}} = 0.849 \\)

\\(E_{B} = \frac{1}{1+10^{(2600-2300)/400}} = 0.151 \\)

If the organizers determined that K=16 and A wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(1-0.849) = 2602 \\)

\\(ELO_B = 2300 + 16*(0-0.151) = 2298 \\)

If the organizers determined that K=16 and B wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(0-0.849) = 2586 \\)

\\(ELO_B = 2300 + 16*(1-0.151) = 2314 \\)

### The Advantages

Using the ELO score has multiple advantages:
- Points are **always balanced** (more points are exchanged when there is an unexpected outcome, but the sum is always the same).
- It is a **self-corrected system** since if a player wins against a weak player, they will only win a few points.
- It **works with team games**: we calculate the average for each team and use it in Elo.

### The Disadvantages

- ELO **does not take into account the individual contribution** of each player on the team.
- Rating deflation: **maintaining the same rating requires sustained skill over time**.
- **Ratings can't be compared across different periods of history**.
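The two formulas shown in the images above translate directly into code. A minimal sketch (a hypothetical helper, not MLAgents' actual implementation) that reproduces the worked example:

```python
def elo_update(r_a: float, r_b: float, score_a: float, k: float = 16.0):
    """One zero-sum Elo update; score_a is 1 for a win, 0 for a loss, 0.5 for a draw."""
    e_a = 1.0 / (1.0 + 10 ** ((r_b - r_a) / 400.0))  # expected score of A
    e_b = 1.0 - e_a                                   # expected score of B
    new_a = r_a + k * (score_a - e_a)
    new_b = r_b + k * ((1.0 - score_a) - e_b)
    return new_a, new_b


print(elo_update(2600, 2300, score_a=1))  # ≈ (2602.4, 2297.6): the expected result moves few points
print(elo_update(2600, 2300, score_a=0))  # ≈ (2586.4, 2313.6): the upset moves many more
```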
deep-rl-class/units/en/unit7/self-play.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/self-play.mdx", "repo_id": "deep-rl-class", "token_count": 2245 }
96
# Hands-on [[hands-on]]

Now that you've learned to use Optuna, here are some ideas to apply what you've learned:

1️⃣ **Beat your LunarLander-v2 agent results**, by using Optuna to find a better set of hyperparameters (see the sketch after this section for one possible starting point). You can also try with another environment, such as MountainCar-v0 or CartPole-v1.

2️⃣ **Beat your SpaceInvaders agent results**. By doing this, you'll see how valuable and powerful Optuna can be in training better agents.

Have fun!

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

### Keep Learning, stay awesome 🤗
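As a possible starting point for challenge 1️⃣, here is a hedged sketch of an Optuna objective for a Stable-Baselines3 PPO agent on LunarLander-v2 (the hyperparameter ranges and the short training budget are illustrative guesses, not recommendations; it assumes a Gymnasium install with Box2D support):

```python
import gymnasium as gym
import optuna
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy


def objective(trial: optuna.Trial) -> float:
    # Search space: purely illustrative ranges
    lr = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    gamma = trial.suggest_float("gamma", 0.95, 0.9999)
    n_steps = trial.suggest_categorical("n_steps", [512, 1024, 2048])

    env = gym.make("LunarLander-v2")
    model = PPO("MlpPolicy", env, learning_rate=lr, gamma=gamma, n_steps=n_steps, verbose=0)
    model.learn(total_timesteps=50_000)  # short budget per trial; raise it for real runs

    mean_reward, _ = evaluate_policy(model, env, n_eval_episodes=10)
    return mean_reward


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print(study.best_params)
```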
deep-rl-class/units/en/unitbonus2/hands-on.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/hands-on.mdx", "repo_id": "deep-rl-class", "token_count": 207 }
97
# Conclusion:

**Congratulations on finishing this bonus unit!**

You have learned the process of recording expert demonstrations and training the agent using IL, which can be an alternative to training in-game agents with RL in some cases.

This tutorial was written by [Ivan Dodic](https://github.com/Ivan-267). Thanks to [Edward Beeching](https://twitter.com/edwardbeeching) and [Thomas Simonini](https://twitter.com/thomassimonini) for their reviews and feedback.
deep-rl-class/units/en/unitbonus5/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus5/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 115 }
98
<!--- Copyright 2024- The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc; you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

Then you need to install our open source documentation builder tool:

```bash
pip install git+https://github.com/huggingface/doc-builder
```

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing, for instance). You don't have to commit the built documentation.

---

## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview {package_name} {path_to_docs}
```

For example:

```bash
doc-builder preview diffusers docs/source/en
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment with a link to where the documentation with your changes lives.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md).

Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.

So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:

```md
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```

and of course, if you moved it to another file, then:

```md
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich moved section set, please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
## Writing Documentation - Specification

The `huggingface/diffusers` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown.

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new Markdown (.md) file under `docs/source/<languageCode>`.
- Link that file in `docs/source/<languageCode>/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four.

### Adding a new pipeline/scheduler

When adding a new pipeline:

- Create a file `xxx.md` under `docs/source/<languageCode>/api/pipelines` (don't hesitate to copy an existing file as a template).
- Link that file in the (*Diffusers Summary*) section in `docs/source/api/pipelines/overview.md`, along with the link to the paper, and a colab notebook (if available).
- Write a short overview of the diffusion model:
    - Overview with paper & authors
    - Paper abstract
    - Tips and tricks and how to use it best
    - Possibly an end-to-end example of how to use it
- Add all the pipeline classes that should be linked in the diffusion model. These classes should be added using our Markdown syntax. By default, as follows:

```
[[autodoc]] XXXPipeline
    - all
    - __call__
```

This will include every public method of the pipeline that is documented, as well as the `__call__` method that is not documented by default. If you just want to add additional methods that are not documented, you can put the list of all methods to add in a list that contains `all`.

```
[[autodoc]] XXXPipeline
    - all
    - __call__
    - enable_attention_slicing
    - disable_attention_slicing
    - enable_xformers_memory_efficient_attention
    - disable_xformers_memory_efficient_attention
```

You can follow the same process to create a new scheduler under the `docs/source/<languageCode>/api/schedulers` folder.

### Writing source documentation

Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`.

When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package.

If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`pipelines.ImagePipelineOutput\`\]. This will be converted into a link with `pipelines.ImagePipelineOutput` in the description. To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~pipelines.ImagePipelineOutput\`\] will generate a link with `ImagePipelineOutput` in the description. The same works for methods, so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].

#### Defining arguments in a method

Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description:

```
Args:
    n_layers (`int`): The number of layers of the model.
```

If the description is too long to fit in one line, another indentation is necessary before writing the description after the argument.

Here's an example showcasing everything so far:

```
Args:
    input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
        Indices of input sequence tokens in the vocabulary.

        Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
        [`~PreTrainedTokenizer.__call__`] for details.

        [What are input IDs?](../glossary#input-ids)
```

For optional arguments or arguments with defaults, we follow the following syntax: imagine we have a function with the following signature:

```py
def my_function(x: str=None, a: float=3.14):
```

then its documentation should look like this:

```
Args:
    x (`str`, *optional*):
        This argument controls ...
    a (`float`, *optional*, defaults to `3.14`):
        This argument is used to ...
```

Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even if the first line describing your argument type and its default gets long, you can't break it on several lines. You can however write as many lines as you want in the indented description (see the example above with `input_ids`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:

````
```
# first line of code
# second line
# etc
```
````

#### Writing a return block

The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return.

Here's an example of a single value return:

```
Returns:
    `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

Here's an example of a tuple return, comprising several objects:

```
Returns:
    `tuple(torch.Tensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
    - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.Tensor` of shape `(1,)` --
      Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
    - **prediction_scores** (`torch.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
      Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

#### Adding an image

Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If it is an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.
## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily.
diffusers/docs/README.md/0
{ "file_path": "diffusers/docs/README.md", "repo_id": "diffusers", "token_count": 3142 }
99
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Logging

🤗 Diffusers has a centralized logging system to easily manage the verbosity of the library. The default verbosity is set to `WARNING`.

To change the verbosity level, use one of the direct setters. For instance, to change the verbosity to the `INFO` level:

```python
import diffusers

diffusers.logging.set_verbosity_info()
```

You can also use the environment variable `DIFFUSERS_VERBOSITY` to override the default verbosity. You can set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`. For example:

```bash
DIFFUSERS_VERBOSITY=error ./myprogram.py
```

Additionally, some `warnings` can be disabled by setting the environment variable `DIFFUSERS_NO_ADVISORY_WARNINGS` to a true value, like `1`. This disables any warning logged by [`logger.warning_advice`]. For example:

```bash
DIFFUSERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py
```

Here is an example of how to use the same logger as the library in your own module or script:

```python
from diffusers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger("diffusers")
logger.info("INFO")
logger.warning("WARN")
```

All methods of the logging module are documented below. The main methods are [`logging.get_verbosity`] to get the current level of verbosity in the logger and [`logging.set_verbosity`] to set the verbosity to the level of your choice.

In order from the least verbose to the most verbose:

| Method | Integer value | Description |
|----------------------------------------------------------:|--------------:|----------------------------------------------------:|
| `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` | 50 | only report the most critical errors |
| `diffusers.logging.ERROR` | 40 | only report errors |
| `diffusers.logging.WARNING` or `diffusers.logging.WARN` | 30 | only report errors and warnings (default) |
| `diffusers.logging.INFO` | 20 | only report errors, warnings, and basic information |
| `diffusers.logging.DEBUG` | 10 | report all information |

By default, `tqdm` progress bars are displayed during model download. [`logging.disable_progress_bar`] and [`logging.enable_progress_bar`] are used to enable or disable this behavior.

## Base setters

[[autodoc]] utils.logging.set_verbosity_error

[[autodoc]] utils.logging.set_verbosity_warning

[[autodoc]] utils.logging.set_verbosity_info

[[autodoc]] utils.logging.set_verbosity_debug

## Other functions

[[autodoc]] utils.logging.get_verbosity

[[autodoc]] utils.logging.set_verbosity

[[autodoc]] utils.logging.get_logger

[[autodoc]] utils.logging.enable_default_handler

[[autodoc]] utils.logging.disable_default_handler

[[autodoc]] utils.logging.enable_explicit_format

[[autodoc]] utils.logging.reset_format

[[autodoc]] utils.logging.enable_progress_bar

[[autodoc]] utils.logging.disable_progress_bar
diffusers/docs/source/en/api/logging.md/0
{ "file_path": "diffusers/docs/source/en/api/logging.md", "repo_id": "diffusers", "token_count": 1351 }
100
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Kandinsky 2.2

Kandinsky 2.2 was created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), and [Denis Dimitrov](https://github.com/denndimitrov).

The description from its GitHub page is:

*Kandinsky 2.2 brings substantial improvements upon its predecessor, Kandinsky 2.1, by introducing a new, more powerful image encoder - CLIP-ViT-G and the ControlNet support. The switch to CLIP-ViT-G as the image encoder significantly increases the model's capability to generate more aesthetic pictures and better understand text, thus enhancing the model's overall performance. The addition of the ControlNet mechanism allows the model to effectively control the process of generating images. This leads to more accurate and visually appealing outputs and opens new possibilities for text-guided image manipulation.*

The original codebase can be found at [ai-forever/Kandinsky-2](https://github.com/ai-forever/Kandinsky-2).

<Tip>

Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting.

</Tip>

<Tip>

Make sure to check out the schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## KandinskyV22PriorPipeline

[[autodoc]] KandinskyV22PriorPipeline
    - all
    - __call__
    - interpolate

## KandinskyV22Pipeline

[[autodoc]] KandinskyV22Pipeline
    - all
    - __call__

## KandinskyV22CombinedPipeline

[[autodoc]] KandinskyV22CombinedPipeline
    - all
    - __call__

## KandinskyV22ControlnetPipeline

[[autodoc]] KandinskyV22ControlnetPipeline
    - all
    - __call__

## KandinskyV22PriorEmb2EmbPipeline

[[autodoc]] KandinskyV22PriorEmb2EmbPipeline
    - all
    - __call__
    - interpolate

## KandinskyV22Img2ImgPipeline

[[autodoc]] KandinskyV22Img2ImgPipeline
    - all
    - __call__

## KandinskyV22Img2ImgCombinedPipeline

[[autodoc]] KandinskyV22Img2ImgCombinedPipeline
    - all
    - __call__

## KandinskyV22ControlnetImg2ImgPipeline

[[autodoc]] KandinskyV22ControlnetImg2ImgPipeline
    - all
    - __call__

## KandinskyV22InpaintPipeline

[[autodoc]] KandinskyV22InpaintPipeline
    - all
    - __call__

## KandinskyV22InpaintCombinedPipeline

[[autodoc]] KandinskyV22InpaintCombinedPipeline
    - all
    - __call__
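As a quick orientation (a minimal sketch, not part of the original API reference), a typical text-to-image call goes through the combined pipeline, which chains the prior and the decoder; loading via [`AutoPipelineForText2Image`] from the community decoder checkpoint mentioned above should resolve to it:

```py
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe(prompt="A portrait of a cat wearing a spacesuit, 4k photo").images[0]
image.save("cat.png")
```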
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# SDXL Turbo

Stable Diffusion XL (SDXL) Turbo was proposed in [Adversarial Diffusion Distillation](https://stability.ai/research/adversarial-diffusion-distillation) by Axel Sauer, Dominik Lorenz, Andreas Blattmann, and Robin Rombach.

The abstract from the paper is:

*We introduce Adversarial Diffusion Distillation (ADD), a novel training approach that efficiently samples large-scale foundational image diffusion models in just 1–4 steps while maintaining high image quality. We use score distillation to leverage large-scale off-the-shelf image diffusion models as a teacher signal in combination with an adversarial loss to ensure high image fidelity even in the low-step regime of one or two sampling steps. Our analyses show that our model clearly outperforms existing few-step methods (GANs, Latent Consistency Models) in a single step and reaches the performance of state-of-the-art diffusion models (SDXL) in only four steps. ADD is the first method to unlock single-step, real-time image synthesis with foundation models.*

## Tips

- SDXL Turbo uses the exact same architecture as [SDXL](./stable_diffusion_xl), which means it also has the same API. Please refer to the [SDXL](./stable_diffusion_xl) API reference for more details.
- SDXL Turbo should be used with `guidance_scale=0.0` to disable classifier-free guidance.
- SDXL Turbo should use `timestep_spacing='trailing'` for the scheduler and use between 1 and 4 steps.
- SDXL Turbo has been trained to generate images of size 512x512.
- SDXL Turbo is open-access, but not open-source, meaning that one might have to buy a model license in order to use it for commercial applications. Make sure to read the [official model card](https://huggingface.co/stabilityai/sdxl-turbo) to learn more.

<Tip>

To learn how to use SDXL Turbo for various tasks, how to optimize performance, and other usage examples, take a look at the [SDXL Turbo](../../../using-diffusers/sdxl_turbo) guide.

Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the official base and refiner model checkpoints!

</Tip>
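Putting these tips together, a minimal text-to-image call might look like the sketch below; the prompt is illustrative, and the checkpoint is the official one from the model card.

```python
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

# A single denoising step with guidance disabled, per the tips above.
image = pipeline(
    prompt="a photo of a raccoon reading a book in a library",
    num_inference_steps=1,
    guidance_scale=0.0,
).images[0]
image.save("sdxl_turbo.png")
```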
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# ConsistencyDecoderScheduler

This scheduler is a part of the [`ConsistencyDecoderPipeline`] and was introduced in [DALL-E 3](https://openai.com/dall-e-3).

The original codebase can be found at [openai/consistency_models](https://github.com/openai/consistency_models).

## ConsistencyDecoderScheduler

[[autodoc]] schedulers.scheduling_consistency_decoder.ConsistencyDecoderScheduler
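In practice, this scheduler is rarely instantiated directly; it runs inside the consistency decoder when the default Stable Diffusion VAE decoder is replaced. The sketch below assumes the `openai/consistency-decoder` checkpoint and uses [`ConsistencyDecoderVAE`] to swap decoders.

```python
import torch
from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline

# The consistency decoder uses ConsistencyDecoderScheduler internally.
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

image = pipeline("horse", generator=torch.manual_seed(0)).images[0]
image.save("horse.png")
```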
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# IPNDMScheduler

`IPNDMScheduler` is a fourth-order Improved Pseudo Linear Multistep scheduler. The original implementation can be found at [crowsonkb/v-diffusion-pytorch](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296).

## IPNDMScheduler

[[autodoc]] IPNDMScheduler

## SchedulerOutput

[[autodoc]] schedulers.scheduling_utils.SchedulerOutput
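Like other Diffusers schedulers, `IPNDMScheduler` can be swapped into an existing pipeline with `from_config`. The sketch below is a minimal example under the assumption that the chosen checkpoint is compatible with this scheduler; the checkpoint and step count are illustrative.

```python
import torch
from diffusers import DiffusionPipeline, IPNDMScheduler

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Reuse the existing scheduler configuration when swapping schedulers.
pipeline.scheduler = IPNDMScheduler.from_config(pipeline.scheduler.config)

image = pipeline("an astronaut riding a horse on mars", num_inference_steps=50).images[0]
```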
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# ONNX Runtime

🤗 [Optimum](https://github.com/huggingface/optimum) provides a Stable Diffusion pipeline compatible with ONNX Runtime. You'll need to install 🤗 Optimum with the following command for ONNX Runtime support:

```bash
pip install -q optimum["onnxruntime"]
```

This guide will show you how to use the Stable Diffusion and Stable Diffusion XL (SDXL) pipelines with ONNX Runtime.

## Stable Diffusion

To load and run inference, use the [`~optimum.onnxruntime.ORTStableDiffusionPipeline`]. If you want to load a PyTorch model and convert it to the ONNX format on-the-fly, set `export=True`:

```python
from optimum.onnxruntime import ORTStableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True)
prompt = "sailing ship in storm by Leonardo da Vinci"
image = pipeline(prompt).images[0]
pipeline.save_pretrained("./onnx-stable-diffusion-v1-5")
```

<Tip warning={true}>

Generating multiple prompts in a batch seems to take too much memory. While we look into it, you may need to iterate instead of batching.

</Tip>

To export the pipeline in the ONNX format offline and use it later for inference, use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command:

```bash
optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/
```

Then to perform inference (you don't have to specify `export=True` again):

```python
from optimum.onnxruntime import ORTStableDiffusionPipeline

model_id = "sd_v15_onnx"
pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id)
prompt = "sailing ship in storm by Leonardo da Vinci"
image = pipeline(prompt).images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/stable_diffusion_v1_5_ort_sail_boat.png">
</div>

You can find more examples in 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/), and Stable Diffusion is supported for text-to-image, image-to-image, and inpainting.
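For instance, image-to-image follows the same pattern with [`~optimum.onnxruntime.ORTStableDiffusionImg2ImgPipeline`]. The sketch below assumes the `sd_v15_onnx` directory exported by the CLI command above; the input image and prompt are illustrative.

```python
from diffusers.utils import load_image
from optimum.onnxruntime import ORTStableDiffusionImg2ImgPipeline

pipeline = ORTStableDiffusionImg2ImgPipeline.from_pretrained("sd_v15_onnx")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

image = pipeline(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
).images[0]
```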
## Stable Diffusion XL

To load and run inference with SDXL, use the [`~optimum.onnxruntime.ORTStableDiffusionXLPipeline`]:

```python
from optimum.onnxruntime import ORTStableDiffusionXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipeline = ORTStableDiffusionXLPipeline.from_pretrained(model_id)
prompt = "sailing ship in storm by Leonardo da Vinci"
image = pipeline(prompt).images[0]
```

To export the pipeline in the ONNX format and use it later for inference, use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command:

```bash
optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/
```

SDXL in the ONNX format is supported for text-to-image and image-to-image.
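Image-to-image with SDXL should follow the same pattern through [`~optimum.onnxruntime.ORTStableDiffusionXLImg2ImgPipeline`]; the sketch below assumes the `sd_xl_onnx/` directory exported above and an illustrative input image.

```python
from diffusers.utils import load_image
from optimum.onnxruntime import ORTStableDiffusionXLImg2ImgPipeline

pipeline = ORTStableDiffusionXLImg2ImgPipeline.from_pretrained("sd_xl_onnx/")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((1024, 1024))

image = pipeline(prompt="A fantasy landscape, trending on artstation", image=init_image).images[0]
```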
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Kandinsky 2.2

<Tip warning={true}>

This script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset.

</Tip>

Kandinsky 2.2 is a multilingual text-to-image model capable of producing more photorealistic images. The model includes an image prior model for creating image embeddings from text prompts, and a decoder model that generates images based on the prior model's embeddings. That's why you'll find two separate scripts in Diffusers for Kandinsky 2.2, one for training the prior model and one for training the decoder model. You can train both models separately, but to get the best results, you should train both the prior and decoder models.

Depending on your GPU, you may need to enable `gradient_checkpointing` (⚠️ not supported for the prior model!), `mixed_precision`, and `gradient_accumulation_steps` to help fit the model into memory and to speed up training. You can reduce your memory usage even more by enabling memory-efficient attention with [xFormers](../optimization/xformers) (version [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) fails for training on some GPUs so you may need to install a development version instead).

This guide explores the [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) and the [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) scripts to help you become more familiar with them, and how you can adapt them for your own use-case.

Before running the scripts, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:

```bash
cd examples/kandinsky2_2/text_to_image
pip install -r requirements.txt
```

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training scripts that are important for understanding how to modify them, but they don't cover every aspect of the scripts in detail. If you're interested in learning more, feel free to read through the scripts and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training scripts provide many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) function. The training scripts provide default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.

For example, to speed up training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_text_to_image_prior.py \
  --mixed_precision="fp16"
```

Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so let's get straight to a walkthrough of the Kandinsky training scripts!

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image_prior.py \
  --snr_gamma=5.0
```

## Training script

The training script is also similar to the [Text-to-image](text2image#training-script) training guide, but it's been modified to support training the prior and decoder models. This guide focuses on the code that is unique to the Kandinsky 2.2 training scripts.

<hfoptions id="script">
<hfoption id="prior model">

The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) function contains the code for preparing the dataset and training the model.
One of the main differences you'll notice right away is that the training script also loads a [`~transformers.CLIPImageProcessor`] - in addition to a scheduler and tokenizer - for preprocessing images and a [`~transformers.CLIPVisionModelWithProjection`] model for encoding the images:

```py
noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample")
image_processor = CLIPImageProcessor.from_pretrained(
    args.pretrained_prior_model_name_or_path, subfolder="image_processor"
)
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer")

with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
    ).eval()
    text_encoder = CLIPTextModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype
    ).eval()
```

Kandinsky uses a [`PriorTransformer`] to generate the image embeddings, so you'll want to set up the optimizer to learn the prior model's parameters.

```py
prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior")
prior.train()

optimizer = optimizer_cls(
    prior.parameters(),
    lr=args.learning_rate,
    betas=(args.adam_beta1, args.adam_beta2),
    weight_decay=args.adam_weight_decay,
    eps=args.adam_epsilon,
)
```

Next, the input captions are tokenized, and images are [preprocessed](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632) by the [`~transformers.CLIPImageProcessor`]:

```py
def preprocess_train(examples):
    images = [image.convert("RGB") for image in examples[image_column]]
    examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
    examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples)
    return examples
```

Finally, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) converts the input images into latents, adds noise to the image embeddings, and makes a prediction:

```py
model_pred = prior(
    noisy_latents,
    timestep=timesteps,
    proj_embedding=prompt_embeds,
    encoder_hidden_states=text_encoder_hidden_states,
    attention_mask=text_mask,
).predicted_image_embedding
```

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

</hfoption>
<hfoption id="decoder model">

The [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) function contains the code for preparing the dataset and training the model.
Unlike the prior model, the decoder initializes a [`VQModel`] to decode the latents into images, and it uses a [`UNet2DConditionModel`]:

```py
with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
    vae = VQModel.from_pretrained(
        args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype
    ).eval()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
    ).eval()
unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet")
```

Next, the script includes several image transforms and a [preprocessing](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622) function for applying the transforms to the images and returning the pixel values:

```py
def preprocess_train(examples):
    images = [image.convert("RGB") for image in examples[image_column]]
    examples["pixel_values"] = [train_transforms(image) for image in images]
    examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
    return examples
```

Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706) handles converting the images to latents, adding noise, and predicting the noise residual.

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

```py
model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4]
```

</hfoption>
</hfoptions>

## Launch the script

Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀

You'll train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters, but you can also create and train on your own dataset by following the [Create a dataset for training](create_dataset) guide. Set the environment variable `DATASET_NAME` to the name of the dataset on the Hub or if you're training on your own files, set the environment variable `TRAIN_DIR` to a path to your dataset.

If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.

<Tip>

To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You’ll also need to add the `--validation_prompt` to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.
</Tip>

<hfoptions id="training-inference">
<hfoption id="prior model">

```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot naruto, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-prior-naruto-model"
```

</hfoption>
<hfoption id="decoder model">

```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --validation_prompts="A robot naruto, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-decoder-naruto-model"
```

</hfoption>
</hfoptions>

Once training is finished, you can use your newly trained model for inference!

<hfoptions id="training-inference">
<hfoption id="prior model">

```py
import torch
from diffusers import AutoPipelineForText2Image, DiffusionPipeline

output_dir = "kandi2-prior-naruto-model"  # the --output_dir used during training
prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in prior_pipeline.components.items()}
pipeline = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16
)

pipeline.enable_model_cpu_offload()

prompt = "A robot naruto, 4k photo"
image = pipeline(prompt=prompt).images[0]
```

<Tip>

Feel free to replace `kandinsky-community/kandinsky-2-2-decoder` with your own trained decoder checkpoint!

</Tip>

</hfoption>
<hfoption id="decoder model">

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()

prompt = "A robot naruto, 4k photo"
image = pipeline(prompt=prompt).images[0]
```

For the decoder model, you can also perform inference from a saved checkpoint which can be useful for viewing intermediate results. In this case, load the checkpoint into the UNet:

```py
import torch
from diffusers import AutoPipelineForText2Image, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-<N>/unet")

pipeline = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

image = pipeline(prompt="A robot naruto, 4k photo").images[0]
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training a Kandinsky 2.2 model! To learn more about how to use your new model, the following guides may be helpful:

- Read the [Kandinsky](../using-diffusers/kandinsky) guide to learn how to use it for a variety of different tasks (text-to-image, image-to-image, inpainting, interpolation), and how it can be combined with a ControlNet.
- Check out the [DreamBooth](dreambooth) and [LoRA](lora) training guides to learn how to train a personalized Kandinsky model with just a few example images. These two training techniques can even be combined!
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Pipeline callbacks

The denoising loop of a pipeline can be modified with custom defined functions using the `callback_on_step_end` parameter. The callback function is executed at the end of each step, and modifies the pipeline attributes and variables for the next step. This is really useful for *dynamically* adjusting certain pipeline attributes or modifying tensor variables. This versatility allows for interesting use cases such as changing the prompt embeddings at each timestep, assigning different weights to the prompt embeddings, and editing the guidance scale. With callbacks, you can implement new features without modifying the underlying code!

> [!TIP]
> 🤗 Diffusers currently only supports `callback_on_step_end`, but feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you have a cool use-case and require a callback function with a different execution point!

This guide will demonstrate how callbacks work by walking through a few features you can implement with them.

## Official callbacks

We provide a list of callbacks you can plug into an existing pipeline and modify the denoising loop. This is the current list of official callbacks:

- `SDCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SD 1.5 pipelines, including text-to-image, image-to-image, inpaint, and controlnet.
- `SDXLCFGCutoffCallback`: Disables the CFG after a certain number of steps for all SDXL pipelines, including text-to-image, image-to-image, inpaint, and controlnet.
- `IPAdapterScaleCutoffCallback`: Disables the IP Adapter after a certain number of steps for all pipelines supporting IP-Adapter.

> [!TIP]
> If you want to add a new official callback, feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) or [submit a PR](https://huggingface.co/docs/diffusers/main/en/conceptual/contribution#how-to-open-a-pr).

To set up a callback, you need to specify the number of denoising steps after which the callback comes into effect. You can do so by using either one of these two arguments:

- `cutoff_step_ratio`: Float number with the ratio of the steps.
- `cutoff_step_index`: Integer number with the exact number of the step.
```python
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline
from diffusers.callbacks import SDXLCFGCutoffCallback

callback = SDXLCFGCutoffCallback(cutoff_step_ratio=0.4)
# can also be used with cutoff_step_index
# callback = SDXLCFGCutoffCallback(cutoff_step_ratio=None, cutoff_step_index=10)

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, use_karras_sigmas=True)

prompt = "a sports car at the road, best quality, high quality, high detail, 8k resolution"

generator = torch.Generator(device="cpu").manual_seed(2628670641)

out = pipeline(
    prompt=prompt,
    negative_prompt="",
    guidance_scale=6.5,
    num_inference_steps=25,
    generator=generator,
    callback_on_step_end=callback,
)

out.images[0].save("official_callback.png")
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/without_cfg_callback.png" alt="generated image of a sports car at the road" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">without SDXLCFGCutoffCallback</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/with_cfg_callback.png" alt="generated image of a sports car at the road with cfg callback" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">with SDXLCFGCutoffCallback</figcaption>
  </div>
</div>

## Dynamic classifier-free guidance

Dynamic classifier-free guidance (CFG) is a feature that allows you to disable CFG after a certain number of inference steps which can help you save compute with minimal cost to performance. The callback function for this should have the following arguments:

- `pipeline` (or the pipeline instance) provides access to important properties such as `num_timesteps` and `guidance_scale`. You can modify these properties by updating the underlying attributes. For this example, you'll disable CFG by setting `pipeline._guidance_scale=0.0`.
- `step_index` and `timestep` tell you where you are in the denoising loop. Use `step_index` to turn off CFG after reaching 40% of `num_timesteps`.
- `callback_kwargs` is a dict that contains tensor variables you can modify during the denoising loop. It only includes variables specified in the `callback_on_step_end_tensor_inputs` argument, which is passed to the pipeline's `__call__` method. Different pipelines may use different sets of variables, so please check a pipeline's `_callback_tensor_inputs` attribute for the list of variables you can modify. Some common variables include `latents` and `prompt_embeds`. For this function, change the batch size of `prompt_embeds` after setting `guidance_scale=0.0` in order for it to work properly.
Your callback function should look something like this:

```python
def callback_dynamic_cfg(pipeline, step_index, timestep, callback_kwargs):
    # adjust the batch_size of prompt_embeds according to guidance_scale
    if step_index == int(pipeline.num_timesteps * 0.4):
        prompt_embeds = callback_kwargs["prompt_embeds"]
        prompt_embeds = prompt_embeds.chunk(2)[-1]

        # update guidance_scale and prompt_embeds
        pipeline._guidance_scale = 0.0
        callback_kwargs["prompt_embeds"] = prompt_embeds
    return callback_kwargs
```

Now, you can pass the callback function to the `callback_on_step_end` parameter and the `prompt_embeds` to `callback_on_step_end_tensor_inputs`.

```py
import torch
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"

generator = torch.Generator(device="cuda").manual_seed(1)
out = pipeline(
    prompt,
    generator=generator,
    callback_on_step_end=callback_dynamic_cfg,
    callback_on_step_end_tensor_inputs=["prompt_embeds"],
)

out.images[0].save("out_custom_cfg.png")
```

## Interrupt the diffusion process

> [!TIP]
> The interruption callback is supported for text-to-image, image-to-image, and inpainting for the [StableDiffusionPipeline](../api/pipelines/stable_diffusion/overview) and [StableDiffusionXLPipeline](../api/pipelines/stable_diffusion/stable_diffusion_xl).

Stopping the diffusion process early is useful when building UIs that work with Diffusers because it allows users to stop the generation process if they're unhappy with the intermediate results. You can incorporate this into your pipeline with a callback.

This callback function should take the following arguments: `pipeline`, `i`, `t`, and `callback_kwargs` (this must be returned). Set the pipeline's `_interrupt` attribute to `True` to stop the diffusion process after a certain number of steps. You are also free to implement your own custom stopping logic inside the callback.

In this example, the diffusion process is stopped after 10 steps even though `num_inference_steps` is set to 50.

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.enable_model_cpu_offload()
num_inference_steps = 50

def interrupt_callback(pipeline, i, t, callback_kwargs):
    stop_idx = 10
    if i == stop_idx:
        pipeline._interrupt = True

    return callback_kwargs

pipeline(
    "A photo of a cat",
    num_inference_steps=num_inference_steps,
    callback_on_step_end=interrupt_callback,
)
```

## Display image after each generation step

> [!TIP]
> This tip was contributed by [asomoza](https://github.com/asomoza).

Display an image after each generation step by accessing and converting the latents after each step into an image. The latent space is compressed to 128x128, so the images are also 128x128 which is useful for a quick preview.

1. Use the function below to convert the SDXL latents (4 channels) to RGB tensors (3 channels) as explained in the [Explaining the SDXL latent space](https://huggingface.co/blog/TimothyAlexisVass/explaining-the-sdxl-latent-space) blog post.
```py
import torch
from PIL import Image

def latents_to_rgb(latents):
    weights = (
        (60, -60, 25, -70),
        (60, -5, 15, -50),
        (60, 10, -5, -35),
    )

    weights_tensor = torch.t(torch.tensor(weights, dtype=latents.dtype).to(latents.device))
    biases_tensor = torch.tensor((150, 140, 130), dtype=latents.dtype).to(latents.device)
    rgb_tensor = torch.einsum("...lxy,lr -> ...rxy", latents, weights_tensor) + biases_tensor.unsqueeze(-1).unsqueeze(-1)
    image_array = rgb_tensor.clamp(0, 255)[0].byte().cpu().numpy()
    image_array = image_array.transpose(1, 2, 0)

    return Image.fromarray(image_array)
```

2. Create a function to decode and save the latents into an image.

```py
def decode_tensors(pipe, step, timestep, callback_kwargs):
    latents = callback_kwargs["latents"]

    image = latents_to_rgb(latents)
    image.save(f"{step}.png")

    return callback_kwargs
```

3. Pass the `decode_tensors` function to the `callback_on_step_end` parameter to decode the tensors after each step. You also need to specify what you want to modify in the `callback_on_step_end_tensor_inputs` parameter, which in this case are the latents.

```py
from diffusers import AutoPipelineForText2Image
import torch
from PIL import Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True
).to("cuda")

image = pipeline(
    prompt="A croissant shaped like a cute bear.",
    negative_prompt="Deformed, ugly, bad anatomy",
    callback_on_step_end=decode_tensors,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```

<div class="flex gap-4 justify-center">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/tips_step_0.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">step 0</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/tips_step_19.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">step 19</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/tips_step_29.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">step 29</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/tips_step_39.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">step 39</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/tips_step_49.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">step 49</figcaption>
  </div>
</div>
<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Marigold Pipelines for Computer Vision Tasks

[Marigold](../api/pipelines/marigold) is a novel diffusion-based dense prediction approach, and a set of pipelines for various computer vision tasks, such as monocular depth estimation.

This guide will show you how to use Marigold to obtain fast and high-quality predictions for images and videos.

Each pipeline supports one computer vision task: it takes an RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image. Currently, the following tasks are implemented:

| Pipeline | Predicted Modalities | Demos |
|----------|----------------------|:-----:|
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |

The original checkpoints can be found under the [PRS-ETH](https://huggingface.co/prs-eth/) Hugging Face organization. These checkpoints are meant to work with diffusers pipelines and the [original codebase](https://github.com/prs-eth/marigold). The original code can also be used to train new checkpoints.

| Checkpoint | Modality | Comment |
|------------|----------|---------|
| [prs-eth/marigold-v1-0](https://huggingface.co/prs-eth/marigold-v1-0) | Depth | The first Marigold Depth checkpoint, which predicts *affine-invariant depth* maps. The performance of this checkpoint in benchmarks was studied in the original [paper](https://huggingface.co/papers/2312.02145). Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. Affine-invariant depth prediction has a range of values in each pixel between 0 (near plane) and 1 (far plane); both planes are chosen by the model as part of the inference process. See the `MarigoldImageProcessor` reference for visualization utilities. |
| [prs-eth/marigold-depth-lcm-v1-0](https://huggingface.co/prs-eth/marigold-depth-lcm-v1-0) | Depth | The fast Marigold Depth checkpoint, fine-tuned from `prs-eth/marigold-v1-0`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. |
| [prs-eth/marigold-normals-v0-1](https://huggingface.co/prs-eth/marigold-normals-v0-1) | Normals | A preview checkpoint for the Marigold Normals pipeline. Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. The surface normals predictions are unit-length 3D vectors with values in the range from -1 to 1. *This checkpoint will be phased out after the release of the `v1-0` version.* |
| [prs-eth/marigold-normals-lcm-v0-1](https://huggingface.co/prs-eth/marigold-normals-lcm-v0-1) | Normals | The fast Marigold Normals checkpoint, fine-tuned from `prs-eth/marigold-normals-v0-1`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. *This checkpoint will be phased out after the release of the `v1-0` version.* |

The examples below are mostly given for depth prediction, but they can be universally applied to the other supported modalities. We showcase the predictions using the same input image of Albert Einstein generated by Midjourney. This makes it easier to compare visualizations of the predictions across various modalities and checkpoints.
<div class="flex gap-4" style="justify-content: center; width: 100%;">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://marigoldmonodepth.github.io/images/einstein.jpg"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Example input image for all Marigold pipelines
    </figcaption>
  </div>
</div>

### Depth Prediction Quick Start

To get the first depth prediction, load the `prs-eth/marigold-depth-lcm-v1-0` checkpoint into the `MarigoldDepthPipeline`, put the image through the pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)

vis = pipe.image_processor.visualize_depth(depth.prediction)
vis[0].save("einstein_depth.png")

depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)
depth_16bit[0].save("einstein_depth_16bit.png")
```

The visualization function for depth [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_depth`] applies one of [matplotlib's colormaps](https://matplotlib.org/stable/users/explain/colors/colormaps.html) (`Spectral` by default) to map the predicted pixel values from a single-channel `[0, 1]` depth range into an RGB image. With the `Spectral` colormap, near pixels are painted red and far pixels are assigned a blue color. The 16-bit PNG file stores the single channel values mapped linearly from the `[0, 1]` range into `[0, 65535]`. Below are the raw and the visualized predictions; as can be seen, dark areas (mustache) are easier to distinguish in the visualization:

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth_16bit.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Predicted depth (16-bit PNG)
    </figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Predicted depth visualization (Spectral)
    </figcaption>
  </div>
</div>

### Surface Normals Prediction Quick Start

Load the `prs-eth/marigold-normals-lcm-v0-1` checkpoint into the `MarigoldNormalsPipeline`, put the image through the pipeline, and save the predictions:

```python
import diffusers
import torch

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
    "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
normals = pipe(image)

vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```

The visualization function for normals [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_normals`] maps the three-dimensional prediction with pixel values in the range `[-1, 1]` into an RGB image. The visualization function supports flipping surface normals axes to make the visualization compatible with other choices of the frame of reference.
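For example, flipping the `Y` axis of the visualization to match a convention where `Y` points down can be done with the flip keyword arguments; this is a small sketch that continues the snippet above and assumes the `flip_y` flag of `visualize_normals`.

```python
# Continuing the normals snippet above: flip only the Y axis of the visualization.
vis_flipped = pipe.image_processor.visualize_normals(normals.prediction, flip_y=True)
vis_flipped[0].save("einstein_normals_flipped_y.png")
```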
Conceptually, each pixel is painted according to the surface normal vector in the frame of reference, where the `X` axis points right, the `Y` axis points up, and the `Z` axis points at the viewer. Below is the visualized prediction:

<div class="flex gap-4" style="justify-content: center; width: 100%;">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Predicted surface normals visualization
    </figcaption>
  </div>
</div>

In this example, the nose tip almost certainly has a point on the surface, in which the surface normal vector points straight at the viewer, meaning that its coordinates are `[0, 0, 1]`. This vector maps to the RGB `[128, 128, 255]`, which corresponds to the violet-blue color. Similarly, a surface normal on the cheek in the right part of the image has a large `X` component, which increases the red hue. Points on the shoulders pointing up with a large `Y` promote green color.

### Speeding up inference

The above quick start snippets are already optimized for speed: they load the LCM checkpoint, use the `fp16` variant of weights and computation, and perform just one denoising diffusion step. The `pipe(image)` call completes in 280ms on RTX 3090 GPU. Internally, the input image is encoded with the Stable Diffusion VAE encoder, then the U-Net performs one denoising step, and finally, the prediction latent is decoded with the VAE decoder into pixel space. In this case, two out of three module calls are dedicated to converting between pixel and latent space of LDM. Because Marigold's latent space is compatible with the base Stable Diffusion, it is possible to speed up the pipeline call by more than 3x (85ms on RTX 3090) by using a [lightweight replacement of the SD VAE](../api/models/autoencoder_tiny):

```diff
  import diffusers
  import torch

  pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
      "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
  ).to("cuda")

+ pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
+     "madebyollin/taesd", torch_dtype=torch.float16
+ ).cuda()

  image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
  depth = pipe(image)
```

As suggested in [Optimizations](../optimization/torch2.0#torch.compile), adding `torch.compile` may squeeze extra performance depending on the target hardware:

```diff
  import diffusers
  import torch

  pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
      "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
  ).to("cuda")

+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

  image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
  depth = pipe(image)
```

## Qualitative Comparison with Depth Anything

With the above speed optimizations, Marigold delivers predictions with more details and faster than [Depth Anything](https://huggingface.co/docs/transformers/main/en/model_doc/depth_anything) with the largest checkpoint [LiheYoung/depth-anything-large-hf](https://huggingface.co/LiheYoung/depth-anything-large-hf):

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Marigold LCM fp16
      with Tiny AutoEncoder
    </figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/einstein_depthanything_large.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Depth Anything Large
    </figcaption>
  </div>
</div>

## Maximizing Precision and Ensembling

Marigold pipelines have a built-in ensembling mechanism combining multiple predictions from different random latents. This is a brute-force way of improving the precision of predictions, capitalizing on the generative nature of diffusion. The ensembling path is activated automatically when the `ensemble_size` argument is set greater than `1`. When aiming for maximum precision, it makes sense to adjust `num_inference_steps` simultaneously with `ensemble_size`. The recommended values vary across checkpoints but primarily depend on the scheduler type. The effect of ensembling is particularly well seen with surface normals:

```python
import diffusers

model_path = "prs-eth/marigold-normals-v1-0"

model_paper_kwargs = {
    diffusers.schedulers.DDIMScheduler: {
        "num_inference_steps": 10,
        "ensemble_size": 10,
    },
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 5,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(model_path).to("cuda")
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]

normals = pipe(image, **pipe_kwargs)

vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Surface normals, no ensembling
    </figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Surface normals, with ensembling
    </figcaption>
  </div>
</div>

As can be seen, all areas with fine-grained structures, such as hair, received more conservative and, on average, more correct predictions. Such a result is more suitable for precision-sensitive downstream tasks, such as 3D reconstruction.

## Quantitative Evaluation

To evaluate Marigold quantitatively in standard leaderboards and benchmarks (such as NYU, KITTI, and other datasets), follow the evaluation protocol outlined in the paper: load the full precision fp32 model and use appropriate values for `num_inference_steps` and `ensemble_size`. Optionally seed randomness to ensure reproducibility. Maximizing `batch_size` will deliver maximum device utilization.
```python
import diffusers
import torch

device = "cuda"
seed = 2024
model_path = "prs-eth/marigold-v1-0"

model_paper_kwargs = {
    diffusers.schedulers.DDIMScheduler: {
        "num_inference_steps": 50,
        "ensemble_size": 10,
    },
    diffusers.schedulers.LCMScheduler: {
        "num_inference_steps": 4,
        "ensemble_size": 10,
    },
}

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

generator = torch.Generator(device=device).manual_seed(seed)
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(model_path).to(device)
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]

depth = pipe(image, generator=generator, **pipe_kwargs)

# evaluate metrics
```

## Using Predictive Uncertainty

The ensembling mechanism built into Marigold pipelines combines multiple predictions obtained from different random latents. As a side effect, it can be used to quantify epistemic (model) uncertainty; simply specify `ensemble_size` greater than 1 and set `output_uncertainty=True`. The resulting uncertainty will be available in the `uncertainty` field of the output. It can be visualized as follows:

```python
import diffusers
import torch

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

depth = pipe(
    image,
    ensemble_size=10,  # any number greater than 1; higher values yield higher precision
    output_uncertainty=True,
)

uncertainty = pipe.image_processor.visualize_uncertainty(depth.uncertainty)
uncertainty[0].save("einstein_depth_uncertainty.png")
```

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_depth_uncertainty.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Depth uncertainty
    </figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals_uncertainty.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Surface normals uncertainty
    </figcaption>
  </div>
</div>

The interpretation of uncertainty is easy: higher values (white) correspond to pixels where the model struggles to make consistent predictions. Evidently, the depth model is the least confident around edges with discontinuity, where the object depth changes drastically. The surface normals model is the least confident in fine-grained structures, such as hair, and dark areas, such as the collar.

## Frame-by-frame Video Processing with Temporal Consistency

Due to Marigold's generative nature, each prediction is unique and defined by the random noise sampled for the latent initialization.
This becomes an obvious drawback compared to traditional end-to-end dense regression networks, as exemplified in the following videos:

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama.gif"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">Input video</figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
  </div>
</div>

To address this issue, it is possible to pass the `latents` argument to the pipelines, which defines the starting point of diffusion. Empirically, we found that a convex combination of the very same starting point noise latent and the latent corresponding to the previous frame prediction gives sufficiently smooth results, as implemented in the snippet below:

```python
import imageio
from PIL import Image
from tqdm import tqdm
import diffusers
import torch

device = "cuda"
path_in = "obama.mp4"
path_out = "obama_depth.gif"

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to(device)
pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
    "madebyollin/taesd", torch_dtype=torch.float16
).to(device)
pipe.set_progress_bar_config(disable=True)

with imageio.get_reader(path_in) as reader:
    size = reader.get_meta_data()['size']
    last_frame_latent = None
    latent_common = torch.randn(
        (1, 4, 768 * size[1] // (8 * max(size)), 768 * size[0] // (8 * max(size)))
    ).to(device=device, dtype=torch.float16)

    out = []
    for frame_id, frame in tqdm(enumerate(reader), desc="Processing Video"):
        frame = Image.fromarray(frame)
        latents = latent_common
        if last_frame_latent is not None:
            latents = 0.9 * latents + 0.1 * last_frame_latent

        depth = pipe(
            frame, match_input_resolution=False, latents=latents, output_latent=True
        )
        last_frame_latent = depth.latent
        out.append(pipe.image_processor.visualize_depth(depth.prediction)[0])

    diffusers.utils.export_to_gif(out, path_out, fps=reader.get_meta_data()['fps'])
```

Here, the diffusion process starts from the given computed latent. The pipeline sets `output_latent=True` to access `out.latent` and computes its contribution to the next frame's latent initialization. The result is much more stable now:

<div class="flex gap-4">
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
  </div>
  <div style="flex: 1 1 50%; max-width: 50%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_consistent.gif"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth with forced latents initialization</figcaption>
  </div>
</div>

## Marigold for ControlNet

A very common application for depth prediction with diffusion models comes in conjunction with ControlNet. Depth crispness plays a crucial role in obtaining high-quality results from ControlNet.
As seen in the comparisons with other methods above, Marigold excels at this task. The snippet below demonstrates how to load an image, compute depth, and pass it into ControlNet in a compatible format:

```python
import torch
import diffusers

device = "cuda"
generator = torch.Generator(device=device).manual_seed(2024)
image = diffusers.utils.load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"
)

pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", torch_dtype=torch.float16, variant="fp16"
).to(device)

depth_image = pipe(image, generator=generator).prediction
depth_image = pipe.image_processor.visualize_depth(depth_image, color_map="binary")
depth_image[0].save("motorcycle_controlnet_depth.png")

controlnet = diffusers.ControlNetModel.from_pretrained(
    "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
).to(device)
pipe = diffusers.StableDiffusionXLControlNetPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnet
).to(device)
pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)

controlnet_out = pipe(
    prompt="high quality photo of a sports bike, city",
    negative_prompt="",
    guidance_scale=6.5,
    num_inference_steps=25,
    image=depth_image,
    controlnet_conditioning_scale=0.7,
    control_guidance_end=0.7,
    generator=generator,
).images
controlnet_out[0].save("motorcycle_controlnet_out.png")
```

<div class="flex gap-4">
  <div style="flex: 1 1 33%; max-width: 33%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Input image
    </figcaption>
  </div>
  <div style="flex: 1 1 33%; max-width: 33%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_depth.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      Depth in the format compatible with ControlNet
    </figcaption>
  </div>
  <div style="flex: 1 1 33%; max-width: 33%;">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_out.png"/>
    <figcaption class="mt-1 text-center text-sm text-gray-500">
      ControlNet generation, conditioned on depth and prompt: "high quality photo of a sports bike, city"
    </figcaption>
  </div>
</div>

Hopefully, you will find Marigold useful for solving your downstream tasks, be it part of a broader generative workflow or a perception task such as 3D reconstruction.
diffusers/docs/source/en/using-diffusers/marigold_usage.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/marigold_usage.md", "repo_id": "diffusers", "token_count": 9939 }
108
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Textual inversion

[[open-in-colab]]

The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community-created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).

This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](../training/text_inversion) training guide.

Import the necessary libraries:

```py
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import make_image_grid
```

## Stable Diffusion 1 and 2

Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):

```py
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```

Now you can load a pipeline, and pass the pre-learned concept to it:

```py
pipeline = StableDiffusionPipeline.from_pretrained(
    pretrained_model_name_or_path, torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

pipeline.load_textual_inversion(repo_id_embeds)
```

Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:

```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"

num_samples_per_row = 2
num_rows = 2
```

Then run the pipeline (feel free to adjust the parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), save the generated images, and visualize them with the `make_image_grid` helper function you imported at the beginning:

```py
all_images = []
for _ in range(num_rows):
    images = pipeline(prompt, num_images_per_prompt=num_samples_per_row, num_inference_steps=50, guidance_scale=7.5).images
    all_images.extend(images)

grid = make_image_grid(all_images, num_rows, num_samples_per_row)
grid
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>

## Stable Diffusion XL

Stable Diffusion XL (SDXL) can also use textual inversion vectors for inference. In contrast to Stable Diffusion 1 and 2, SDXL has two text encoders, so you'll need two textual inversion embeddings - one for each text encoder model.
Let's download the SDXL textual inversion embeddings and have a closer look at its structure:

```py
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

file = hf_hub_download("dn118/unaestheticXL", filename="unaestheticXLv31.safetensors")
state_dict = load_file(file)
state_dict
```

```
{'clip_g': tensor([[ 0.0077, -0.0112,  0.0065,  ...,  0.0195,  0.0159,  0.0275],
         ...,
         [-0.0170,  0.0213,  0.0143,  ..., -0.0302, -0.0240, -0.0362]],
 'clip_l': tensor([[ 0.0023,  0.0192,  0.0213,  ..., -0.0385,  0.0048, -0.0011],
         ...,
         [ 0.0475, -0.0508, -0.0145,  ...,  0.0070, -0.0089, -0.0163]],
```

There are two tensors, `"clip_g"` and `"clip_l"`. `"clip_g"` corresponds to the bigger text encoder in SDXL and refers to `pipe.text_encoder_2`, while `"clip_l"` refers to `pipe.text_encoder`.

Now you can load each tensor separately by passing them along with the correct text encoder and tokenizer to [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`]:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")

pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)

# the embedding should be used as a negative embedding, so we pass it as a negative prompt
generator = torch.Generator().manual_seed(33)
image = pipe("a woman standing in front of a mountain", negative_prompt="unaestheticXLv31", generator=generator).images[0]
image
```
diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md", "repo_id": "diffusers", "token_count": 1716 }
109
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Philosophy [[philosophy]]

🧨 Diffusers provides **state-of-the-art** pretrained diffusion models across multiple modalities. Its purpose is to serve as a **modular toolbox** for both inference and training.

We aim to build a library that stands the test of time, and we therefore take API design very seriously. In a nutshell, Diffusers is built to be a natural extension of PyTorch, so most of its design choices are based on [PyTorch's Design Principles](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy). Let's go over the most important ones:

## Usability over Performance [[usability-over-performance]]

- While Diffusers has many built-in performance-enhancing features (see [Memory and Speed](https://huggingface.co/docs/diffusers/optimization/fp16) for details), models are always loaded with the highest precision and minimal optimization. Therefore, by default, diffusion pipelines are always instantiated on the CPU with float32 precision unless the user defines otherwise. This ensures usability across different platforms and accelerators and means that no complex installations are required to run the library.
- Diffusers aims to be a **lightweight** package and therefore has very few required dependencies, but many optional dependencies that can improve performance (such as `accelerate`, `safetensors`, `onnx`, etc.). We strive to keep the library as lightweight as possible so that it can be added as a dependency of other packages without much concern.
- Diffusers prefers simple, self-explanatory code. This means that condensed code syntax such as lambda functions or advanced PyTorch operators is rarely used.

## Simple over easy [[simple-over-easy]]

As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:

- We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management.
- Raising concise error messages is preferred over silently correcting erroneous input. Diffusers aims at teaching the user rather than making the library as easy to use as possible.
- Complex model and scheduler logic is exposed instead of being magically handled internally. Schedulers/samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation makes debugging easier and gives the user more control over adapting the denoising process or switching out diffusion models and schedulers.
- Separately trained components of a diffusion pipeline, such as the text encoder, the UNet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files. However, this makes debugging and customization easier. DreamBooth or Textual Inversion training is very simple thanks to Diffusers' ability to separate the individual components of a diffusion pipeline.

## Tweakable, contributor-friendly over abstraction [[tweakable-contributor-friendly-over-abstraction]]

For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers): prefer copy-pasted code over hasty abstractions. This design principle is quite opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).

In short, just as Transformers does for modeling files, Diffusers prefers to keep a very low level of abstraction and very self-contained code. Functions, long code blocks, and even classes can be copied across multiple files, which at first can look like a bad, sloppy design choice that makes the library unmaintainable. However, this design has proven to be extremely successful and is a great fit for community-driven, open-source machine learning libraries, for the following reasons:

- Machine learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms change rapidly, which makes it very difficult to define code abstractions that last.
- Machine learning practitioners need to be able to quickly tweak existing code for ideation and research, so they prefer self-contained code over heavy abstraction.
- Open-source libraries rely on community contributions, so it is important to build a library that is easy to contribute to. The more abstract the code, the more dependencies it has, the harder it is to read, and the harder it is to contribute to. Contributors simply stop contributing to very abstract libraries out of fear of breaking vital functionality.
If contributing to a library cannot break other fundamental code, it is not only more inviting to potential new contributors, but it also becomes easier to review and contribute to multiple parts in parallel.

At Hugging Face, we call this design the **single-file policy**, meaning that almost all of the code of a certain class should be written in a single, self-contained file. To read more about this philosophy, take a look at [this blog post](https://huggingface.co/blog/transformers-design-philosophy).

In Diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we follow it only partly is that almost all diffusion pipelines, such as [DDPM](https://huggingface.co/docs/diffusers/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [unCLIP (DALL·E 2)](https://huggingface.co/docs/diffusers/api/pipelines/unclip), and [Imagen](https://imagen.research.google/), rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models/unet2d-cond).

Great, now you should generally understand why 🧨 Diffusers is designed the way it is 🤗. We try to apply these design principles consistently across the library. Nevertheless, there may be some minor exceptions to the philosophy or some unfortunate design choices. If you have feedback about the design, we would love to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).

## Design Philosophy in Details [[design-philosophy-in-details]]

Now, let's look a bit deeper into the details of the design philosophy. Diffusers essentially consists of three major classes: [pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers). Let's walk through the more detailed design decisions for each class.

### Pipelines [[pipelines]]

Pipelines are designed to be easy to use (and therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature-complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference. The following design principles are followed:

- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as is done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, the [# Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251) can be used.
- Pipelines all inherit from [`DiffusionPipeline`].
- Every pipeline consists of different model and scheduler components, which are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline, and can be shared between pipelines with the [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function (see the sketch after this list).
- Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
- Pipelines should be used **only** for inference.
- Pipelines should be very readable, self-explanatory, and easy to tweak.
- Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
- Pipelines do **not** aim to be feature-complete user interfaces. For feature-complete user interfaces, take a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
- Every pipeline should be runnable solely via the `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
- Pipelines should be named after the task they are intended to solve.
- In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
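To make the component-sharing principle above concrete, here is a minimal sketch (ours, not part of the original document) that instantiates an image-to-image pipeline from the components of a text-to-image pipeline, so both pipelines share the very same model weights in memory:

```py
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

# load a text-to-image pipeline once
text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# reuse its components for an image-to-image pipeline:
# no weights are duplicated, both pipelines share the same modules
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
```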
### Models [[models]]

Models are designed as configurable toolboxes that are natural extensions of [PyTorch's Module class](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). They only partly follow the **single-file policy**. The following design principles are followed:

- A model corresponds to a **type of model architecture**. E.g. the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and every model architecture shall be defined in its own file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc.
- Models do **not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc. **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
- Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages.
- Models all inherit from `ModelMixin` and `ConfigMixin`.
- Models can be optimized for performance when it does not demand major code changes, keeps backward compatibility, and gives significant memory or compute gains.
- Models should by default have the highest precision and lowest performance setting.
- To integrate new model checkpoints whose general architecture can be classified as one that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. A new file should only be created if the model architecture is fundamentally different.
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments and configuration arguments and by "foreseeing" future changes; for example, it is usually better to add a string `"...type"` argument that can easily be extended to new future types than a boolean `is_..._type` argument. Only the minimum amount of changes shall be made to an existing architecture to make a new model checkpoint work.
- The model design is a tricky trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, but there are some exceptions where it is preferred to add new classes to keep the code concise and readable long-term, such as the [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

### Schedulers [[schedulers]]

Schedulers are responsible for guiding the denoising process for inference as well as for defining a noise schedule for training. They are designed as individual classes with loadable configuration files and strictly follow the **single-file policy**. The following design principles are followed:

- All schedulers can be found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
- Schedulers are **not** allowed to import from large utility files and shall be kept very self-contained.
- One scheduler Python file corresponds to one scheduler algorithm (as defined in a paper).
- If schedulers share similar functionality, the `# Copied from` mechanism can be used.
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
- Schedulers can easily be swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method, as explained in detail [here](../using-diffusers/schedulers.md).
- Every scheduler has to have a `set_num_inference_steps` and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, i.e. before `step(...)` is called (the sketch after this list shows how these pieces fit together).
- Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of the timesteps the model will be called at.
- The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1).
- Given the complexity of denoising schedulers, the `step` function does not expose all of the complexity and can be a bit of a "black box".
- In almost all cases, novel schedulers shall be implemented in a new scheduling file.
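To see how these pieces fit together, here is a minimal unrolled denoising loop. This is our sketch, not part of the original document; note that it uses the current scheduler API, where the number of steps is configured via `set_timesteps`:

```py
import torch
from diffusers import DDPMScheduler, UNet2DModel

model = UNet2DModel.from_pretrained("google/ddpm-cat-256")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")

scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)

for t in scheduler.timesteps:  # the exposed array of timesteps to loop over
    with torch.no_grad():
        noise_pred = model(sample, t).sample            # predicted model output
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # x_t -> x_t-1
```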
diffusers/docs/source/ko/conceptual/philosophy.md/0
{ "file_path": "diffusers/docs/source/ko/conceptual/philosophy.md", "repo_id": "diffusers", "token_count": 12964 }
110
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ControlNet

[Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) (ControlNet) by Lvmin Zhang and Maneesh Agrawala.

This example is based on the [training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k).

## Installing the dependencies

Before running the script below, make sure to install the library's training dependencies.

<Tip warning={true}>

To successfully run the latest versions of the example scripts, we highly recommend installing from source and keeping the installation up to date. We update the example scripts frequently and install example-specific requirements.

</Tip>

To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then navigate into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet):

```bash
cd examples/controlnet
```

Now run:

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:

```bash
accelerate config
```

Or initialize a default 🤗Accelerate configuration without knowing anything about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell, such as a notebook, you can initialize it with:

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

## Circle filling dataset

The original dataset is hosted in the ControlNet [repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip), but we re-uploaded it [here](https://huggingface.co/datasets/fusing/fill50k) to be compatible with 🤗 Datasets, so that the data loading can be handled within the training script.

Our training example uses [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5), which was also used to train the original set of ControlNet models. However, a ControlNet can be trained to augment any corresponding Stable Diffusion model, such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`stabilityai/stable-diffusion-2-1`](https://huggingface.co/stabilityai/stable-diffusion-2-1).

To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.

## Training

Download the following images to condition the training with:

```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png

wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

Specify the `MODEL_DIR` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=4 \
 --push_to_hub
```

This default configuration requires ~38 GB VRAM.

By default, the training script logs outputs to tensorboard. Pass `--report_to wandb` to use Weights & Biases instead.

Gradient accumulation with a smaller batch size can be used to reduce the training requirements to ~20 GB VRAM.

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --push_to_hub
```

## Training with multiple GPUs

`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=4 \
 --mixed_precision="fp16" \
 --tracker_project_name="controlnet-demo" \
 --report_to=wandb \
 --push_to_hub
```

## Example results

#### After 300 steps with batch size 8

|                    |                                 |
|--------------------|:-------------------------------:|
|                    | red circle with blue background |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) |
|                    | cyan circle with brown floral background |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) |

#### After 6000 steps with batch size 8

|                    |                                 |
|--------------------|:-------------------------------:|
|                    | red circle with blue background |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) |
|                    | cyan circle with brown floral background |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) |
## Training on a 16 GB GPU

Enable the following optimizations to train on a 16 GB GPU:

- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (if it is not installed yet, follow the linked installation instructions)

Now you can launch the training script:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --gradient_checkpointing \
 --use_8bit_adam \
 --push_to_hub
```

## Training on a 12 GB GPU

Enable the following optimizations to train on a 12 GB GPU:

- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (if it is not installed yet, follow the linked installation instructions)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (if it is not installed yet, follow the linked installation instructions)
- Set gradients to `None`

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --gradient_checkpointing \
 --use_8bit_adam \
 --enable_xformers_memory_efficient_attention \
 --set_grads_to_none \
 --push_to_hub
```

When using `enable_xformers_memory_efficient_attention`, make sure that `xformers` is installed with `pip install xformers`.

## Training on an 8 GB GPU

We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does save memory, we have not confirmed that it trains successfully. You will very likely have to make changes to the configuration to get a successful training run.

Enable the following optimizations to train on an 8 GB GPU:

- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (if it is not installed yet, follow the linked installation instructions)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (if it is not installed yet, follow the linked installation instructions)
- Set gradients to `None`
- DeepSpeed stage 2 with parameter and optimizer offloading
- fp16 mixed precision

[DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either CPU or NVME. This requires significantly more RAM (about 25 GB).

You have to configure your environment with `accelerate config` to enable DeepSpeed stage 2. The configuration file should look like this:

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 4
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
```

<Tip>

See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.

</Tip>

Changing the default Adam optimizer to DeepSpeed's Adam `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup, but it requires a CUDA toolchain version that matches PyTorch's. 8-bit optimizers do not seem to be compatible with DeepSpeed at the moment.
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --gradient_checkpointing \
 --enable_xformers_memory_efficient_attention \
 --set_grads_to_none \
 --mixed_precision fp16 \
 --push_to_hub
```

## Inference

The trained model can be run with the [`StableDiffusionControlNetPipeline`]. Set `base_model_path` and `controlnet_path` to the values that `--pretrained_model_name_or_path` and `--output_dir` were respectively set to in the training script.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch

base_model_path = "path to model"
controlnet_path = "path to controlnet"

controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)

# speed up the diffusion process with a faster scheduler and memory optimizations
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove the following line if xformers is not installed
pipe.enable_xformers_memory_efficient_attention()

pipe.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"

# generate image
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]

image.save("./output.png")
```
diffusers/docs/source/ko/training/controlnet.md/0
{ "file_path": "diffusers/docs/source/ko/training/controlnet.md", "repo_id": "diffusers", "token_count": 7776 }
111
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided depth-to-image generation

[[open-in-colab]]

The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS).

Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]:

```python
import torch
import requests
from PIL import Image

from diffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16,
).to("cuda")
```

Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how the image is generated:

```python
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)
prompt = "two tigers"
n_prompt = "bad, deformed, ugly, bad anatomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
image
```

| Input                                                                           | Output                                                                                                                                  |
|---------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |

Play around with the Spaces below and see if you notice a difference between the images generated with and without a depth map!

<iframe src="https://radames-stable-diffusion-depth2img.hf.space" frameborder="0" width="850" height="500" ></iframe>
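As mentioned above, you can also provide your own `depth_map` instead of relying on the integrated estimation. The snippet below is a minimal sketch of this; the file name and the single-channel `(batch, height, width)` tensor shape are our assumptions here, so double-check the pipeline API docs for your version:

```python
import numpy as np
import torch

# assumption: depth.npy holds a precomputed depth map for init_image
depth = torch.from_numpy(np.load("depth.npy"))               # (height, width)
depth_map = depth.unsqueeze(0).to("cuda", dtype=torch.float16)  # (1, height, width)

image = pipe(
    prompt=prompt,
    image=init_image,
    depth_map=depth_map,
    negative_prompt=n_prompt,
    strength=0.7,
).images[0]
```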
diffusers/docs/source/ko/using-diffusers/depth2img.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/depth2img.md", "repo_id": "diffusers", "token_count": 1376 }
112
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Weighting prompts

[[open-in-colab]]

Text-guided diffusion models generate images based on a given text prompt. The text prompt can include multiple concepts that the model should generate, and it is often desirable to weight certain parts of the prompt more or less strongly.

Diffusion models work by conditioning the cross attention layers of the diffusion model with contextualized text embeddings (see the [Stable Diffusion Guide](https://huggingface.co/docs/optimum-neuron/main/en/package_reference/modeling#stable-diffusion) for more information). Thus, a simple way to emphasize (or de-emphasize) certain parts of the prompt is to increase or reduce the scale of the text embedding vectors that correspond to the relevant parts of the prompt. This is called "prompt weighting" and has been one of the most requested features in the community (see the issue [here](https://github.com/huggingface/diffusers/issues/2431)).

## How to do prompt weighting in Diffusers

We believe the role of `diffusers` is to be a toolbox that provides the essential features that enable other projects, such as powerful UIs like [InvokeAI](https://github.com/invoke-ai/InvokeAI) or [diffuzers](https://github.com/abhishekkrthakur/diffuzers). To support ways of manipulating prompts, `diffusers` exposes a [prompt_embeds](https://huggingface.co/docs/diffusers/v0.14.0/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) argument in many pipelines, such as the [StableDiffusionPipeline](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline), which lets you pass "prompt-weighted"/scaled text embeddings directly to the pipeline.

The [compel library](https://github.com/damian0815/compel) provides an easy way to emphasize or de-emphasize parts of the prompt. We strongly recommend using it instead of preparing the embeddings yourself.

Let's look at a simple example. Say we want to generate an image of `"a red cat playing with a ball"`:

```py
import torch

from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

prompt = "a red cat playing with a ball"

generator = torch.Generator(device="cpu").manual_seed(33)

image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

The generated image:

![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png)

As you can see, there is no "ball" in the image. Let's emphasize that part!

First, install the `compel` library:

```sh
pip install compel
```

Then create a `Compel` object:

```py
from compel import Compel

compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
```

Now let's emphasize "ball" with the `"++"` syntax:

```py
prompt = "a red cat playing with a ball++"
```

Instead of passing this prompt directly to the pipeline, it has to be processed with `compel_proc`:

```py
prompt_embeds = compel_proc(prompt)
```

Now `prompt_embeds` can be passed directly to the pipeline:

```py
generator = torch.Generator(device="cpu").manual_seed(33)

image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```

We now get an image with a "ball"!

![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png)

Similarly, you can de-emphasize parts of the sentence with the `--` suffix. Give it a try!

If your favorite pipeline does not have a `prompt_embeds` input, please open a new issue; the Diffusers team tries to be as responsive as possible.
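For reference, here is a hand-rolled sketch of the mechanism described at the top of this page — scaling the embedding vectors of the tokens you want to emphasize — without compel. It is illustrative only: the lookup assumes "ball" maps to the single CLIP token `ball</w>`, and plain rescaling will not reproduce compel's more careful weighting:

```py
import torch

prompt = "a red cat playing with a ball"
text_inputs = pipe.tokenizer(
    prompt,
    padding="max_length",
    max_length=pipe.tokenizer.model_max_length,
    return_tensors="pt",
)
with torch.no_grad():
    prompt_embeds = pipe.text_encoder(text_inputs.input_ids.to(pipe.device))[0]

# up-weight the positions that correspond to "ball"
ball_token_id = pipe.tokenizer.convert_tokens_to_ids("ball</w>")
positions = (text_inputs.input_ids[0] == ball_token_id).nonzero(as_tuple=True)[0]
prompt_embeds[:, positions] *= 1.5

image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=20).images[0]
```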
Compel 1.1.6 adds a utility class that simplifies using textual inversions. Instantiate a `DiffusersTextualInversionManager` and pass it to the Compel init:

```py
from compel import Compel, DiffusersTextualInversionManager

textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager)
```

For more information, take a look at the [compel](https://github.com/damian0815/compel) library documentation.
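A hypothetical end-to-end example of the above (the concept repository and prompt are just illustrations):

```py
from compel import Compel, DiffusersTextualInversionManager
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.load_textual_inversion("sd-concepts-library/cat-toy")  # adds the <cat-toy> token

textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel_proc = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager,
)

# the manager expands the placeholder token before the weighting is applied
prompt_embeds = compel_proc("a photo of a <cat-toy>++ on a table")
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=20).images[0]
```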
diffusers/docs/source/ko/using-diffusers/weighted_prompts.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/weighted_prompts.md", "repo_id": "diffusers", "token_count": 3376 }
113
## Amused training

Amused can be finetuned on simple datasets relatively cheaply and quickly. Using 8-bit optimizers, lora, and gradient accumulation, amused can be finetuned with as little as 5.5 GB. Here is a set of examples for finetuning amused on some relatively simple datasets. These training recipes are aggressively oriented towards minimal resources and fast verification -- i.e. the batch sizes are quite low and the learning rates are quite high. For optimal quality, you will probably want to increase the batch sizes and decrease learning rates.

All training examples use fp16 mixed precision and gradient checkpointing. We don't show 8 bit adam + lora as it's about the same memory use as just using lora (bitsandbytes uses full precision optimizer states for weights below a minimum size).

### Finetuning the 256 checkpoint

These examples finetune on this [nouns](https://huggingface.co/datasets/m1guelpf/nouns) dataset.

Example results:

![noun1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun1.png) ![noun2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun2.png) ![noun3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun3.png)

#### Full finetuning

Batch size: 8, Learning rate: 1e-4, Gives decent results in 750-1000 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 8          | 1                           | 8                          | 19.7 GB     |
| 4          | 2                           | 8                          | 18.3 GB     |
| 1          | 8                           | 8                          | 17.9 GB     |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 1e-4 \
    --pretrained_model_name_or_path amused/amused-256 \
    --instance_data_dataset  'm1guelpf/nouns' \
    --image_key image \
    --prompt_key text \
    --resolution 256 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
        'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
        'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
        'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
        'a pixel art character with square red glasses' \
        'a pixel art character' \
        'square red glasses on a pixel art character' \
        'square red glasses on a pixel art character with a baseball-shaped head' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

#### Full finetuning + 8 bit adam

Note that this training config keeps the batch size low and the learning rate high to get results fast with low resources. However, due to 8 bit adam, it will diverge eventually. If you want to train for longer, you will have to up the batch size and lower the learning rate.
Batch size: 16, Learning rate: 2e-5, Gives decent results in ~750 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 16         | 1                           | 16                         | 20.1 GB     |
| 8          | 2                           | 16                         | 15.6 GB     |
| 1          | 16                          | 16                         | 10.7 GB     |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 2e-5 \
    --use_8bit_adam \
    --pretrained_model_name_or_path amused/amused-256 \
    --instance_data_dataset  'm1guelpf/nouns' \
    --image_key image \
    --prompt_key text \
    --resolution 256 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
        'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
        'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
        'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
        'a pixel art character with square red glasses' \
        'a pixel art character' \
        'square red glasses on a pixel art character' \
        'square red glasses on a pixel art character with a baseball-shaped head' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

#### Full finetuning + lora

Batch size: 16, Learning rate: 8e-4, Gives decent results in 1000-1250 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 16         | 1                           | 16                         | 14.1 GB     |
| 8          | 2                           | 16                         | 10.1 GB     |
| 1          | 16                          | 16                         | 6.5 GB      |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 8e-4 \
    --use_lora \
    --pretrained_model_name_or_path amused/amused-256 \
    --instance_data_dataset  'm1guelpf/nouns' \
    --image_key image \
    --prompt_key text \
    --resolution 256 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
        'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
        'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
        'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
        'a pixel art character with square red glasses' \
        'a pixel art character' \
        'square red glasses on a pixel art character' \
        'square red glasses on a pixel art character with a baseball-shaped head' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

### Finetuning the 512 checkpoint

These examples finetune on this [minecraft](https://huggingface.co/monadical-labs/minecraft-preview) dataset.
Example results:

![minecraft1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft1.png) ![minecraft2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft2.png) ![minecraft3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft3.png)

#### Full finetuning

Batch size: 8, Learning rate: 8e-5, Gives decent results in 500-1000 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 8          | 1                           | 8                          | 24.2 GB     |
| 4          | 2                           | 8                          | 19.7 GB     |
| 1          | 8                           | 8                          | 16.99 GB    |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 8e-5 \
    --pretrained_model_name_or_path amused/amused-512 \
    --instance_data_dataset  'monadical-labs/minecraft-preview' \
    --prompt_prefix 'minecraft ' \
    --image_key image \
    --prompt_key text \
    --resolution 512 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'minecraft Avatar' \
        'minecraft character' \
        'minecraft' \
        'minecraft president' \
        'minecraft pig' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

#### Full finetuning + 8 bit adam

Batch size: 8, Learning rate: 5e-6, Gives decent results in 500-1000 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 8          | 1                           | 8                          | 21.2 GB     |
| 4          | 2                           | 8                          | 13.3 GB     |
| 1          | 8                           | 8                          | 9.9 GB      |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 5e-6 \
    --pretrained_model_name_or_path amused/amused-512 \
    --instance_data_dataset  'monadical-labs/minecraft-preview' \
    --prompt_prefix 'minecraft ' \
    --image_key image \
    --prompt_key text \
    --resolution 512 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'minecraft Avatar' \
        'minecraft character' \
        'minecraft' \
        'minecraft president' \
        'minecraft pig' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

#### Full finetuning + lora

Batch size: 8, Learning rate: 1e-4, Gives decent results in 500-1000 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 8          | 1                           | 8                          | 12.7 GB     |
| 4          | 2                           | 8                          | 9.0 GB      |
| 1          | 8                           | 8                          | 5.6 GB      |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 1e-4 \
    --use_lora \
    --pretrained_model_name_or_path amused/amused-512 \
    --instance_data_dataset  'monadical-labs/minecraft-preview' \
    --prompt_prefix 'minecraft ' \
    --image_key image \
    --prompt_key text \
    --resolution 512 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'minecraft Avatar' \
        'minecraft character' \
        'minecraft' \
        'minecraft president' \
        'minecraft pig' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

### Styledrop

[Styledrop](https://arxiv.org/abs/2306.00983) is an efficient finetuning method for learning a new style from just one or very few images.
It has an optional first stage to generate human-picked additional training samples. The additional training samples can be used to augment the initial images. Our examples exclude the optional additional image selection stage and instead just finetune on a single image.

This is our example style image:

![example](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png)

Download it to your local directory with

```sh
wget https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png
```

#### 256

Example results:

![glowing_256_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_1.png) ![glowing_256_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_2.png) ![glowing_256_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_3.png)

Learning rate: 4e-4, Gives decent results in 1500-2000 steps

Memory used: 6.5 GB

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --mixed_precision fp16 \
    --report_to wandb \
    --use_lora \
    --pretrained_model_name_or_path amused/amused-256 \
    --train_batch_size 1 \
    --lr_scheduler constant \
    --learning_rate 4e-4 \
    --validation_prompts \
        'A chihuahua walking on the street in [V] style' \
        'A banana on the table in [V] style' \
        'A church on the street in [V] style' \
        'A tabby cat walking in the forest in [V] style' \
    --instance_data_image 'A mushroom in [V] style.png' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 100 \
    --resolution 256
```

#### 512

Example results:

![glowing_512_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_1.png) ![glowing_512_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_2.png) ![glowing_512_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_3.png)

Learning rate: 1e-3, Lora alpha 1, Gives decent results in 1500-2000 steps

Memory used: 5.6 GB

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --mixed_precision fp16 \
    --report_to wandb \
    --use_lora \
    --pretrained_model_name_or_path amused/amused-512 \
    --train_batch_size 1 \
    --lr_scheduler constant \
    --learning_rate 1e-3 \
    --validation_prompts \
        'A chihuahua walking on the street in [V] style' \
        'A banana on the table in [V] style' \
        'A church on the street in [V] style' \
        'A tabby cat walking in the forest in [V] style' \
    --instance_data_image 'A mushroom in [V] style.png' \
    --max_train_steps 100000 \
    --checkpointing_steps 500 \
    --validation_steps 100 \
    --resolution 512 \
    --lora_alpha 1
```
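Once training has finished, you can try out a checkpoint with `AmusedPipeline`. The sketch below is ours and assumes the training script saved a full pipeline under `<output path>` -- adjust the path to your checkpoint layout:

```py
import torch
from diffusers import AmusedPipeline

# assumption: <output path> contains a complete saved pipeline
pipe = AmusedPipeline.from_pretrained("<output path>").to("cuda")

image = pipe(
    "A chihuahua walking on the street in [V] style",
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("styledrop_sample.png")
```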
diffusers/examples/amused/README.md/0
{ "file_path": "diffusers/examples/amused/README.md", "repo_id": "diffusers", "token_count": 5918 }
114
from typing import List, Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import ConfigMixin
from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
from diffusers.schedulers.scheduling_utils import SchedulerMixin


class IADBScheduler(SchedulerMixin, ConfigMixin):
    """
    IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.

    For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
    """

    # initialized by `set_timesteps`; kept as None so that calling `step` first
    # raises the intended ValueError instead of an AttributeError
    num_inference_steps = None

    def step(
        self,
        model_output: torch.Tensor,
        timestep: int,
        x_alpha: torch.Tensor,
    ) -> torch.Tensor:
        """
        Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.Tensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
            timestep (`int`): current timestep in the diffusion chain.
            x_alpha (`torch.Tensor`): x_alpha sample for the current timestep

        Returns:
            `torch.Tensor`: the sample at the previous timestep

        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        alpha = timestep / self.num_inference_steps
        alpha_next = (timestep + 1) / self.num_inference_steps

        d = model_output

        x_alpha = x_alpha + (alpha_next - alpha) * d

        return x_alpha

    def set_timesteps(self, num_inference_steps: int):
        self.num_inference_steps = num_inference_steps

    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        alpha: torch.Tensor,
    ) -> torch.Tensor:
        return original_samples * alpha + noise * (1 - alpha)

    def __len__(self):
        return self.config.num_train_timesteps


class IADBPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1): The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
            True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images.
        """

        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        x_alpha = image.clone()
        for t in self.progress_bar(range(num_inference_steps)):
            alpha = t / num_inference_steps

            # 1. predict the blending direction from x0 (noise) to x1 (data)
            model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample

            # 2. move x_alpha one step along the predicted direction
            x_alpha = self.scheduler.step(model_output, t, x_alpha)

        image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
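
# ---------------------------------------------------------------------------
# Illustrative training sketch (ours, not part of the original example): a
# UNet can be trained against `IADBScheduler` by regressing the blending
# direction d = x1 - x0 between a data sample x1 and pure noise x0. The
# `unet`, `dataloader`, and `optimizer` arguments are assumed placeholders.
# ---------------------------------------------------------------------------
def example_iadb_training_loop(unet, dataloader, optimizer, device="cuda"):
    import torch.nn.functional as F

    scheduler = IADBScheduler()
    for clean in dataloader:  # x1: data samples, scaled to [-1, 1]
        clean = clean.to(device)
        noise = torch.randn_like(clean)  # x0: pure noise
        alpha = torch.rand(clean.shape[0], device=device)  # per-sample blending factor
        x_alpha = scheduler.add_noise(clean, noise, alpha.view(-1, 1, 1, 1))

        pred = unet(x_alpha, alpha).sample  # predicted direction from x0 to x1
        loss = F.mse_loss(pred, clean - noise)

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()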
diffusers/examples/community/iadb.py/0
{ "file_path": "diffusers/examples/community/iadb.py", "repo_id": "diffusers", "token_count": 2500 }
115
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL.Image
import torch

from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput


class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
    debug_save = False

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[
            torch.Tensor,
            PIL.Image.Image,
            np.ndarray,
            List[torch.Tensor],
            List[PIL.Image.Image],
            List[np.ndarray],
        ] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        mask: Union[
            torch.Tensor,
            PIL.Image.Image,
            np.ndarray,
            List[torch.Tensor],
            List[PIL.Image.Image],
            List[np.ndarray],
        ] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
                latents as `image`, but if passing latents directly it is not encoded again.
            strength (`float`, *optional*, defaults to 0.8):
                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
                essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter is modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). mask (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*): A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # code adapted from parent class StableDiffusionImg2ImgPipeline # 0. Check inputs. Raise error if not correct self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) # 1. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 2. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 3. Preprocess image image = self.image_processor.preprocess(image) # 4. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 5. 
Prepare latent variables # it is sampled from the latent distribution of the VAE latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator ) # mean of the latent distribution init_latents = [ self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) # 6. create latent mask latent_mask = self._make_latent_mask(latents, mask) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if latent_mask is not None: latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask) noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": scaled = latents / self.vae.config.scaling_factor if latent_mask is not None: # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask) scaled = torch.lerp(init_latents, scaled, latent_mask) image = self.vae.decode(scaled, return_dict=False)[0] if self.debug_save: image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True]) image_gen[0].save("from_latent.png") image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def _make_latent_mask(self, latents, mask): if mask is not None: latent_mask = [] if not isinstance(mask, list): tmp_mask = [mask] else: tmp_mask = mask _, l_channels, l_height, l_width = latents.shape 
for m in tmp_mask: if not isinstance(m, PIL.Image.Image): if len(m.shape) == 2: m = m[..., np.newaxis] if m.max() > 1: m = m / 255.0 m = self.image_processor.numpy_to_pil(m)[0] if m.mode != "L": m = m.convert("L") resized = self.image_processor.resize(m, l_height, l_width) if self.debug_save: resized.save("latent_mask.png") latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0)) latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents) latent_mask = latent_mask / latent_mask.max() return latent_mask
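For context, a minimal usage sketch for the pipeline above. The `custom_pipeline` name is assumed from the file path, the base checkpoint and image paths are placeholders, and the mask follows the convention documented in the docstring (non-zero elements mark the area to repaint):

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Load the community pipeline by name (assumed to match the file above).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="masked_stable_diffusion_img2img",
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("./init.png")  # placeholder input image
mask_image = load_image("./mask.png").convert("L")  # placeholder mask; non-zero = repaint

result = pipe(
    prompt="a photo of a red brick house",
    image=init_image,
    mask=mask_image,
    strength=0.75,
    num_inference_steps=50,
).images[0]
result.save("masked_img2img.png")
```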
diffusers/examples/community/masked_stable_diffusion_img2img.py/0
{ "file_path": "diffusers/examples/community/masked_stable_diffusion_img2img.py", "repo_id": "diffusers", "token_count": 6298 }
116
from typing import Callable, List, Optional, Union

import PIL.Image
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline, StableDiffusionMixin):
    r"""
    Pipeline for text based inpainting using Stable Diffusion.
    Uses CLIPSeg to get a mask from the given text, then calls the Inpainting pipeline with the generated mask.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        segmentation_model ([`CLIPSegForImageSegmentation`]):
            CLIPSeg Model to generate mask from the given text. Please refer to the [model card]() for details.
        segmentation_processor ([`CLIPSegProcessor`]):
            CLIPSeg processor to get image, text features to translate prompt to English, if necessary. Please refer
            to the [model card](https://huggingface.co/docs/transformers/model_doc/clipseg) for details.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.Tensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image
                will be masked out with `mask_image` and repainted according to `prompt`.
            text (`str`):
                The text to use to generate the mask.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
                to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of
                a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will
                be called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
                `tuple`. When returning a tuple, the first element is a list with the generated images, and the
                second element is a list of `bool`s denoting whether the corresponding generated image likely
                represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # We use the input text to generate the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
diffusers/examples/community/text_inpainting.py/0
{ "file_path": "diffusers/examples/community/text_inpainting.py", "repo_id": "diffusers", "token_count": 5464 }
117
# ControlNet training example for Stable Diffusion XL (SDXL)

The `train_controlnet_sdxl.py` script shows how to implement the ControlNet training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952).

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/controlnet` folder and run

```bash
pip install -r requirements_sdxl.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook)

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

When running `accelerate config`, specifying torch compile mode as True can give dramatic speedups.

## Circle filling dataset

The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script; a preview snippet is included at the end of the Training section below.

## Training

Our training examples use two test conditioning images. They can be downloaded by running

```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

Then run `huggingface-cli login` to log into your Hugging Face account. This is needed to be able to push the trained ControlNet parameters to the Hugging Face Hub.

```bash
export MODEL_DIR="stabilityai/stable-diffusion-xl-base-1.0"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet_sdxl.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --mixed_precision="fp16" \
 --resolution=1024 \
 --learning_rate=1e-5 \
 --max_train_steps=15000 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --validation_steps=100 \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --report_to="wandb" \
 --seed=42 \
 --push_to_hub
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
* `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 40GB A100 GPU.
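Returning to the circle-filling dataset introduced above: since `datasets` handles dataloading inside the training script, a quick way to sanity-check the data is to preview a sample directly (the column names are assumptions based on the dataset card):

```python
from datasets import load_dataset

# Preview one sample of the circle-filling dataset used in the command above.
dataset = load_dataset("fusing/fill50k", split="train")
print(dataset[0].keys())  # expected columns (assumed): image, conditioning_image, text
```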
### Inference

Once training is done, we can perform inference like so:

```python
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch

base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_path = "path to controlnet"

controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)

# speed up diffusion process with faster scheduler and memory optimization
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove following line if xformers is not installed or when using Torch 2.0.
pipe.enable_xformers_memory_efficient_attention()
# memory optimization.
pipe.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png").resize((1024, 1024))
prompt = "pale golden rod circle with old lace background"

# generate image
generator = torch.manual_seed(0)
image = pipe(
    prompt, num_inference_steps=20, generator=generator, image=control_image
).images[0]
image.save("./output.png")
```

## Notes

### Specifying a better VAE

SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of an alternative VAE (such as [`madebyollin/sdxl-vae-fp16-fix`](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).

If you're using this VAE during training, you need to ensure you're using it during inference too. You do so by:

```diff
+ vae = AutoencoderKL.from_pretrained(vae_path_or_repo_id, torch_dtype=torch.float16)
 controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
 pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     base_model_path, controlnet=controlnet, torch_dtype=torch.float16,
+    vae=vae,
 )
```
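Putting the diff above together, a full inference setup with the alternative VAE might look like the following sketch (the ControlNet path is a placeholder, as in the earlier example):

```python
import torch
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline

# Use the same fp16-fix VAE at inference that was used during training.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
controlnet = ControlNetModel.from_pretrained("path to controlnet", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
)
```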
diffusers/examples/controlnet/README_sdxl.md/0
{ "file_path": "diffusers/examples/controlnet/README_sdxl.md", "repo_id": "diffusers", "token_count": 1698 }
118
# DreamBooth training example for Stable Diffusion XL (SDXL)

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject.

The `train_dreambooth_lora_sdxl.py` script shows how to implement the training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952).

> 💡 **Note**: For now, we only allow DreamBooth fine-tuning of the SDXL UNet via LoRA. LoRA is a parameter-efficient fine-tuning technique introduced in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/dreambooth` folder and run

```bash
pip install -r requirements_sdxl.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook)

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

When running `accelerate config`, specifying torch compile mode as True can give dramatic speedups. Note also that we use the PEFT library as the backend for LoRA training; make sure to have `peft>=0.6.0` installed in your environment.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.

Now, we can launch training using:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="lora-trained-xl"
export VAE_PATH="madebyollin/sdxl-vae-fp16-fix"

accelerate launch train_dreambooth_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --pretrained_vae_model_name_or_path=$VAE_PATH \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 40GB A100 GPU.

### Dog toy example with < 16GB VRAM

By making use of [`gradient_checkpointing`](https://pytorch.org/docs/stable/checkpoint.html) (which is natively supported in Diffusers), [`xformers`](https://github.com/facebookresearch/xformers), and [`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) libraries, you can train SDXL LoRAs with less than 16GB of VRAM by adding the following flags to your accelerate launch command:

```diff
+ --enable_xformers_memory_efficient_attention \
+ --gradient_checkpointing \
+ --use_8bit_adam \
+ --mixed_precision="fp16" \
```

and making sure that you have the following libraries installed:

```
bitsandbytes>=0.40.0
xformers>=0.0.20
```

### Inference

Once training is done, we can perform inference like so:

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
image.save("sks_dog.png")
```

We can further refine the outputs with the [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0):

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

# Load the base pipeline and load the LoRA parameters into it.
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)

# Load the refiner.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
)
refiner.to("cuda")

prompt = "A picture of a sks dog in a bucket"
generator = torch.Generator("cuda").manual_seed(0)

# Run inference.
image = pipe(prompt=prompt, output_type="latent", generator=generator).images[0]
image = refiner(prompt=prompt, image=image[None, :], generator=generator).images[0]
image.save("refined_sks_dog.png")
```

Here's a side-by-side comparison of the with and without Refiner pipeline outputs:

| Without Refiner | With Refiner |
|---|---|
| ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/sks_dog.png) | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_sks_dog.png) |

### Training with text encoder(s)

Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind:

* SDXL has two text encoders. So, we fine-tune both using LoRA.
* When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory.

### Specifying a better VAE

SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).

## Notes

In our experiments, we found that SDXL yields good initial results without extensive hyperparameter tuning. For example, without fine-tuning the text encoders and without using prior-preservation, we observed decent results. We didn't explore further hyper-parameter tuning experiments, but we do encourage the community to explore this avenue further and share their results with us 🤗

## Results

You can explore the results from a couple of our internal experiments by checking out this link: [https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl](https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl). Specifically, we used the same script with the exact same hyperparameters on the following datasets:

* [Dogs](https://huggingface.co/datasets/diffusers/dog-example)
* [Starbucks logo](https://huggingface.co/datasets/diffusers/starbucks-example)
* [Mr. Potato Head](https://huggingface.co/datasets/diffusers/potato-head-example)
* [Keramer face](https://huggingface.co/datasets/diffusers/keramer-face-example)

## Running on a free-tier Colab Notebook

Check out [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_DreamBooth_LoRA_.ipynb).

## Conducting EDM-style training

It's now possible to perform EDM-style training as proposed in [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364).

For the SDXL model, simply set:

```diff
+ --do_edm_style_training \
```

Other SDXL-like models that use the EDM formulation, such as [playgroundai/playground-v2.5-1024px-aesthetic](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic), can also be DreamBooth'd with the script. Below is an example command:

```bash
accelerate launch train_dreambooth_lora_sdxl.py \
  --pretrained_model_name_or_path="playgroundai/playground-v2.5-1024px-aesthetic" \
  --instance_data_dir="dog" \
  --output_dir="dog-playground-lora" \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-4 \
  --use_8bit_adam \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

> [!CAUTION]
> Min-SNR gamma is not supported with the EDM-style training yet. When training with the PlaygroundAI model, it's recommended to not pass any "variant".

### DoRA training

The script now supports DoRA training too!

> Proposed in [DoRA: Weight-Decomposed Low-Rank Adaptation](https://arxiv.org/abs/2402.09353), **DoRA** is very similar to LoRA, except it decomposes the pre-trained weight into two components, **magnitude** and **direction**, and employs LoRA for _directional_ updates to efficiently minimize the number of trainable parameters. The authors found that by using DoRA, both the learning capacity and training stability of LoRA are enhanced without any additional overhead during inference.

> [!NOTE]
> 💡 DoRA training is still _experimental_
> and is likely to require different hyperparameter values to perform best compared to a LoRA.
> Specifically, we've noticed 2 differences to take into account in your training:
> 1. **LoRA seems to converge faster than DoRA** (so a set of parameters that may lead to overfitting when training a LoRA may be working well for a DoRA).
> 2. **DoRA quality is superior to LoRA, especially at lower ranks**: the difference in quality between a DoRA of rank 8 and a LoRA of rank 8 appears to be more significant than when training at ranks of 32 or 64, for example.
> This is also aligned with some of the quantitative analysis shown in the paper.

**Usage**

1. To use DoRA you need to upgrade the installation of `peft`:

```bash
pip install -U peft
```

2. Enable DoRA training by adding this flag:

```bash
--use_dora
```

**Inference**

The inference is the same as if you train a regular LoRA 🤗

## Format compatibility

You can pass `--output_kohya_format` to additionally generate a state dictionary which should be compatible with other platforms and tools such as Automatic 1111, Comfy, Kohya, etc. The `output_dir` will contain a file named "pytorch_lora_weights_kohya.safetensors".
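As a quick sanity check of the exported file mentioned above, you can inspect its keys with `safetensors` (the key-prefix comment is an assumption about typical Kohya-style naming):

```python
from safetensors.torch import load_file

state_dict = load_file("pytorch_lora_weights_kohya.safetensors")
# Kohya-style keys typically start with "lora_unet_" or "lora_te_".
print(list(state_dict.keys())[:5])
```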
diffusers/examples/dreambooth/README_sdxl.md/0
{ "file_path": "diffusers/examples/dreambooth/README_sdxl.md", "repo_id": "diffusers", "token_count": 3733 }
119
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import math
import os
import shutil
from pathlib import Path

import accelerate
import datasets
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from PIL import Image
from tqdm import tqdm
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from transformers.utils import ContextManagers

import diffusers
from diffusers import AutoPipelineForText2Image, DDPMScheduler, UNet2DConditionModel, VQModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel, compute_snr
from diffusers.utils import check_min_version, is_wandb_available, make_image_grid
from diffusers.utils.import_utils import is_xformers_available


if is_wandb_available():
    import wandb


# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.31.0.dev0")

logger = get_logger(__name__, log_level="INFO")


def save_model_card(
    args,
    repo_id: str,
    images=None,
    repo_folder=None,
):
    img_str = ""
    if len(images) > 0:
        image_grid = make_image_grid(images, 1, len(args.validation_prompts))
        image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png"))
        img_str += "![val_imgs_grid](./val_imgs_grid.png)\n"

    yaml = f"""
---
license: creativeml-openrail-m
base_model: {args.pretrained_decoder_model_name_or_path}
datasets:
- {args.dataset_name}
prior:
- {args.pretrained_prior_model_name_or_path}
tags:
- kandinsky
- text-to-image
- diffusers
- diffusers-training
inference: true
---
    """
    model_card = f"""
# Finetuning - {repo_id}

This pipeline was finetuned from **{args.pretrained_decoder_model_name_or_path}** on the **{args.dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n
{img_str}

## Pipeline usage

You can use the pipeline like so:

```python
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("{repo_id}", torch_dtype=torch.float16)
prompt = "{args.validation_prompts[0]}"
image = pipeline(prompt).images[0]
image.save("my_image.png")
```

## Training info

These are the key hyperparameters used during training:

* Epochs: {args.num_train_epochs}
* Learning rate: {args.learning_rate}
* Batch size: {args.train_batch_size}
* Gradient accumulation steps: {args.gradient_accumulation_steps}
* Image resolution: {args.resolution}
* Mixed-precision: {args.mixed_precision}

"""
    wandb_info = ""
    if is_wandb_available():
        wandb_run_url = None
        if wandb.run is not None:
            wandb_run_url = wandb.run.url

        if wandb_run_url is not None:
            wandb_info = f"""
More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}).
"""

    model_card += wandb_info

    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)


def log_validation(vae, image_encoder, image_processor, unet, args, accelerator, weight_dtype, epoch):
    logger.info("Running validation... ")

    pipeline = AutoPipelineForText2Image.from_pretrained(
        args.pretrained_decoder_model_name_or_path,
        vae=accelerator.unwrap_model(vae),
        prior_image_encoder=accelerator.unwrap_model(image_encoder),
        prior_image_processor=image_processor,
        unet=accelerator.unwrap_model(unet),
        torch_dtype=weight_dtype,
    )
    pipeline = pipeline.to(accelerator.device)
    pipeline.set_progress_bar_config(disable=True)

    if args.enable_xformers_memory_efficient_attention:
        pipeline.enable_xformers_memory_efficient_attention()

    if args.seed is None:
        generator = None
    else:
        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)

    images = []
    for i in range(len(args.validation_prompts)):
        with torch.autocast("cuda"):
            image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0]
        images.append(image)

    for tracker in accelerator.trackers:
        if tracker.name == "tensorboard":
            np_images = np.stack([np.asarray(img) for img in images])
            tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
        elif tracker.name == "wandb":
            tracker.log(
                {
                    "validation": [
                        wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}")
                        for i, image in enumerate(images)
                    ]
                }
            )
        else:
            logger.warning(f"image logging not implemented for {tracker.name}")

    del pipeline
    torch.cuda.empty_cache()

    return images


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of finetuning Kandinsky 2.2.")
    parser.add_argument(
        "--pretrained_decoder_model_name_or_path",
        type=str,
        default="kandinsky-community/kandinsky-2-2-decoder",
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--pretrained_prior_model_name_or_path",
        type=str,
        default="kandinsky-community/kandinsky-2-2-prior",
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--validation_prompts",
        type=str,
        default=None,
        nargs="+",
        help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="kandi_2_2-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="learning rate",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument(
        "--adam_weight_decay",
        type=float,
        default=0.0,
        required=False,
        help="weight decay_to_use",
    )
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=5,
        help="Run validation every X epochs.",
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="text2image-fine-tune",
        help=(
            "The `project_name` argument passed to Accelerator.init_trackers. For more information see"
            " https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # Sanity checks
    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Need either a dataset name or a training folder.")

    return args


def main():
    args = parse_args()

    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `huggingface-cli login` to authenticate with the Hub."
        )

    logging_dir = os.path.join(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(
        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
    )

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="scheduler")
    image_processor = CLIPImageProcessor.from_pretrained(
        args.pretrained_prior_model_name_or_path, subfolder="image_processor"
    )

    def deepspeed_zero_init_disabled_context_manager():
        """
        returns either a context list that includes one that will disable zero.Init or an empty context list
        """
        deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
        if deepspeed_plugin is None:
            return []

        return [deepspeed_plugin.zero3_init_context_manager(enable=False)]

    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
        vae = VQModel.from_pretrained(
            args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype
        ).eval()
        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
            args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
        ).eval()

    unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet")

    # Freeze vae and image_encoder
    vae.requires_grad_(False)
    image_encoder.requires_grad_(False)

    # Set unet to trainable.
    unet.train()

    # Create EMA for the unet.
    if args.use_ema:
        ema_unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet")
        ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config)
        ema_unet.to(accelerator.device)

    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers

            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warning(
                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")

    # `accelerate` 0.16.0 will have better support for customized saving
    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
        # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
        def save_model_hook(models, weights, output_dir):
            if args.use_ema:
                ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema"))

            for i, model in enumerate(models):
                model.save_pretrained(os.path.join(output_dir, "unet"))

                # make sure to pop weight so that corresponding model is not saved again
                weights.pop()

        def load_model_hook(models, input_dir):
            if args.use_ema:
                load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel)
                ema_unet.load_state_dict(load_model.state_dict())
                ema_unet.to(accelerator.device)
                del load_model

            for i in range(len(models)):
                # pop models so that they are not loaded again
                model = models.pop()

                # load diffusers style into model
                load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
                model.register_to_config(**load_model.config)

                model.load_state_dict(load_model.state_dict())
                del load_model

        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)

    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )

        optimizer_cls = bnb.optim.AdamW8bit
    else:
        optimizer_cls = torch.optim.AdamW

    optimizer = optimizer_cls(
        unet.parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    # Get the datasets: you can either provide your own training and evaluation files (see below)
    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            args.dataset_name,
            args.dataset_config_name,
            cache_dir=args.cache_dir,
        )
    else:
        data_files = {}
        if args.train_data_dir is not None:
            data_files["train"] = os.path.join(args.train_data_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=args.cache_dir,
        )
        # See more about loading custom images at
        # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder

    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    column_names = dataset["train"].column_names

    image_column = args.image_column
    if image_column not in column_names:
        raise ValueError(f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}")

    def center_crop(image):
        width, height = image.size
        new_size = min(width, height)
        left = (width - new_size) / 2
        top = (height - new_size) / 2
        right = (width + new_size) / 2
        bottom = (height + new_size) / 2
        return image.crop((left, top, right, bottom))

    def train_transforms(img):
        img = center_crop(img)
        img = img.resize((args.resolution, args.resolution), resample=Image.BICUBIC, reducing_gap=1)
        img = np.array(img).astype(np.float32) / 127.5 - 1
        img = torch.from_numpy(np.transpose(img, [2, 0, 1]))
        return img

    def preprocess_train(examples):
        images = [image.convert("RGB") for image in examples[image_column]]
        examples["pixel_values"] = [train_transforms(image) for image in images]
        examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
        return examples

    with accelerator.main_process_first():
        if args.max_train_samples is not None:
            dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
        # Set the training transforms
        train_dataset = dataset["train"].with_transform(preprocess_train)

    def collate_fn(examples):
        pixel_values = torch.stack([example["pixel_values"] for example in examples])
        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
        clip_pixel_values = torch.stack([example["clip_pixel_values"] for example in examples])
        clip_pixel_values = clip_pixel_values.to(memory_format=torch.contiguous_format).float()
        return {"pixel_values": pixel_values, "clip_pixel_values": clip_pixel_values}

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=True,
        collate_fn=collate_fn,
        batch_size=args.train_batch_size,
        num_workers=args.dataloader_num_workers,
    )

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
    )

    unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        unet, optimizer, train_dataloader, lr_scheduler
    )

    # Move image_encode and vae to gpu and cast to weight_dtype
    image_encoder.to(accelerator.device, dtype=weight_dtype)
    vae.to(accelerator.device, dtype=weight_dtype)

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initializes automatically on the main process.
    if accelerator.is_main_process:
        tracker_config = dict(vars(args))
        tracker_config.pop("validation_prompts")
        accelerator.init_trackers(args.tracker_project_name, tracker_config)

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    for epoch in range(first_epoch, args.num_train_epochs):
        train_loss = 0.0
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(unet):
                # Convert images to latent space
                images = batch["pixel_values"].to(weight_dtype)
                clip_images = batch["clip_pixel_values"].to(weight_dtype)
                latents = vae.encode(images).latents
                image_embeds = image_encoder(clip_images).image_embeds

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                # Sample a random timestep for each image
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
                timesteps = timesteps.long()

                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                target = noise

                # Predict the noise residual and compute loss
                added_cond_kwargs = {"image_embeds": image_embeds}

                model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4]

                if args.snr_gamma is None:
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
                else:
                    # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
                    # Since we predict the noise instead of x_0, the original formulation is slightly changed.
                    # This is discussed in Section 4.2 of the same paper.
                    snr = compute_snr(noise_scheduler, timesteps)
                    mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(
                        dim=1
                    )[0]
                    if noise_scheduler.config.prediction_type == "epsilon":
                        mse_loss_weights = mse_loss_weights / snr
                    elif noise_scheduler.config.prediction_type == "v_prediction":
                        mse_loss_weights = mse_loss_weights / (snr + 1)

                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
                    loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
                    loss = loss.mean()

                # Gather the losses across all processes for logging (if we use distributed training).
                avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                train_loss += avg_loss.item() / args.gradient_accumulation_steps

                # Backpropagate
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                if args.use_ema:
                    ema_unet.step(unet.parameters())
                progress_bar.update(1)
                global_step += 1
                accelerator.log({"train_loss": train_loss}, step=global_step)
                train_loss = 0.0

                if global_step % args.checkpointing_steps == 0:
                    if accelerator.is_main_process:
                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

            logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        if accelerator.is_main_process:
            if args.validation_prompts is not None and epoch % args.validation_epochs == 0:
                if args.use_ema:
                    # Store the UNet parameters temporarily and load the EMA parameters to perform inference.
                    ema_unet.store(unet.parameters())
                    ema_unet.copy_to(unet.parameters())
                log_validation(
                    vae,
                    image_encoder,
                    image_processor,
                    unet,
                    args,
                    accelerator,
                    weight_dtype,
                    global_step,
                )
                if args.use_ema:
                    # Switch back to the original UNet parameters.
                    ema_unet.restore(unet.parameters())

    # Create the pipeline using the trained modules and save it.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        unet = accelerator.unwrap_model(unet)
        if args.use_ema:
            ema_unet.copy_to(unet.parameters())

        pipeline = AutoPipelineForText2Image.from_pretrained(
            args.pretrained_decoder_model_name_or_path,
            vae=vae,
            unet=unet,
        )
        pipeline.decoder_pipe.save_pretrained(args.output_dir)

        # Run a final round of inference.
images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) pipeline.enable_model_cpu_offload() if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py
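The SNR-weighted loss in the training loop above follows Section 3.4 of the min-SNR paper (https://arxiv.org/abs/2303.09556). As a minimal, self-contained sketch of the same weighting (function names here are illustrative, not the diffusers API):

import torch

def compute_snr_sketch(alphas_cumprod: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor:
    # SNR(t) = alpha_t^2 / sigma_t^2 with alpha_t^2 = alphas_cumprod[t]
    # and sigma_t^2 = 1 - alphas_cumprod[t].
    ac = alphas_cumprod[timesteps]
    return ac / (1.0 - ac)

def min_snr_weights_sketch(snr: torch.Tensor, gamma: float, prediction_type: str) -> torch.Tensor:
    # Clamp the per-timestep SNR at gamma, then rescale for the prediction
    # target, mirroring the epsilon / v_prediction branches in the loop above.
    weights = torch.minimum(snr, torch.full_like(snr, gamma))
    if prediction_type == "epsilon":
        weights = weights / snr
    elif prediction_type == "v_prediction":
        weights = weights / (snr + 1)
    return weights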
import argparse import os import torch from PIL import Image, ImageFilter from transformers import CLIPTextModel from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel parser = argparse.ArgumentParser(description="Inference") parser.add_argument( "--model_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--validation_image", type=str, default=None, required=True, help="The directory of the validation image", ) parser.add_argument( "--validation_mask", type=str, default=None, required=True, help="The directory of the validation mask", ) parser.add_argument( "--output_dir", type=str, default="./test-infer/", help="The output directory where predictions are saved", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible inference.") args = parser.parse_args() if __name__ == "__main__": os.makedirs(args.output_dir, exist_ok=True) generator = None # create & load model pipe = StableDiffusionInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float32, revision=None ) pipe.unet = UNet2DConditionModel.from_pretrained( args.model_path, subfolder="unet", revision=None, ) pipe.text_encoder = CLIPTextModel.from_pretrained( args.model_path, subfolder="text_encoder", revision=None, ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe = pipe.to("cuda") if args.seed is not None: generator = torch.Generator(device="cuda").manual_seed(args.seed) image = Image.open(args.validation_image) mask_image = Image.open(args.validation_mask) results = pipe( ["a photo of sks"] * 16, image=image, mask_image=mask_image, num_inference_steps=25, guidance_scale=5, generator=generator, ).images erode_kernel = ImageFilter.MaxFilter(3) mask_image = mask_image.filter(erode_kernel) blur_kernel = ImageFilter.BoxBlur(1) mask_image = mask_image.filter(blur_kernel) for idx, result in enumerate(results): result = Image.composite(result, image, mask_image) result.save(f"{args.output_dir}/{idx}.png") del pipe torch.cuda.empty_cache()
diffusers/examples/research_projects/realfill/infer.py
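The erode/blur pass at the end of infer.py is what hides compositing seams; note that `ImageFilter.MaxFilter` actually dilates bright pixels despite the `erode_kernel` variable name. A minimal sketch of the same idea, with illustrative kernel sizes:

from PIL import Image, ImageFilter

def feather_mask(mask: Image.Image, grow_px: int = 3, blur_radius: int = 1) -> Image.Image:
    # MaxFilter grows the white (inpainted) region so the model's transition
    # zone ends up inside the composited area rather than on its border.
    mask = mask.filter(ImageFilter.MaxFilter(grow_px))
    # BoxBlur feathers the edge so Image.composite blends smoothly instead of
    # cutting a hard seam between generated and original pixels.
    return mask.filter(ImageFilter.BoxBlur(blur_radius))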
# Show best practices for SDXL JAX
import time

import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate

# Let's cache the model compilation, so that it doesn't take as long the next time around.
from jax.experimental.compilation_cache import compilation_cache as cc

from diffusers import FlaxStableDiffusionXLPipeline


cc.initialize_cache("/tmp/sdxl_cache")

NUM_DEVICES = jax.device_count()

# 1. Let's start by downloading the model and loading it into our pipeline class
# Adhering to JAX's functional approach, the model's parameters are returned separately and
# will have to be passed to the pipeline during inference
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
)

# 2. We cast all parameters to bfloat16 EXCEPT the scheduler which we leave in
# float32 to keep maximal precision
scheduler_state = params.pop("scheduler")
params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
params["scheduler"] = scheduler_state

# 3. Next, we define the different inputs to the pipeline
default_prompt = "a colorful photo of a castle in the middle of a forest with trees and bushes, by Ismail Inceoglu, shadows, high contrast, dynamic shading, hdr, detailed vegetation, digital painting, digital drawing, detailed painting, a detailed digital painting, gothic art, featured on deviantart"
default_neg_prompt = "fog, grainy, purple"
default_seed = 33
default_guidance_scale = 5.0
default_num_steps = 25

# 4. In order to be able to compile the pipeline
# all inputs have to be tensors or strings
# Let's tokenize the prompt and negative prompt
def tokenize_prompt(prompt, neg_prompt):
    prompt_ids = pipeline.prepare_inputs(prompt)
    neg_prompt_ids = pipeline.prepare_inputs(neg_prompt)
    return prompt_ids, neg_prompt_ids

# 5. To make full use of JAX's parallelization capabilities
# the parameters and input tensors are duplicated across devices
# To make sure every device generates a different image, we create
# different seeds for each image. The model parameters won't change
# during inference so we do not wrap them into a function
p_params = replicate(params)

def replicate_all(prompt_ids, neg_prompt_ids, seed):
    p_prompt_ids = replicate(prompt_ids)
    p_neg_prompt_ids = replicate(neg_prompt_ids)
    rng = jax.random.PRNGKey(seed)
    rng = jax.random.split(rng, NUM_DEVICES)
    return p_prompt_ids, p_neg_prompt_ids, rng

# 6. Let's now put it all together in a generate function
def generate(
    prompt,
    negative_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale,
    num_inference_steps=default_num_steps,
):
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
    images = pipeline(
        prompt_ids,
        p_params,
        rng,
        num_inference_steps=num_inference_steps,
        neg_prompt_ids=neg_prompt_ids,
        guidance_scale=guidance_scale,
        jit=True,
    ).images

    # convert the images to PIL
    images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
    return pipeline.numpy_to_pil(np.array(images))

# 7. Remember that the first call will compile the function and hence be very slow. Let's run generate once
# so that the pipeline call is compiled
start = time.time()
print("Compiling ...")
generate(default_prompt, default_neg_prompt)
print(f"Compiled in {time.time() - start}")

# 8. Now the model forward pass will run very quickly, let's try it again
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")

for i, image in enumerate(images):
    image.save(f"castle_{i}.png")
diffusers/examples/research_projects/sdxl_flax/sdxl_single.py
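Two idioms in sdxl_single.py are worth isolating: one seed is split into per-device PRNG keys so every device draws different latents, and the pmapped output carries a leading device axis that must be folded back into the batch before conversion to PIL. A hedged sketch, assuming the usual (devices, per_device_batch, H, W, C) layout:

import jax

def per_device_rngs(seed, num_devices):
    # One key per device: identical parameters everywhere, different noise.
    return jax.random.split(jax.random.PRNGKey(seed), num_devices)

def fold_device_axis(images):
    # (devices, per_device_batch, H, W, C) -> (devices * per_device_batch, H, W, C)
    return images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])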
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors from diffusers import DiffusionPipeline # noqa: E402 sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextToImageLoRA(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl_checkpointing_checkpoints_total_limit(self): prompt = "a prompt" pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in 
os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # resume and we should try to checkpoint at 6, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 8 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, ) class TextToImageLoRASDXL(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) def test_text_to_image_lora_sdxl_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. keys = lora_state_dict.keys() starts_with_unet = all( k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys ) self.assertTrue(starts_with_unet) def test_text_to_image_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self): prompt = "a prompt" pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --train_text_encoder --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
diffusers/examples/text_to_image/test_text_to_image_lora.py
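The `checkpoints_total_limit` behaviour these tests assert is the rotation policy from the training loop earlier: before writing `checkpoint-N`, prune the oldest directories so at most `limit - 1` remain. A self-contained sketch of that policy:

import os
import shutil

def rotate_checkpoints(output_dir: str, total_limit: int) -> None:
    ckpts = [d for d in os.listdir(output_dir) if d.startswith("checkpoint")]
    ckpts = sorted(ckpts, key=lambda x: int(x.split("-")[1]))
    # Keep at most total_limit - 1 entries so the checkpoint about to be
    # written brings the count back up to exactly total_limit.
    if len(ckpts) >= total_limit:
        for stale in ckpts[: len(ckpts) - total_limit + 1]:
            shutil.rmtree(os.path.join(output_dir, stale))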
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNet2DConditionModel, UNet2DModel do_only_config = False do_only_weights = True do_only_renaming = False if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--repo_path", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() config_parameters_to_change = { "image_size": "sample_size", "num_res_blocks": "layers_per_block", "block_channels": "block_out_channels", "down_blocks": "down_block_types", "up_blocks": "up_block_types", "downscale_freq_shift": "freq_shift", "resnet_num_groups": "norm_num_groups", "resnet_act_fn": "act_fn", "resnet_eps": "norm_eps", "num_head_channels": "attention_head_dim", } key_parameters_to_change = { "time_steps": "time_proj", "mid": "mid_block", "downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks", } subfolder = "" if has_file(args.repo_path, "config.json") else "unet" with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader: text = reader.read() config = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, "config.json"): model = UNet2DModel(**config) else: class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel model = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) config = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: config[value] = config[key] del config[key] config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]] config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]] if do_only_weights: state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin")) new_state_dict = {} for param_key, param_value in state_dict.items(): if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"): continue has_changed = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(".")[0] == key: new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value has_changed = True if not has_changed: new_state_dict[param_key] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
diffusers/scripts/change_naming_configs_and_checkpoints.py
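The renaming performed above is a pure key rewrite: the leading segment of each dotted parameter path is swapped for its diffusers name and the rest of the path is preserved. A minimal sketch (the helper name is hypothetical):

def remap_prefix(param_key: str, prefix_map: dict) -> str:
    # Replace only the first dotted segment; keep the remainder verbatim.
    head, *rest = param_key.split(".")
    return ".".join([prefix_map.get(head, head)] + rest)

# e.g. "downsample_blocks.0.weight" becomes "down_blocks.0.weight"
assert remap_prefix("downsample_blocks.0.weight", {"downsample_blocks": "down_blocks"}) == "down_blocks.0.weight"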
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import torch from diffusers import UNet3DConditionModel def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear weight = old_checkpoint[path["old"]] names = ["proj_attn.weight"] names_2 = ["proj_out.weight", "proj_in.weight"] if any(k in new_path for k in names): checkpoint[new_path] = weight[:, :, 0] elif any(k in new_path for k in names_2) and len(weight.shape) > 2 and ".attentions." not in new_path: checkpoint[new_path] = weight[:, :, 0] else: checkpoint[new_path] = weight def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_temp_conv_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: mapping.append({"old": old_item, "new": old_item}) return mapping def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) if "temopral_conv" not in old_item: mapping.append({"old": old_item, "new": new_item}) return mapping def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: print(f"Checkpoint {path} has both EMA and non-EMA weights.") print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." ) for key in keys: unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] if config["class_embed_type"] is None: # No parameters to port ... 
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] else: raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] first_temp_attention = [v for v in unet_state_dict if v.startswith("input_blocks.0.1")] paths = renew_attention_paths(first_temp_attention) meta_path = {"old": "input_blocks.0.1", "new": "transformer_in"} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] temp_attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.2" in key] if f"input_blocks.{i}.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temporal_convs = [key for key in resnets if "temopral_conv" in key] paths = renew_temp_conv_paths(temporal_convs) meta_path = { "old": f"input_blocks.{i}.0.temopral_conv", "new": f"down_blocks.{block_id}.temp_convs.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if 
len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(temp_attentions): paths = renew_attention_paths(temp_attentions) meta_path = { "old": f"input_blocks.{i}.2", "new": f"down_blocks.{block_id}.temp_attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] temporal_convs_0 = [key for key in resnet_0 if "temopral_conv" in key] attentions = middle_blocks[1] temp_attentions = middle_blocks[2] resnet_1 = middle_blocks[3] temporal_convs_1 = [key for key in resnet_1 if "temopral_conv" in key] resnet_0_paths = renew_resnet_paths(resnet_0) meta_path = {"old": "middle_block.0", "new": "mid_block.resnets.0"} assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) temp_conv_0_paths = renew_temp_conv_paths(temporal_convs_0) meta_path = {"old": "middle_block.0.temopral_conv", "new": "mid_block.temp_convs.0"} assign_to_checkpoint( temp_conv_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) resnet_1_paths = renew_resnet_paths(resnet_1) meta_path = {"old": "middle_block.3", "new": "mid_block.resnets.1"} assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) temp_conv_1_paths = renew_temp_conv_paths(temporal_convs_1) meta_path = {"old": "middle_block.3.temopral_conv", "new": "mid_block.temp_convs.1"} assign_to_checkpoint( temp_conv_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temp_attentions_paths = renew_attention_paths(temp_attentions) meta_path = {"old": "middle_block.2", "new": "mid_block.temp_attentions.0"} assign_to_checkpoint( temp_attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] temp_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temporal_convs = [key for key in resnets if "temopral_conv" in key] paths = renew_temp_conv_paths(temporal_convs) 
meta_path = { "old": f"output_blocks.{i}.0.temopral_conv", "new": f"up_blocks.{block_id}.temp_convs.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(temp_attentions): paths = renew_attention_paths(temp_attentions) meta_path = { "old": f"output_blocks.{i}.2", "new": f"up_blocks.{block_id}.temp_attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] temopral_conv_paths = [l for l in output_block_layers if "temopral_conv" in l] for path in temopral_conv_paths: pruned_path = path.split("temopral_conv.")[-1] old_path = ".".join(["output_blocks", str(i), str(block_id), "temopral_conv", pruned_path]) new_path = ".".join(["up_blocks", str(block_id), "temp_convs", str(layer_in_block_id), pruned_path]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() unet_checkpoint = torch.load(args.checkpoint_path, map_location="cpu") unet = UNet3DConditionModel() converted_ckpt = convert_ldm_unet_checkpoint(unet_checkpoint, unet.config) diff_0 = set(unet.state_dict().keys()) - set(converted_ckpt.keys()) diff_1 = set(converted_ckpt.keys()) - set(unet.state_dict().keys()) assert len(diff_0) == len(diff_1) == 0, "Converted weights don't match" # load state_dict unet.load_state_dict(converted_ckpt) unet.save_pretrained(args.dump_path) # -- finish converting the unet --
diffusers/scripts/convert_ms_text_to_video_to_diffusers.py
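The one non-trivial piece of tensor surgery in `assign_to_checkpoint` is splitting a fused attention projection into per-head query/key/value tensors. A hedged standalone version of that split (the function name is illustrative):

import torch

def split_fused_qkv(fused: torch.Tensor, num_head_channels: int):
    # `fused` stacks query, key and value along dim 0, interleaved per head.
    channels = fused.shape[0] // 3
    num_heads = fused.shape[0] // num_head_channels // 3
    # Conv-style weights (3C, C, 1) flatten to (C, C); biases (3C,) to (C,).
    target = (-1, channels) if fused.dim() == 3 else (-1,)
    fused = fused.reshape((num_heads, 3 * channels // num_heads) + fused.shape[1:])
    q, k, v = fused.split(channels // num_heads, dim=1)
    return q.reshape(target), k.reshape(target), v.reshape(target)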
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def onnx_export( model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ): output_path.parent.mkdir(parents=True, exist_ok=True) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, ) else: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, ) @torch.no_grad() def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False): dtype = torch.float16 if fp16 else torch.float32 if fp16 and torch.cuda.is_available(): device = "cuda" elif fp16 and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA") else: device = "cpu" pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device) output_path = Path(output_path) # TEXT ENCODER num_tokens = pipeline.text_encoder.config.max_position_embeddings text_hidden_size = pipeline.text_encoder.config.hidden_size text_input = pipeline.tokenizer( "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) onnx_export( pipeline.text_encoder, # casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, }, opset=opset, ) del pipeline.text_encoder # UNET unet_in_channels = pipeline.unet.config.in_channels unet_sample_size = pipeline.unet.config.sample_size unet_path = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet, model_args=( torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype), torch.randn(2).to(device=device, dtype=dtype), torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype), False, ), output_path=unet_path, ordered_input_names=["sample", 
"timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], # has to be different from "sample" for correct tracing dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, }, opset=opset, use_external_data_format=True, # UNet is > 2GB, so the weights need to be split ) unet_model_path = str(unet_path.absolute().as_posix()) unet_dir = os.path.dirname(unet_model_path) unet = onnx.load(unet_model_path) # clean up existing tensor files shutil.rmtree(unet_dir) os.mkdir(unet_dir) # collate external tensor files into one onnx.save_model( unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False, ) del pipeline.unet # VAE ENCODER vae_encoder = pipeline.vae vae_in_channels = vae_encoder.config.in_channels vae_sample_size = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample() onnx_export( vae_encoder, model_args=( torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) # VAE DECODER vae_decoder = pipeline.vae vae_latent_channels = vae_decoder.config.latent_channels vae_out_channels = vae_decoder.config.out_channels # forward only through the decoder part vae_decoder.forward = vae_encoder.decode onnx_export( vae_decoder, model_args=( torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: safety_checker = pipeline.safety_checker clip_num_channels = safety_checker.config.vision_config.num_channels clip_image_size = safety_checker.config.vision_config.image_size safety_checker.forward = safety_checker.forward_onnx onnx_export( pipeline.safety_checker, model_args=( torch.randn( 1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype), torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype), ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, }, opset=opset, ) del pipeline.safety_checker safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker") feature_extractor = pipeline.feature_extractor else: safety_checker = None feature_extractor = None onnx_pipeline = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path 
/ "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, ) onnx_pipeline.save_pretrained(output_path) print("ONNX pipeline saved to", output_path) del pipeline del onnx_pipeline _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider") print("ONNX pipeline is loadable") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fp16)
diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py
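As a minimal illustration of the export pattern the converter applies to every component (symbolic batch/sequence axes plus a pinned opset), here is a toy module export; the module and output file name are made up for the example:

import torch

class Doubler(torch.nn.Module):
    def forward(self, x):
        return x * 2

torch.onnx.export(
    Doubler(),
    (torch.zeros(1, 4),),
    "doubler.onnx",
    input_names=["x"],
    output_names=["y"],
    # Symbolic axes keep one graph valid for any batch / sequence length.
    dynamic_axes={"x": {0: "batch", 1: "sequence"}},
    do_constant_folding=True,
    opset_version=14,
)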
from typing import Any, Dict, List from .configuration_utils import ConfigMixin, register_to_config from .utils import CONFIG_NAME class PipelineCallback(ConfigMixin): """ Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing custom callbacks and ensures that all callbacks have a consistent interface. Please implement the following: `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. `callback_fn`: This method defines the core functionality of your callback. """ config_name = CONFIG_NAME @register_to_config def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): super().__init__() if (cutoff_step_ratio is None and cutoff_step_index is None) or ( cutoff_step_ratio is not None and cutoff_step_index is not None ): raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") if cutoff_step_ratio is not None and ( not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) ): raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") @property def tensor_inputs(self) -> List[str]: raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}") def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) class MultiPipelineCallbacks: """ This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and provides a unified interface for calling all of them. """ def __init__(self, callbacks: List[PipelineCallback]): self.callbacks = callbacks @property def tensor_inputs(self) -> List[str]: return [input for callback in self.callbacks for input in callback.tensor_inputs] def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: """ Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. """ for callback in self.callbacks: callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) return callback_kwargs class SDCFGCutoffCallback(PipelineCallback): """ Callback function for Stable Diffusion Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = ["prompt_embeds"] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. 
pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds return callback_kwargs class SDXLCFGCutoffCallback(PipelineCallback): """ Callback function for Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. add_text_embeds = callback_kwargs[self.tensor_inputs[1]] add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens add_time_ids = callback_kwargs[self.tensor_inputs[2]] add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = add_text_embeds callback_kwargs[self.tensor_inputs[2]] = add_time_ids return callback_kwargs class IPAdapterScaleCutoffCallback(PipelineCallback): """ Callback function for any pipeline that inherits `IPAdapterMixin`. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. """ tensor_inputs = [] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: pipeline.set_ip_adapter_scale(0.0) return callback_kwargs
diffusers/src/diffusers/callbacks.py/0
{ "file_path": "diffusers/src/diffusers/callbacks.py", "repo_id": "diffusers", "token_count": 2521 }
127
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import torch from ..utils import is_peft_version, logging logger = logging.get_logger(__name__) def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5): # 1. get all state_dict_keys all_keys = list(state_dict.keys()) sgm_patterns = ["input_blocks", "middle_block", "output_blocks"] # 2. check if needs remapping, if not return original dict is_in_sgm_format = False for key in all_keys: if any(p in key for p in sgm_patterns): is_in_sgm_format = True break if not is_in_sgm_format: return state_dict # 3. Else remap from SGM patterns new_state_dict = {} inner_block_map = ["resnets", "attentions", "upsamplers"] # Retrieves # of down, mid and up blocks input_block_ids, middle_block_ids, output_block_ids = set(), set(), set() for layer in all_keys: if "text" in layer: new_state_dict[layer] = state_dict.pop(layer) else: layer_id = int(layer.split(delimiter)[:block_slice_pos][-1]) if sgm_patterns[0] in layer: input_block_ids.add(layer_id) elif sgm_patterns[1] in layer: middle_block_ids.add(layer_id) elif sgm_patterns[2] in layer: output_block_ids.add(layer_id) else: raise ValueError(f"Checkpoint not supported because layer {layer} not supported.") input_blocks = { layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key] for layer_id in input_block_ids } middle_blocks = { layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key] for layer_id in middle_block_ids } output_blocks = { layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key] for layer_id in output_block_ids } # Rename keys accordingly for i in input_block_ids: block_id = (i - 1) // (unet_config.layers_per_block + 1) layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1) for key in input_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers" inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0" new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1 :] ) new_state_dict[new_key] = state_dict.pop(key) for i in middle_block_ids: key_part = None if i == 0: key_part = [inner_block_map[0], "0"] elif i == 1: key_part = [inner_block_map[1], "0"] elif i == 2: key_part = [inner_block_map[0], "1"] else: raise ValueError(f"Invalid middle block id {i}.") for key in middle_blocks[i]: new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:] ) new_state_dict[new_key] = state_dict.pop(key) for i in output_block_ids: block_id = i // (unet_config.layers_per_block + 1) layer_in_block_id = i % (unet_config.layers_per_block + 1) for key in output_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) 
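# For SGM output blocks the inner index maps 0 -> "resnets", 1 -> "attentions", 2 -> "upsamplers"; upsamplers always sit at position 0 of a diffusers block, hence the `inner_block_id < 2` check just below.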
inner_block_key = inner_block_map[inner_block_id] inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0" new_key = delimiter.join( key.split(delimiter)[: block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1 :] ) new_state_dict[new_key] = state_dict.pop(key) if len(state_dict) > 0: raise ValueError("At this point all state dict entries have to be converted.") return new_state_dict def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"): """ Converts a non-Diffusers LoRA state dict to a Diffusers compatible state dict. Args: state_dict (`dict`): The state dict to convert. unet_name (`str`, optional): The name of the U-Net module in the Diffusers model. Defaults to "unet". text_encoder_name (`str`, optional): The name of the text encoder module in the Diffusers model. Defaults to "text_encoder". Returns: `tuple`: A tuple containing the converted state dict and a dictionary of alphas. """ unet_state_dict = {} te_state_dict = {} te2_state_dict = {} network_alphas = {} # Check for DoRA-enabled LoRAs. dora_present_in_unet = any("dora_scale" in k and "lora_unet_" in k for k in state_dict) dora_present_in_te = any("dora_scale" in k and ("lora_te_" in k or "lora_te1_" in k) for k in state_dict) dora_present_in_te2 = any("dora_scale" in k and "lora_te2_" in k for k in state_dict) if dora_present_in_unet or dora_present_in_te or dora_present_in_te2: if is_peft_version("<", "0.9.0"): raise ValueError( "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." ) # Iterate over all LoRA weights. all_lora_keys = list(state_dict.keys()) for key in all_lora_keys: if not key.endswith("lora_down.weight"): continue # Extract LoRA name. lora_name = key.split(".")[0] # Find corresponding up weight and alpha. lora_name_up = lora_name + ".lora_up.weight" lora_name_alpha = lora_name + ".alpha" # Handle U-Net LoRAs. if lora_name.startswith("lora_unet_"): diffusers_name = _convert_unet_lora_key(key) # Store down and up weights. unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) # Store DoRA scale if present. if dora_present_in_unet: dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down." unet_state_dict[ diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.") ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) # Handle text encoder LoRAs. elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")): diffusers_name = _convert_text_encoder_lora_key(key, lora_name) # Store down and up weights for te or te2. if lora_name.startswith(("lora_te_", "lora_te1_")): te_state_dict[diffusers_name] = state_dict.pop(key) te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) else: te2_state_dict[diffusers_name] = state_dict.pop(key) te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) # Store DoRA scale if present. if dora_present_in_te or dora_present_in_te2: dora_scale_key_to_replace_te = ( "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer." 
) if lora_name.startswith(("lora_te_", "lora_te1_")): te_state_dict[ diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) elif lora_name.startswith("lora_te2_"): te2_state_dict[ diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) # Store alpha if present. if lora_name_alpha in state_dict: alpha = state_dict.pop(lora_name_alpha).item() network_alphas.update(_get_alpha_name(lora_name_alpha, diffusers_name, alpha)) # Check if any keys remain. if len(state_dict) > 0: raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}") logger.info("Non-diffusers checkpoint detected.") # Construct final state dict. unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()} te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()} te2_state_dict = ( {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()} if len(te2_state_dict) > 0 else None ) if te2_state_dict is not None: te_state_dict.update(te2_state_dict) new_state_dict = {**unet_state_dict, **te_state_dict} return new_state_dict, network_alphas def _convert_unet_lora_key(key): """ Converts a U-Net LoRA key to a Diffusers compatible key. """ diffusers_name = key.replace("lora_unet_", "").replace("_", ".") # Replace common U-Net naming patterns. diffusers_name = diffusers_name.replace("input.blocks", "down_blocks") diffusers_name = diffusers_name.replace("down.blocks", "down_blocks") diffusers_name = diffusers_name.replace("middle.block", "mid_block") diffusers_name = diffusers_name.replace("mid.block", "mid_block") diffusers_name = diffusers_name.replace("output.blocks", "up_blocks") diffusers_name = diffusers_name.replace("up.blocks", "up_blocks") diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks") diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora") diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora") diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora") diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora") diffusers_name = diffusers_name.replace("proj.in", "proj_in") diffusers_name = diffusers_name.replace("proj.out", "proj_out") diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj") # SDXL specific conversions. if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name: pattern = r"\.\d+(?=\D*$)" diffusers_name = re.sub(pattern, "", diffusers_name, count=1) if ".in." in diffusers_name: diffusers_name = diffusers_name.replace("in.layers.2", "conv1") if ".out." in diffusers_name: diffusers_name = diffusers_name.replace("out.layers.3", "conv2") if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name: diffusers_name = diffusers_name.replace("op", "conv") if "skip" in diffusers_name: diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut") # LyCORIS specific conversions. if "time.emb.proj" in diffusers_name: diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj") if "conv.shortcut" in diffusers_name: diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut") # General conversions. 
if "transformer_blocks" in diffusers_name: if "attn1" in diffusers_name or "attn2" in diffusers_name: diffusers_name = diffusers_name.replace("attn1", "attn1.processor") diffusers_name = diffusers_name.replace("attn2", "attn2.processor") elif "ff" in diffusers_name: pass elif any(key in diffusers_name for key in ("proj_in", "proj_out")): pass else: pass return diffusers_name def _convert_text_encoder_lora_key(key, lora_name): """ Converts a text encoder LoRA key to a Diffusers compatible key. """ if lora_name.startswith(("lora_te_", "lora_te1_")): key_to_replace = "lora_te_" if lora_name.startswith("lora_te_") else "lora_te1_" else: key_to_replace = "lora_te2_" diffusers_name = key.replace(key_to_replace, "").replace("_", ".") diffusers_name = diffusers_name.replace("text.model", "text_model") diffusers_name = diffusers_name.replace("self.attn", "self_attn") diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") diffusers_name = diffusers_name.replace("text.projection", "text_projection") if "self_attn" in diffusers_name or "text_projection" in diffusers_name: pass elif "mlp" in diffusers_name: # Be aware that this is the new diffusers convention and the rest of the code might # not utilize it yet. diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") return diffusers_name def _get_alpha_name(lora_name_alpha, diffusers_name, alpha): """ Gets the correct alpha name for the Diffusers model. """ if lora_name_alpha.startswith("lora_unet_"): prefix = "unet." elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")): prefix = "text_encoder." else: prefix = "text_encoder_2." new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha" return {new_name: alpha} # The utilities under `_convert_kohya_flux_lora_to_diffusers()` # are taken from https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py # All credits go to `kohya-ss`. def _convert_kohya_flux_lora_to_diffusers(state_dict): def _convert_to_ai_toolkit(sds_sd, ait_sd, sds_key, ait_key): if sds_key + ".lora_down.weight" not in sds_sd: return down_weight = sds_sd.pop(sds_key + ".lora_down.weight") # scale weight by alpha and dim rank = down_weight.shape[0] alpha = sds_sd.pop(sds_key + ".alpha").item() # alpha is scalar scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here # calculate scale_down and scale_up to keep the same value. 
# if scale is 4, scale_down is 2 and scale_up is 2 scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 ait_sd[ait_key + ".lora_A.weight"] = down_weight * scale_down ait_sd[ait_key + ".lora_B.weight"] = sds_sd.pop(sds_key + ".lora_up.weight") * scale_up def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None): if sds_key + ".lora_down.weight" not in sds_sd: return down_weight = sds_sd.pop(sds_key + ".lora_down.weight") up_weight = sds_sd.pop(sds_key + ".lora_up.weight") sd_lora_rank = down_weight.shape[0] # scale weight by alpha and dim alpha = sds_sd.pop(sds_key + ".alpha") scale = alpha / sd_lora_rank # calculate scale_down and scale_up scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 down_weight = down_weight * scale_down up_weight = up_weight * scale_up # calculate dims if not provided num_splits = len(ait_keys) if dims is None: dims = [up_weight.shape[0] // num_splits] * num_splits else: assert sum(dims) == up_weight.shape[0] # check upweight is sparse or not is_sparse = False if sd_lora_rank % num_splits == 0: ait_rank = sd_lora_rank // num_splits is_sparse = True i = 0 for j in range(len(dims)): for k in range(len(dims)): if j == k: continue is_sparse = is_sparse and torch.all( up_weight[i : i + dims[j], k * ait_rank : (k + 1) * ait_rank] == 0 ) i += dims[j] if is_sparse: logger.info(f"weight is sparse: {sds_key}") # make ai-toolkit weight ait_down_keys = [k + ".lora_A.weight" for k in ait_keys] ait_up_keys = [k + ".lora_B.weight" for k in ait_keys] if not is_sparse: # down_weight is copied to each split ait_sd.update({k: down_weight for k in ait_down_keys}) # up_weight is split to each split ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416 else: # down_weight is chunked to each split ait_sd.update({k: v for k, v in zip(ait_down_keys, torch.chunk(down_weight, num_splits, dim=0))}) # noqa: C416 # up_weight is sparse: only non-zero values are copied to each split i = 0 for j in range(len(dims)): ait_sd[ait_up_keys[j]] = up_weight[i : i + dims[j], j * ait_rank : (j + 1) * ait_rank].contiguous() i += dims[j] def _convert_sd_scripts_to_ai_toolkit(sds_sd): ait_sd = {} for i in range(19): _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_img_attn_proj", f"transformer.transformer_blocks.{i}.attn.to_out.0", ) _convert_to_ai_toolkit_cat( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_img_attn_qkv", [ f"transformer.transformer_blocks.{i}.attn.to_q", f"transformer.transformer_blocks.{i}.attn.to_k", f"transformer.transformer_blocks.{i}.attn.to_v", ], ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_img_mlp_0", f"transformer.transformer_blocks.{i}.ff.net.0.proj", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_img_mlp_2", f"transformer.transformer_blocks.{i}.ff.net.2", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_img_mod_lin", f"transformer.transformer_blocks.{i}.norm1.linear", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_attn_proj", f"transformer.transformer_blocks.{i}.attn.to_add_out", ) _convert_to_ai_toolkit_cat( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_attn_qkv", [ f"transformer.transformer_blocks.{i}.attn.add_q_proj", f"transformer.transformer_blocks.{i}.attn.add_k_proj", f"transformer.transformer_blocks.{i}.attn.add_v_proj", ], ) _convert_to_ai_toolkit( sds_sd, ait_sd,
f"lora_unet_double_blocks_{i}_txt_mlp_0", f"transformer.transformer_blocks.{i}.ff_context.net.0.proj", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_mlp_2", f"transformer.transformer_blocks.{i}.ff_context.net.2", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_mod_lin", f"transformer.transformer_blocks.{i}.norm1_context.linear", ) for i in range(38): _convert_to_ai_toolkit_cat( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_linear1", [ f"transformer.single_transformer_blocks.{i}.attn.to_q", f"transformer.single_transformer_blocks.{i}.attn.to_k", f"transformer.single_transformer_blocks.{i}.attn.to_v", f"transformer.single_transformer_blocks.{i}.proj_mlp", ], dims=[3072, 3072, 3072, 12288], ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_linear2", f"transformer.single_transformer_blocks.{i}.proj_out", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_modulation_lin", f"transformer.single_transformer_blocks.{i}.norm.linear", ) if len(sds_sd) > 0: logger.warning(f"Unsuppored keys for ai-toolkit: {sds_sd.keys()}") return ait_sd return _convert_sd_scripts_to_ai_toolkit(state_dict) # Adapted from https://gist.github.com/Leommm-byte/6b331a1e9bd53271210b26543a7065d6 # Some utilities were reused from # https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py def _convert_xlabs_flux_lora_to_diffusers(old_state_dict): new_state_dict = {} orig_keys = list(old_state_dict.keys()) def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None): down_weight = sds_sd.pop(sds_key) up_weight = sds_sd.pop(sds_key.replace(".down.weight", ".up.weight")) # calculate dims if not provided num_splits = len(ait_keys) if dims is None: dims = [up_weight.shape[0] // num_splits] * num_splits else: assert sum(dims) == up_weight.shape[0] # make ai-toolkit weight ait_down_keys = [k + ".lora_A.weight" for k in ait_keys] ait_up_keys = [k + ".lora_B.weight" for k in ait_keys] # down_weight is copied to each split ait_sd.update({k: down_weight for k in ait_down_keys}) # up_weight is split to each split ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416 for old_key in orig_keys: # Handle double_blocks if old_key.startswith(("diffusion_model.double_blocks", "double_blocks")): block_num = re.search(r"double_blocks\.(\d+)", old_key).group(1) new_key = f"transformer.transformer_blocks.{block_num}" if "processor.proj_lora1" in old_key: new_key += ".attn.to_out.0" elif "processor.proj_lora2" in old_key: new_key += ".attn.to_add_out" elif "processor.qkv_lora1" in old_key and "up" not in old_key: handle_qkv( old_state_dict, new_state_dict, old_key, [ f"transformer.transformer_blocks.{block_num}.attn.add_q_proj", f"transformer.transformer_blocks.{block_num}.attn.add_k_proj", f"transformer.transformer_blocks.{block_num}.attn.add_v_proj", ], ) # continue elif "processor.qkv_lora2" in old_key and "up" not in old_key: handle_qkv( old_state_dict, new_state_dict, old_key, [ f"transformer.transformer_blocks.{block_num}.attn.to_q", f"transformer.transformer_blocks.{block_num}.attn.to_k", f"transformer.transformer_blocks.{block_num}.attn.to_v", ], ) # continue if "down" in old_key: new_key += ".lora_A.weight" elif "up" in old_key: new_key += ".lora_B.weight" # Handle single_blocks elif old_key.startswith("diffusion_model.single_blocks", "single_blocks"): block_num = re.search(r"single_blocks\.(\d+)", old_key).group(1) new_key = 
f"transformer.single_transformer_blocks.{block_num}" if "proj_lora1" in old_key or "proj_lora2" in old_key: new_key += ".proj_out" elif "qkv_lora1" in old_key or "qkv_lora2" in old_key: new_key += ".norm.linear" if "down" in old_key: new_key += ".lora_A.weight" elif "up" in old_key: new_key += ".lora_B.weight" else: # Handle other potential key patterns here new_key = old_key # Since we already handle qkv above. if "qkv" not in old_key: new_state_dict[new_key] = old_state_dict.pop(old_key) if len(old_state_dict) > 0: raise ValueError(f"`old_state_dict` should be at this point but has: {list(old_state_dict.keys())}.") return new_state_dict
diffusers/src/diffusers/loaders/lora_conversion_utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/lora_conversion_utils.py", "repo_id": "diffusers", "token_count": 13067 }
128
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..image_processor import IPAdapterMaskProcessor from ..utils import deprecate, logging from ..utils.import_utils import is_torch_npu_available, is_xformers_available from ..utils.torch_utils import is_torch_version, maybe_allow_in_graph logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_torch_npu_available(): import torch_npu if is_xformers_available(): import xformers import xformers.ops else: xformers = None @maybe_allow_in_graph class Attention(nn.Module): r""" A cross attention layer. Parameters: query_dim (`int`): The number of channels in the query. cross_attention_dim (`int`, *optional*): The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention. kv_heads (`int`, *optional*, defaults to `None`): The number of key and value heads to use for multi-head attention. Defaults to `heads`. If `kv_heads=heads`, the model will use Multi Head Attention (MHA), if `kv_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. dim_head (`int`, *optional*, defaults to 64): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. bias (`bool`, *optional*, defaults to False): Set to `True` for the query, key, and value linear layers to contain a bias parameter. upcast_attention (`bool`, *optional*, defaults to False): Set to `True` to upcast the attention computation to `float32`. upcast_softmax (`bool`, *optional*, defaults to False): Set to `True` to upcast the softmax computation to `float32`. cross_attention_norm (`str`, *optional*, defaults to `None`): The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the group norm in the cross attention. added_kv_proj_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the added key and value projections. If `None`, no projection is used. norm_num_groups (`int`, *optional*, defaults to `None`): The number of groups to use for the group norm in the attention. spatial_norm_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the spatial normalization. out_bias (`bool`, *optional*, defaults to `True`): Set to `True` to use a bias in the output linear layer. scale_qk (`bool`, *optional*, defaults to `True`): Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. only_cross_attention (`bool`, *optional*, defaults to `False`): Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if `added_kv_proj_dim` is not `None`. 
eps (`float`, *optional*, defaults to 1e-5): An additional value added to the denominator in group normalization that is used for numerical stability. rescale_output_factor (`float`, *optional*, defaults to 1.0): A factor to rescale the output by dividing it with this value. residual_connection (`bool`, *optional*, defaults to `False`): Set to `True` to add the residual connection to the output. _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): Set to `True` if the attention block is loaded from a deprecated state dict. processor (`AttnProcessor`, *optional*, defaults to `None`): The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and `AttnProcessor` otherwise. """ def __init__( self, query_dim: int, cross_attention_dim: Optional[int] = None, heads: int = 8, kv_heads: Optional[int] = None, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, upcast_attention: bool = False, upcast_softmax: bool = False, cross_attention_norm: Optional[str] = None, cross_attention_norm_num_groups: int = 32, qk_norm: Optional[str] = None, added_kv_proj_dim: Optional[int] = None, added_proj_bias: Optional[bool] = True, norm_num_groups: Optional[int] = None, spatial_norm_dim: Optional[int] = None, out_bias: bool = True, scale_qk: bool = True, only_cross_attention: bool = False, eps: float = 1e-5, rescale_output_factor: float = 1.0, residual_connection: bool = False, _from_deprecated_attn_block: bool = False, processor: Optional["AttnProcessor"] = None, out_dim: int = None, context_pre_only=None, pre_only=False, ): super().__init__() # To prevent circular import. from .normalization import FP32LayerNorm, RMSNorm self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.inner_kv_dim = self.inner_dim if kv_heads is None else dim_head * kv_heads self.query_dim = query_dim self.use_bias = bias self.is_cross_attention = cross_attention_dim is not None self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim self.upcast_attention = upcast_attention self.upcast_softmax = upcast_softmax self.rescale_output_factor = rescale_output_factor self.residual_connection = residual_connection self.dropout = dropout self.fused_projections = False self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only # we make use of this private variable to know whether this class is loaded # with an deprecated state dict so that we can convert it on the fly self._from_deprecated_attn_block = _from_deprecated_attn_block self.scale_qk = scale_qk self.scale = dim_head**-0.5 if self.scale_qk else 1.0 self.heads = out_dim // dim_head if out_dim is not None else heads # for slice_size > 0 the attention score computation # is split across the batch axis to save memory # You can set slice_size with `set_attention_slice` self.sliceable_head_dim = heads self.added_kv_proj_dim = added_kv_proj_dim self.only_cross_attention = only_cross_attention if self.added_kv_proj_dim is None and self.only_cross_attention: raise ValueError( "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." 
) if norm_num_groups is not None: self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True) else: self.group_norm = None if spatial_norm_dim is not None: self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim) else: self.spatial_norm = None if qk_norm is None: self.norm_q = None self.norm_k = None elif qk_norm == "layer_norm": self.norm_q = nn.LayerNorm(dim_head, eps=eps) self.norm_k = nn.LayerNorm(dim_head, eps=eps) elif qk_norm == "fp32_layer_norm": self.norm_q = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) self.norm_k = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) elif qk_norm == "layer_norm_across_heads": # Lumina applies qk norm across all heads self.norm_q = nn.LayerNorm(dim_head * heads, eps=eps) self.norm_k = nn.LayerNorm(dim_head * kv_heads, eps=eps) elif qk_norm == "rms_norm": self.norm_q = RMSNorm(dim_head, eps=eps) self.norm_k = RMSNorm(dim_head, eps=eps) else: raise ValueError(f"unknown qk_norm: {qk_norm}. Should be one of None, 'layer_norm', 'fp32_layer_norm', 'layer_norm_across_heads' or 'rms_norm'") if cross_attention_norm is None: self.norm_cross = None elif cross_attention_norm == "layer_norm": self.norm_cross = nn.LayerNorm(self.cross_attention_dim) elif cross_attention_norm == "group_norm": if self.added_kv_proj_dim is not None: # The given `encoder_hidden_states` are initially of shape # (batch_size, seq_len, added_kv_proj_dim) before being projected # to (batch_size, seq_len, cross_attention_dim). The norm is applied # before the projection, so we need to use `added_kv_proj_dim` as # the number of channels for the group norm. norm_cross_num_channels = added_kv_proj_dim else: norm_cross_num_channels = self.cross_attention_dim self.norm_cross = nn.GroupNorm( num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True ) else: raise ValueError( f"unknown cross_attention_norm: {cross_attention_norm}. 
Should be None, 'layer_norm' or 'group_norm'" ) self.to_q = nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.only_cross_attention: # only relevant for the `AddedKVProcessor` classes self.to_k = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) self.to_v = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) else: self.to_k = None self.to_v = None self.added_proj_bias = added_proj_bias if self.added_kv_proj_dim is not None: self.add_k_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias) self.add_v_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias) if self.context_pre_only is not None: self.add_q_proj = nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) if not self.pre_only: self.to_out = nn.ModuleList([]) self.to_out.append(nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(nn.Dropout(dropout)) if self.context_pre_only is not None and not self.context_pre_only: self.to_add_out = nn.Linear(self.inner_dim, self.out_dim, bias=out_bias) if qk_norm is not None and added_kv_proj_dim is not None: if qk_norm == "fp32_layer_norm": self.norm_added_q = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) self.norm_added_k = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) elif qk_norm == "rms_norm": self.norm_added_q = RMSNorm(dim_head, eps=eps) self.norm_added_k = RMSNorm(dim_head, eps=eps) else: self.norm_added_q = None self.norm_added_k = None # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 if processor is None: processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None: r""" Set whether to use npu flash attention from `torch_npu` or not. """ if use_npu_flash_attention: processor = AttnProcessorNPU() else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_use_memory_efficient_attention_xformers( self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None ) -> None: r""" Set whether to use memory efficient attention from `xformers` or not. Args: use_memory_efficient_attention_xformers (`bool`): Whether to use memory efficient attention from `xformers` or not. attention_op (`Callable`, *optional*): The attention operation to use. Defaults to `None` which uses the default attention operation from `xformers`. 
""" is_custom_diffusion = hasattr(self, "processor") and isinstance( self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0), ) is_added_kv_processor = hasattr(self, "processor") and isinstance( self.processor, ( AttnAddedKVProcessor, AttnAddedKVProcessor2_0, SlicedAttnAddedKVProcessor, XFormersAttnAddedKVProcessor, ), ) if use_memory_efficient_attention_xformers: if is_added_kv_processor and is_custom_diffusion: raise NotImplementedError( f"Memory efficient attention is currently not supported for custom diffusion for attention processor type {self.processor}" ) if not is_xformers_available(): raise ModuleNotFoundError( ( "Refer to https://github.com/facebookresearch/xformers for more information on how to install" " xformers" ), name="xformers", ) elif not torch.cuda.is_available(): raise ValueError( "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is" " only available for GPU " ) else: try: # Make sure we can run the memory efficient attention _ = xformers.ops.memory_efficient_attention( torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), ) except Exception as e: raise e if is_custom_diffusion: processor = CustomDiffusionXFormersAttnProcessor( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, attention_op=attention_op, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) elif is_added_kv_processor: # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP # which uses this type of cross attention ONLY because the attention mask of format # [0, ..., -10.000, ..., 0, ...,] is not supported # throw warning logger.info( "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation." ) processor = XFormersAttnAddedKVProcessor(attention_op=attention_op) else: processor = XFormersAttnProcessor(attention_op=attention_op) else: if is_custom_diffusion: attn_processor_class = ( CustomDiffusionAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else CustomDiffusionAttnProcessor ) processor = attn_processor_class( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_attention_slice(self, slice_size: int) -> None: r""" Set the slice size for attention computation. Args: slice_size (`int`): The slice size for attention computation. 
""" if slice_size is not None and slice_size > self.sliceable_head_dim: raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") if slice_size is not None and self.added_kv_proj_dim is not None: processor = SlicedAttnAddedKVProcessor(slice_size) elif slice_size is not None: processor = SlicedAttnProcessor(slice_size) elif self.added_kv_proj_dim is not None: processor = AttnAddedKVProcessor() else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_processor(self, processor: "AttnProcessor") -> None: r""" Set the attention processor to use. Args: processor (`AttnProcessor`): The attention processor to use. """ # if current processor is in `self._modules` and if passed `processor` is not, we need to # pop `processor` from `self._modules` if ( hasattr(self, "processor") and isinstance(self.processor, torch.nn.Module) and not isinstance(processor, torch.nn.Module) ): logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") self._modules.pop("processor") self.processor = processor def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": r""" Get the attention processor in use. Args: return_deprecated_lora (`bool`, *optional*, defaults to `False`): Set to `True` to return the deprecated LoRA attention processor. Returns: "AttentionProcessor": The attention processor in use. """ if not return_deprecated_lora: return self.processor def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **cross_attention_kwargs, ) -> torch.Tensor: r""" The forward method of the `Attention` class. Args: hidden_states (`torch.Tensor`): The hidden states of the query. encoder_hidden_states (`torch.Tensor`, *optional*): The hidden states of the encoder. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. **cross_attention_kwargs: Additional keyword arguments to pass along to the cross attention. Returns: `torch.Tensor`: The output of the attention layer. """ # The `Attention` class can call different attention processors / attention functions # here we simply pass along all tensors to the selected processor class # For standard processors that are defined here, `**cross_attention_kwargs` is empty attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks"} unused_kwargs = [ k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters ] if len(unused_kwargs) > 0: logger.warning( f"cross_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." 
) cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters} return self.processor( self, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads batch_size, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is reshaped to `[batch_size * heads, seq_len, dim // heads]`. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads if tensor.ndim == 3: batch_size, seq_len, dim = tensor.shape extra_dim = 1 else: batch_size, extra_dim, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3) if out_dim == 3: tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size) return tensor def get_attention_scores( self, query: torch.Tensor, key: torch.Tensor, attention_mask: Optional[torch.Tensor] = None ) -> torch.Tensor: r""" Compute the attention scores. Args: query (`torch.Tensor`): The query tensor. key (`torch.Tensor`): The key tensor. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. Returns: `torch.Tensor`: The attention probabilities/scores. """ dtype = query.dtype if self.upcast_attention: query = query.float() key = key.float() if attention_mask is None: baddbmm_input = torch.empty( query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device ) beta = 0 else: baddbmm_input = attention_mask beta = 1 attention_scores = torch.baddbmm( baddbmm_input, query, key.transpose(-1, -2), beta=beta, alpha=self.scale, ) del baddbmm_input if self.upcast_softmax: attention_scores = attention_scores.float() attention_probs = attention_scores.softmax(dim=-1) del attention_scores attention_probs = attention_probs.to(dtype) return attention_probs def prepare_attention_mask( self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 ) -> torch.Tensor: r""" Prepare the attention mask for the attention computation. Args: attention_mask (`torch.Tensor`): The attention mask to prepare. target_length (`int`): The target length of the attention mask. This is the length of the attention mask after padding. batch_size (`int`): The batch size, which is used to repeat the attention mask. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the attention mask. Can be either `3` or `4`. Returns: `torch.Tensor`: The prepared attention mask. 
""" head_size = self.heads if attention_mask is None: return attention_mask current_length: int = attention_mask.shape[-1] if current_length != target_length: if attention_mask.device.type == "mps": # HACK: MPS: Does not support padding by greater than dimension of input tensor. # Instead, we can manually construct the padding tensor. padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, padding], dim=2) else: # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: # we want to instead pad by (0, remaining_length), where remaining_length is: # remaining_length: int = target_length - current_length # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) if out_dim == 3: if attention_mask.shape[0] < batch_size * head_size: attention_mask = attention_mask.repeat_interleave(head_size, dim=0) elif out_dim == 4: attention_mask = attention_mask.unsqueeze(1) attention_mask = attention_mask.repeat_interleave(head_size, dim=1) return attention_mask def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: r""" Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the `Attention` class. Args: encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. Returns: `torch.Tensor`: The normalized encoder hidden states. """ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" if isinstance(self.norm_cross, nn.LayerNorm): encoder_hidden_states = self.norm_cross(encoder_hidden_states) elif isinstance(self.norm_cross, nn.GroupNorm): # Group norm norms along the channels dimension and expects # input to be in the shape of (N, C, *). In this case, we want # to norm along the hidden dimension, so we need to move # (batch_size, sequence_length, hidden_size) -> # (batch_size, hidden_size, sequence_length) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) encoder_hidden_states = self.norm_cross(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) else: assert False return encoder_hidden_states @torch.no_grad() def fuse_projections(self, fuse=True): device = self.to_q.weight.data.device dtype = self.to_q.weight.data.dtype if not self.is_cross_attention: # fetch weight matrices. concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] # create a new single projection layer and copy over the weights. 
self.to_qkv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_qkv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) self.to_qkv.bias.copy_(concatenated_bias) else: concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_kv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_kv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) self.to_kv.bias.copy_(concatenated_bias) # handle added projections for SD3 and others. if hasattr(self, "add_q_proj") and hasattr(self, "add_k_proj") and hasattr(self, "add_v_proj"): concatenated_weights = torch.cat( [self.add_q_proj.weight.data, self.add_k_proj.weight.data, self.add_v_proj.weight.data] ) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_added_qkv = nn.Linear( in_features, out_features, bias=self.added_proj_bias, device=device, dtype=dtype ) self.to_added_qkv.weight.copy_(concatenated_weights) if self.added_proj_bias: concatenated_bias = torch.cat( [self.add_q_proj.bias.data, self.add_k_proj.bias.data, self.add_v_proj.bias.data] ) self.to_added_qkv.bias.copy_(concatenated_bias) self.fused_projections = fuse class AttnProcessor: r""" Default processor for performing attention-related computations. """ def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionAttnProcessor(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. 
if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class AttnAddedKVProcessor: r""" Processor for performing attention-related computations with extra learnable key and value matrices for the text encoder. """ def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class AttnAddedKVProcessor2_0: r""" Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra learnable key and value matrices for the text encoder. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query, out_dim=4) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key, out_dim=4) value = attn.head_to_batch_dim(value, out_dim=4) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class JointAttnProcessor2_0: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: batch_size, channel, height, width = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size = encoder_hidden_states.shape[0] # `sample` projections. query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) # `context` projections. 
encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # attention query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) value = torch.cat([value, encoder_hidden_states_value_proj], dim=1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # Split the attention outputs. hidden_states, encoder_hidden_states = ( hidden_states[:, : residual.shape[1]], hidden_states[:, residual.shape[1] :], ) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if not attn.context_pre_only: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) return hidden_states, encoder_hidden_states class PAGJointAttnProcessor2_0: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "PAGJointAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: batch_size, channel, height, width = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) # store the length of image patch sequences to create a mask that prevents interaction between patches # similar to making the self-attention map an identity matrix identity_block_size = hidden_states.shape[1] # chunk hidden_states_org, hidden_states_ptb = hidden_states.chunk(2) encoder_hidden_states_org, encoder_hidden_states_ptb = encoder_hidden_states.chunk(2) ################## original path ################## batch_size = encoder_hidden_states_org.shape[0] # `sample` projections. query_org = attn.to_q(hidden_states_org) key_org = attn.to_k(hidden_states_org) value_org = attn.to_v(hidden_states_org) # `context` projections. 
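# Standalone sketch (assumed toy sizes, not library code) of the joint-attention
# bookkeeping above: sample and context tokens are concatenated along the sequence
# axis, one fused attention call is made, and the output is split back at the
# original sample length (residual.shape[1]).
import torch
import torch.nn.functional as F

batch, heads, head_dim = 2, 4, 16
sample_len, context_len = 32, 12

def to_heads(x):  # stand-in for the view/transpose used by the processor
    return x.view(batch, -1, heads, head_dim).transpose(1, 2)

sample = torch.randn(batch, sample_len, heads * head_dim)
context = torch.randn(batch, context_len, heads * head_dim)
tokens = torch.cat([sample, context], dim=1)

q = k = v = to_heads(tokens)
out = F.scaled_dot_product_attention(q, k, v)
out = out.transpose(1, 2).reshape(batch, -1, heads * head_dim)

sample_out, context_out = out[:, :sample_len], out[:, sample_len:]
assert sample_out.shape[1] == sample_len and context_out.shape[1] == context_len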
encoder_hidden_states_org_query_proj = attn.add_q_proj(encoder_hidden_states_org) encoder_hidden_states_org_key_proj = attn.add_k_proj(encoder_hidden_states_org) encoder_hidden_states_org_value_proj = attn.add_v_proj(encoder_hidden_states_org) # attention query_org = torch.cat([query_org, encoder_hidden_states_org_query_proj], dim=1) key_org = torch.cat([key_org, encoder_hidden_states_org_key_proj], dim=1) value_org = torch.cat([value_org, encoder_hidden_states_org_value_proj], dim=1) inner_dim = key_org.shape[-1] head_dim = inner_dim // attn.heads query_org = query_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_org = key_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_org = value_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention( query_org, key_org, value_org, dropout_p=0.0, is_causal=False ) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query_org.dtype) # Split the attention outputs. hidden_states_org, encoder_hidden_states_org = ( hidden_states_org[:, : residual.shape[1]], hidden_states_org[:, residual.shape[1] :], ) # linear proj hidden_states_org = attn.to_out[0](hidden_states_org) # dropout hidden_states_org = attn.to_out[1](hidden_states_org) if not attn.context_pre_only: encoder_hidden_states_org = attn.to_add_out(encoder_hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_org = encoder_hidden_states_org.transpose(-1, -2).reshape( batch_size, channel, height, width ) ################## perturbed path ################## batch_size = encoder_hidden_states_ptb.shape[0] # `sample` projections. query_ptb = attn.to_q(hidden_states_ptb) key_ptb = attn.to_k(hidden_states_ptb) value_ptb = attn.to_v(hidden_states_ptb) # `context` projections. 
encoder_hidden_states_ptb_query_proj = attn.add_q_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_key_proj = attn.add_k_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_value_proj = attn.add_v_proj(encoder_hidden_states_ptb) # attention query_ptb = torch.cat([query_ptb, encoder_hidden_states_ptb_query_proj], dim=1) key_ptb = torch.cat([key_ptb, encoder_hidden_states_ptb_key_proj], dim=1) value_ptb = torch.cat([value_ptb, encoder_hidden_states_ptb_value_proj], dim=1) inner_dim = key_ptb.shape[-1] head_dim = inner_dim // attn.heads query_ptb = query_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ptb = key_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_ptb = value_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # create a full mask with all entries set to 0 seq_len = query_ptb.size(2) full_mask = torch.zeros((seq_len, seq_len), device=query_ptb.device, dtype=query_ptb.dtype) # set the attention value between image patches to -inf full_mask[:identity_block_size, :identity_block_size] = float("-inf") # set the diagonal of the attention value between image patches to 0 full_mask[:identity_block_size, :identity_block_size].fill_diagonal_(0) # expand the mask to match the attention weights shape full_mask = full_mask.unsqueeze(0).unsqueeze(0) # Add batch and num_heads dimensions hidden_states_ptb = F.scaled_dot_product_attention( query_ptb, key_ptb, value_ptb, attn_mask=full_mask, dropout_p=0.0, is_causal=False ) hidden_states_ptb = hidden_states_ptb.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_ptb = hidden_states_ptb.to(query_ptb.dtype) # split the attention outputs. hidden_states_ptb, encoder_hidden_states_ptb = ( hidden_states_ptb[:, : residual.shape[1]], hidden_states_ptb[:, residual.shape[1] :], ) # linear proj hidden_states_ptb = attn.to_out[0](hidden_states_ptb) # dropout hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if not attn.context_pre_only: encoder_hidden_states_ptb = attn.to_add_out(encoder_hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_ptb = encoder_hidden_states_ptb.transpose(-1, -2).reshape( batch_size, channel, height, width ) ################ concat ############### hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) encoder_hidden_states = torch.cat([encoder_hidden_states_org, encoder_hidden_states_ptb]) return hidden_states, encoder_hidden_states class PAGCFGJointAttnProcessor2_0: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "PAGCFGJointAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: batch_size, channel, height, width = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) identity_block_size = hidden_states.shape[ 1 ] # patch embeddings width * height (correspond to self-attention map width or height) # chunk hidden_states_uncond, hidden_states_org, hidden_states_ptb = hidden_states.chunk(3) hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org]) ( encoder_hidden_states_uncond, encoder_hidden_states_org, encoder_hidden_states_ptb, ) = encoder_hidden_states.chunk(3) encoder_hidden_states_org = torch.cat([encoder_hidden_states_uncond, encoder_hidden_states_org]) ################## original path ################## batch_size = encoder_hidden_states_org.shape[0] # `sample` projections. query_org = attn.to_q(hidden_states_org) key_org = attn.to_k(hidden_states_org) value_org = attn.to_v(hidden_states_org) # `context` projections. encoder_hidden_states_org_query_proj = attn.add_q_proj(encoder_hidden_states_org) encoder_hidden_states_org_key_proj = attn.add_k_proj(encoder_hidden_states_org) encoder_hidden_states_org_value_proj = attn.add_v_proj(encoder_hidden_states_org) # attention query_org = torch.cat([query_org, encoder_hidden_states_org_query_proj], dim=1) key_org = torch.cat([key_org, encoder_hidden_states_org_key_proj], dim=1) value_org = torch.cat([value_org, encoder_hidden_states_org_value_proj], dim=1) inner_dim = key_org.shape[-1] head_dim = inner_dim // attn.heads query_org = query_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_org = key_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_org = value_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention( query_org, key_org, value_org, dropout_p=0.0, is_causal=False ) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query_org.dtype) # Split the attention outputs. hidden_states_org, encoder_hidden_states_org = ( hidden_states_org[:, : residual.shape[1]], hidden_states_org[:, residual.shape[1] :], ) # linear proj hidden_states_org = attn.to_out[0](hidden_states_org) # dropout hidden_states_org = attn.to_out[1](hidden_states_org) if not attn.context_pre_only: encoder_hidden_states_org = attn.to_add_out(encoder_hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_org = encoder_hidden_states_org.transpose(-1, -2).reshape( batch_size, channel, height, width ) ################## perturbed path ################## batch_size = encoder_hidden_states_ptb.shape[0] # `sample` projections. query_ptb = attn.to_q(hidden_states_ptb) key_ptb = attn.to_k(hidden_states_ptb) value_ptb = attn.to_v(hidden_states_ptb) # `context` projections. 
encoder_hidden_states_ptb_query_proj = attn.add_q_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_key_proj = attn.add_k_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_value_proj = attn.add_v_proj(encoder_hidden_states_ptb) # attention query_ptb = torch.cat([query_ptb, encoder_hidden_states_ptb_query_proj], dim=1) key_ptb = torch.cat([key_ptb, encoder_hidden_states_ptb_key_proj], dim=1) value_ptb = torch.cat([value_ptb, encoder_hidden_states_ptb_value_proj], dim=1) inner_dim = key_ptb.shape[-1] head_dim = inner_dim // attn.heads query_ptb = query_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ptb = key_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_ptb = value_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # create a full mask with all entries set to 0 seq_len = query_ptb.size(2) full_mask = torch.zeros((seq_len, seq_len), device=query_ptb.device, dtype=query_ptb.dtype) # set the attention value between image patches to -inf full_mask[:identity_block_size, :identity_block_size] = float("-inf") # set the diagonal of the attention value between image patches to 0 full_mask[:identity_block_size, :identity_block_size].fill_diagonal_(0) # expand the mask to match the attention weights shape full_mask = full_mask.unsqueeze(0).unsqueeze(0) # Add batch and num_heads dimensions hidden_states_ptb = F.scaled_dot_product_attention( query_ptb, key_ptb, value_ptb, attn_mask=full_mask, dropout_p=0.0, is_causal=False ) hidden_states_ptb = hidden_states_ptb.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_ptb = hidden_states_ptb.to(query_ptb.dtype) # split the attention outputs. hidden_states_ptb, encoder_hidden_states_ptb = ( hidden_states_ptb[:, : residual.shape[1]], hidden_states_ptb[:, residual.shape[1] :], ) # linear proj hidden_states_ptb = attn.to_out[0](hidden_states_ptb) # dropout hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if not attn.context_pre_only: encoder_hidden_states_ptb = attn.to_add_out(encoder_hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_ptb = encoder_hidden_states_ptb.transpose(-1, -2).reshape( batch_size, channel, height, width ) ################ concat ############### hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) encoder_hidden_states = torch.cat([encoder_hidden_states_org, encoder_hidden_states_ptb]) return hidden_states, encoder_hidden_states class FusedJointAttnProcessor2_0: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: batch_size, channel, height, width = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) 
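# Hedged sketch (toy sizes, not library code) of the PAG perturbation mask built in
# the perturbed paths above: within the image-patch block every off-diagonal score
# is -inf and the diagonal is 0, so after softmax each image token keeps attention
# weight only on itself among the image patches (text columns remain reachable),
# which is the "identity self-attention" perturbation.
import torch

identity_block_size, seq_len = 4, 6   # 4 image tokens followed by 2 text tokens
full_mask = torch.zeros(seq_len, seq_len)
full_mask[:identity_block_size, :identity_block_size] = float("-inf")
full_mask[:identity_block_size, :identity_block_size].fill_diagonal_(0)

scores = torch.randn(seq_len, seq_len)
probs = (scores + full_mask).softmax(dim=-1)
# off-diagonal image-to-image weights are exactly zero
image_block = probs[:identity_block_size, :identity_block_size]
assert torch.equal(image_block * (1 - torch.eye(identity_block_size)), torch.zeros_like(image_block))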
batch_size = encoder_hidden_states.shape[0] # `sample` projections. qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) # `context` projections. encoder_qkv = attn.to_added_qkv(encoder_hidden_states) split_size = encoder_qkv.shape[-1] // 3 ( encoder_hidden_states_query_proj, encoder_hidden_states_key_proj, encoder_hidden_states_value_proj, ) = torch.split(encoder_qkv, split_size, dim=-1) # attention query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) value = torch.cat([value, encoder_hidden_states_value_proj], dim=1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # Split the attention outputs. hidden_states, encoder_hidden_states = ( hidden_states[:, : residual.shape[1]], hidden_states[:, residual.shape[1] :], ) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if not attn.context_pre_only: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) return hidden_states, encoder_hidden_states class AuraFlowAttnProcessor2_0: """Attention processor used typically in processing Aura Flow.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention") and is_torch_version("<", "2.1"): raise ImportError( "AuraFlowAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to at least 2.1 or above as we use `scale` in `F.scaled_dot_product_attention()`. " ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, *args, **kwargs, ) -> torch.FloatTensor: batch_size = hidden_states.shape[0] # `sample` projections. query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) # `context` projections. if encoder_hidden_states is not None: encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # Reshape. inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, attn.heads, head_dim) value = value.view(batch_size, -1, attn.heads, head_dim) # Apply QK norm. if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Concatenate the projections. 
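# Sketch (toy sizes, not library code) of why the fused to_qkv used by the Fused*
# processors above is equivalent to separate to_q/to_k/to_v: stacking the three
# weight matrices row-wise gives a single matmul whose output splits back into
# query, key and value.
import torch
import torch.nn as nn

dim = 32
to_q, to_k, to_v = nn.Linear(dim, dim), nn.Linear(dim, dim), nn.Linear(dim, dim)
to_qkv = nn.Linear(dim, 3 * dim)
with torch.no_grad():
    to_qkv.weight.copy_(torch.cat([to_q.weight, to_k.weight, to_v.weight], dim=0))
    to_qkv.bias.copy_(torch.cat([to_q.bias, to_k.bias, to_v.bias], dim=0))

x = torch.randn(2, 5, dim)
query, key, value = torch.split(to_qkv(x), to_qkv.out_features // 3, dim=-1)
assert torch.allclose(query, to_q(x), atol=1e-6)
assert torch.allclose(key, to_k(x), atol=1e-6)
assert torch.allclose(value, to_v(x), atol=1e-6)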
        if encoder_hidden_states is not None:
            encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(
                batch_size, -1, attn.heads, head_dim
            )
            encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim)
            encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(
                batch_size, -1, attn.heads, head_dim
            )

            if attn.norm_added_q is not None:
                encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj)
            if attn.norm_added_k is not None:
                encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj)

            query = torch.cat([encoder_hidden_states_query_proj, query], dim=1)
            key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
            value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)

        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        # Attention.
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, dropout_p=0.0, scale=attn.scale, is_causal=False
        )
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # Split the attention outputs.
        if encoder_hidden_states is not None:
            hidden_states, encoder_hidden_states = (
                hidden_states[:, encoder_hidden_states.shape[1] :],
                hidden_states[:, : encoder_hidden_states.shape[1]],
            )

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)
        if encoder_hidden_states is not None:
            encoder_hidden_states = attn.to_add_out(encoder_hidden_states)

        if encoder_hidden_states is not None:
            return hidden_states, encoder_hidden_states
        else:
            return hidden_states


class FusedAuraFlowAttnProcessor2_0:
    """Attention processor used typically in processing Aura Flow with fused projections."""

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention") and is_torch_version("<", "2.1"):
            raise ImportError(
                "FusedAuraFlowAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to at least 2.1 or above as we use `scale` in `F.scaled_dot_product_attention()`. "
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: torch.FloatTensor = None,
        *args,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size = hidden_states.shape[0]

        # `sample` projections.
        qkv = attn.to_qkv(hidden_states)
        split_size = qkv.shape[-1] // 3
        query, key, value = torch.split(qkv, split_size, dim=-1)

        # `context` projections.
        if encoder_hidden_states is not None:
            encoder_qkv = attn.to_added_qkv(encoder_hidden_states)
            split_size = encoder_qkv.shape[-1] // 3
            (
                encoder_hidden_states_query_proj,
                encoder_hidden_states_key_proj,
                encoder_hidden_states_value_proj,
            ) = torch.split(encoder_qkv, split_size, dim=-1)

        # Reshape.
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads
        query = query.view(batch_size, -1, attn.heads, head_dim)
        key = key.view(batch_size, -1, attn.heads, head_dim)
        value = value.view(batch_size, -1, attn.heads, head_dim)

        # Apply QK norm.
        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Concatenate the projections.
        if encoder_hidden_states is not None:
            encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(
                batch_size, -1, attn.heads, head_dim
            )
            encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim)
            encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(
                batch_size, -1, attn.heads, head_dim
            )

            if attn.norm_added_q is not None:
                encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj)
            if attn.norm_added_k is not None:
                encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj)

            query = torch.cat([encoder_hidden_states_query_proj, query], dim=1)
            key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)
            value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)

        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        # Attention.
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, dropout_p=0.0, scale=attn.scale, is_causal=False
        )
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # Split the attention outputs.
        if encoder_hidden_states is not None:
            hidden_states, encoder_hidden_states = (
                hidden_states[:, encoder_hidden_states.shape[1] :],
                hidden_states[:, : encoder_hidden_states.shape[1]],
            )

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)
        if encoder_hidden_states is not None:
            encoder_hidden_states = attn.to_add_out(encoder_hidden_states)

        if encoder_hidden_states is not None:
            return hidden_states, encoder_hidden_states
        else:
            return hidden_states


class FluxAttnProcessor2_0:
    """Attention processor used typically in processing the SD3-like self-attention projections."""

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("FluxAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: torch.FloatTensor = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> torch.FloatTensor:
        batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape

        # `sample` projections.
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # the attention in FluxSingleTransformerBlock does not use `encoder_hidden_states`
        if encoder_hidden_states is not None:
            # `context` projections.
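# Hedged aside on the QK-norm step above (the exact norm_q/norm_k modules are model
# configuration; an RMSNorm over the head dimension is assumed here purely for
# illustration): normalizing query and key per head bounds the attention logits
# and stabilizes training in large DiT-style models.
import torch
import torch.nn as nn

class ToyRMSNorm(nn.Module):  # hypothetical stand-in, not the library module
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        rms = x.pow(2).mean(dim=-1, keepdim=True).add(self.eps).rsqrt()
        return x * rms * self.weight

query = torch.randn(2, 8, 10, 64)        # (batch, heads, seq, head_dim)
query = ToyRMSNorm(64)(query)            # normalized along head_dim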
encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # attention query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = ( hidden_states[:, : encoder_hidden_states.shape[1]], hidden_states[:, encoder_hidden_states.shape[1] :], ) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states else: return hidden_states class FusedFluxAttnProcessor2_0: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "FusedFluxAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # `sample` projections. qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # the attention in FluxSingleTransformerBlock does not use `encoder_hidden_states` # `context` projections. 
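# Self-contained sketch of a rotary embedding in "rotate half" form (an assumed
# simplification; the library's apply_rotary_emb lives in .embeddings and its
# exact layout may differ): each cos/sin pair rotates a pair of channels by a
# position-dependent angle, making query-key dot products relative-position aware.
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat([-x2, x1], dim=-1)

batch, heads, seq, head_dim = 1, 2, 6, 8
pos = torch.arange(seq).float()
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
angles = torch.outer(pos, inv_freq)                    # (seq, head_dim / 2)
cos = torch.cat([angles.cos(), angles.cos()], dim=-1)  # (seq, head_dim)
sin = torch.cat([angles.sin(), angles.sin()], dim=-1)

q = torch.randn(batch, heads, seq, head_dim)
q_rotated = q * cos + rotate_half(q) * sin             # broadcast over batch/heads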
if encoder_hidden_states is not None: encoder_qkv = attn.to_added_qkv(encoder_hidden_states) split_size = encoder_qkv.shape[-1] // 3 ( encoder_hidden_states_query_proj, encoder_hidden_states_key_proj, encoder_hidden_states_value_proj, ) = torch.split(encoder_qkv, split_size, dim=-1) encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view( batch_size, -1, attn.heads, head_dim ).transpose(1, 2) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # attention query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = ( hidden_states[:, : encoder_hidden_states.shape[1]], hidden_states[:, encoder_hidden_states.shape[1] :], ) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states else: return hidden_states class CogVideoXAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on query and key vectors, but does not include spatial normalization. 
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE if needed if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states, hidden_states = hidden_states.split( [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 ) return hidden_states, encoder_hidden_states class FusedCogVideoXAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on query and key vectors, but does not include spatial normalization. 
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE if needed if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states, hidden_states = hidden_states.split( [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 ) return hidden_states, encoder_hidden_states class XFormersAttnAddedKVProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. 
""" def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. """ def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, key_tokens, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size) if attention_mask is not None: # expand our mask's singleton query_tokens dimension: # [batch*heads, 1, key_tokens] -> # [batch*heads, query_tokens, key_tokens] # so that it can be added as a bias onto the attention scores that xformers computes: # [batch*heads, query_tokens, key_tokens] # we do this explicitly because xformers doesn't broadcast the singleton dimension for us. _, query_tokens, _ = hidden_states.shape attention_mask = attention_mask.expand(-1, query_tokens, -1) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessorNPU: r""" Processor for implementing flash attention using torch_npu. Torch_npu supports only fp16 and bf16 data types. If fp32 is used, F.scaled_dot_product_attention will be used for computation, but the acceleration effect on NPU is not significant. """ def __init__(self): if not is_torch_npu_available(): raise ImportError("AttnProcessorNPU requires torch_npu extensions and is supported only on npu devices.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) if query.dtype in (torch.float16, torch.bfloat16): hidden_states = torch_npu.npu_fusion_attention( query, key, value, attn.heads, input_layout="BNSD", pse=None, atten_mask=attention_mask, scale=1.0 / math.sqrt(query.shape[-1]), pre_tockens=65536, next_tockens=65536, keep_prob=1.0, sync=False, inner_precise=0, )[0] else: # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class StableAudioAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is used in the Stable Audio model. It applies rotary embedding on query and key vector, and allows MHA, GQA or MQA. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "StableAudioAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) def apply_partial_rotary_emb( self, x: torch.Tensor, freqs_cis: Tuple[torch.Tensor], ) -> torch.Tensor: from .embeddings import apply_rotary_emb rot_dim = freqs_cis[0].shape[-1] x_to_rotate, x_unrotated = x[..., :rot_dim], x[..., rot_dim:] x_rotated = apply_rotary_emb(x_to_rotate, freqs_cis, use_real=True, use_real_unbind_dim=-2) out = torch.cat((x_rotated, x_unrotated), dim=-1) return out def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) head_dim = query.shape[-1] // attn.heads kv_heads = key.shape[-1] // head_dim query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2) if kv_heads != attn.heads: # if GQA or MQA, repeat the key/value heads to reach the number of query heads. 
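# Aside (toy sizes, not library code) on the GQA/MQA expansion performed next:
# grouped- and multi-query attention project fewer key/value heads than query
# heads, so each KV head is duplicated along the head axis (dim=1) with
# repeat_interleave until the shapes line up for scaled dot-product attention.
import torch

heads, kv_heads, seq, head_dim = 8, 2, 10, 16
key = torch.randn(1, kv_heads, seq, head_dim)
key = torch.repeat_interleave(key, heads // kv_heads, dim=1)
assert key.shape[1] == heads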
            heads_per_kv_head = attn.heads // kv_heads
            key = torch.repeat_interleave(key, heads_per_kv_head, dim=1)
            value = torch.repeat_interleave(value, heads_per_kv_head, dim=1)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if rotary_emb is not None:
            query_dtype = query.dtype
            key_dtype = key.dtype
            query = query.to(torch.float32)
            key = key.to(torch.float32)

            rot_dim = rotary_emb[0].shape[-1]
            query_to_rotate, query_unrotated = query[..., :rot_dim], query[..., rot_dim:]
            query_rotated = apply_rotary_emb(query_to_rotate, rotary_emb, use_real=True, use_real_unbind_dim=-2)
            query = torch.cat((query_rotated, query_unrotated), dim=-1)
            if not attn.is_cross_attention:
                key_to_rotate, key_unrotated = key[..., :rot_dim], key[..., rot_dim:]
                key_rotated = apply_rotary_emb(key_to_rotate, rotary_emb, use_real=True, use_real_unbind_dim=-2)
                key = torch.cat((key_rotated, key_unrotated), dim=-1)

            query = query.to(query_dtype)
            key = key.to(key_dtype)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class HunyuanAttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
    used in the HunyuanDiT model. It applies a normalization layer and rotary embedding on query and key vector.
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE if needed if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) if not attn.is_cross_attention: key = apply_rotary_emb(key, image_rotary_emb) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class FusedHunyuanAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0) with fused projection layers. This is used in the HunyuanDiT model. It applies a s normalization layer and rotary embedding on query and key vector. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "FusedHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        temb: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        from .embeddings import apply_rotary_emb

        residual = hidden_states
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        if encoder_hidden_states is None:
            qkv = attn.to_qkv(hidden_states)
            split_size = qkv.shape[-1] // 3
            query, key, value = torch.split(qkv, split_size, dim=-1)
        else:
            if attn.norm_cross:
                encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
            query = attn.to_q(hidden_states)

            kv = attn.to_kv(encoder_hidden_states)
            split_size = kv.shape[-1] // 2
            key, value = torch.split(kv, split_size, dim=-1)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
            query = apply_rotary_emb(query, image_rotary_emb)
            if not attn.is_cross_attention:
                key = apply_rotary_emb(key, image_rotary_emb)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class PAGHunyuanAttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
    used in the HunyuanDiT model. It applies a normalization layer and rotary embedding on query and key vector. This
    variant of the processor employs [Perturbed Attention Guidance](https://arxiv.org/abs/2403.17377).
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "PAGHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        temb: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        from .embeddings import apply_rotary_emb

        residual = hidden_states
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        # chunk
        hidden_states_org, hidden_states_ptb = hidden_states.chunk(2)

        # 1. Original Path
        batch_size, sequence_length, _ = (
            hidden_states_org.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states_org)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states_org
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
            query = apply_rotary_emb(query, image_rotary_emb)
            if not attn.is_cross_attention:
                key = apply_rotary_emb(key, image_rotary_emb)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states_org = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states_org = hidden_states_org.to(query.dtype)

        # linear proj
        hidden_states_org = attn.to_out[0](hidden_states_org)
        # dropout
        hidden_states_org = attn.to_out[1](hidden_states_org)

        if input_ndim == 4:
            hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width)
        # 2. Perturbed Path
        if attn.group_norm is not None:
            hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)

        hidden_states_ptb = attn.to_v(hidden_states_ptb)
        hidden_states_ptb = hidden_states_ptb.to(query.dtype)

        # linear proj
        hidden_states_ptb = attn.to_out[0](hidden_states_ptb)
        # dropout
        hidden_states_ptb = attn.to_out[1](hidden_states_ptb)

        if input_ndim == 4:
            hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # cat
        hidden_states = torch.cat([hidden_states_org, hidden_states_ptb])

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class PAGCFGHunyuanAttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This
    is used in the HunyuanDiT model. It applies a normalization layer and rotary embedding on the query and key
    vectors. This variant of the processor employs
    [Perturbed Attention Guidance](https://arxiv.org/abs/2403.17377).
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "PAGCFGHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        temb: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        from .embeddings import apply_rotary_emb

        residual = hidden_states
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        # chunk
        hidden_states_uncond, hidden_states_org, hidden_states_ptb = hidden_states.chunk(3)
        hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org])
        # 1. Original Path
        batch_size, sequence_length, _ = (
            hidden_states_org.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states_org)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states_org
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
            query = apply_rotary_emb(query, image_rotary_emb)
            if not attn.is_cross_attention:
                key = apply_rotary_emb(key, image_rotary_emb)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states_org = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states_org = hidden_states_org.to(query.dtype)

        # linear proj
        hidden_states_org = attn.to_out[0](hidden_states_org)
        # dropout
        hidden_states_org = attn.to_out[1](hidden_states_org)

        if input_ndim == 4:
            hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # 2. Perturbed Path
        if attn.group_norm is not None:
            hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)

        hidden_states_ptb = attn.to_v(hidden_states_ptb)
        hidden_states_ptb = hidden_states_ptb.to(query.dtype)

        # linear proj
        hidden_states_ptb = attn.to_out[0](hidden_states_ptb)
        # dropout
        hidden_states_ptb = attn.to_out[1](hidden_states_ptb)

        if input_ndim == 4:
            hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # cat
        hidden_states = torch.cat([hidden_states_org, hidden_states_ptb])

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class LuminaAttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This
    is used in the LuminaNextDiT model. It applies a normalization layer and rotary embedding on the query and key
    vectors.
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, query_rotary_emb: Optional[torch.Tensor] = None, key_rotary_emb: Optional[torch.Tensor] = None, base_sequence_length: Optional[int] = None, ) -> torch.Tensor: from .embeddings import apply_rotary_emb input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape # Get Query-Key-Value Pair query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query_dim = query.shape[-1] inner_dim = key.shape[-1] head_dim = query_dim // attn.heads dtype = query.dtype # Get key-value heads kv_heads = inner_dim // head_dim # Apply Query-Key Norm if needed if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, kv_heads, head_dim) value = value.view(batch_size, -1, kv_heads, head_dim) # Apply RoPE if needed if query_rotary_emb is not None: query = apply_rotary_emb(query, query_rotary_emb, use_real=False) if key_rotary_emb is not None: key = apply_rotary_emb(key, key_rotary_emb, use_real=False) query, key = query.to(dtype), key.to(dtype) # Apply proportional attention if true if key_rotary_emb is None: softmax_scale = None else: if base_sequence_length is not None: softmax_scale = math.sqrt(math.log(sequence_length, base_sequence_length)) * attn.scale else: softmax_scale = attn.scale # perform Grouped-qurey Attention (GQA) n_rep = attn.heads // kv_heads if n_rep >= 1: key = key.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) value = value.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.bool().view(batch_size, 1, 1, -1) attention_mask = attention_mask.expand(-1, attn.heads, sequence_length, -1) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, scale=softmax_scale ) hidden_states = hidden_states.transpose(1, 2).to(dtype) return hidden_states class FusedAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). It uses fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is currently 🧪 experimental in nature and can change in future. </Tip> """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "FusedAttnProcessor2_0 requires at least PyTorch 2.0, to use it. Please upgrade PyTorch to > 2.0." 
) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) if encoder_hidden_states is None: qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) else: if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) query = attn.to_q(hidden_states) kv = attn.to_kv(encoder_hidden_states) split_size = kv.shape[-1] // 2 key, value = torch.split(kv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionXFormersAttnProcessor(nn.Module): r""" Processor for implementing memory efficient attention using xFormers for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. 
cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. """ def __init__( self, train_kv: bool = True, train_q_out: bool = False, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, attention_op: Optional[Callable] = None, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.attention_op = attention_op # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = 
attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CustomDiffusionAttnProcessor2_0(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method using PyTorch 2.0’s memory-efficient scaled dot-product attention. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() inner_dim = hidden_states.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, 
value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class SlicedAttnProcessor: r""" Processor for implementing sliced attention. Args: slice_size (`int`, *optional*): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`. """ def __init__(self, slice_size: int): self.slice_size = slice_size def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range((batch_size_attention - 1) // self.slice_size + 1): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class SlicedAttnAddedKVProcessor: r""" Processor for implementing sliced attention with extra learnable key and value matrices for the text encoder. Args: slice_size (`int`, *optional*): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`. 
""" def __init__(self, slice_size): self.slice_size = slice_size def __call__( self, attn: "Attention", hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, ) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range((batch_size_attention - 1) // self.slice_size + 1): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class SpatialNorm(nn.Module): """ Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. Args: f_channels (`int`): The number of channels for input to group normalization layer, and output of the spatial norm layer. zq_channels (`int`): The number of channels for the quantized vector as described in the paper. 
""" def __init__( self, f_channels: int, zq_channels: int, ): super().__init__() self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True) self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor: f_size = f.shape[-2:] zq = F.interpolate(zq, size=f_size, mode="nearest") norm_f = self.norm_layer(f) new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) return new_f class IPAdapterAttnProcessor(nn.Module): r""" Attention processor for Multiple IP-Adapters. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or List[`float`], defaults to 1.0): the weight scale of image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, scale: float = 1.0, ip_adapter_masks: Optional[torch.Tensor] = None, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning." 
) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, List): # for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width] ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1)) if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)): raise ValueError( f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match " f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states " f"({len(ip_hidden_states)})" ) else: for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)): if not isinstance(mask, torch.Tensor) or mask.ndim != 4: raise ValueError( "Each element of the ip_adapter_masks array should be a tensor with shape " "[1, num_images_for_ip_adapter, height, width]." 
" Please use `IPAdapterMaskProcessor` to preprocess your mask" ) if mask.shape[1] != ip_state.shape[1]: raise ValueError( f"Number of masks ({mask.shape[1]}) does not match " f"number of ip images ({ip_state.shape[1]}) at index {index}" ) if isinstance(scale, list) and not len(scale) == mask.shape[1]: raise ValueError( f"Number of masks ({mask.shape[1]}) does not match " f"number of scales ({len(scale)}) at index {index}" ) else: ip_adapter_masks = [None] * len(self.scale) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks ): skip = False if isinstance(scale, list): if all(s == 0 for s in scale): skip = True elif scale == 0: skip = True if not skip: if mask is not None: if not isinstance(scale, list): scale = [scale] * mask.shape[1] current_num_images = mask.shape[1] for i in range(current_num_images): ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :]) ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :]) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) _current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) _current_ip_hidden_states = attn.batch_to_head_dim(_current_ip_hidden_states) mask_downsample = IPAdapterMaskProcessor.downsample( mask[:, i, :, :], batch_size, _current_ip_hidden_states.shape[1], _current_ip_hidden_states.shape[2], ) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample) else: ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states) hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAdapterAttnProcessor2_0(torch.nn.Module): r""" Attention processor for IP-Adapter for PyTorch 2.0. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or `List[float]`, defaults to 1.0): the weight scale of image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, scale: float = 1.0, ip_adapter_masks: Optional[torch.Tensor] = None, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning." ) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, List): 
# for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width] ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1)) if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)): raise ValueError( f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match " f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states " f"({len(ip_hidden_states)})" ) else: for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)): if not isinstance(mask, torch.Tensor) or mask.ndim != 4: raise ValueError( "Each element of the ip_adapter_masks array should be a tensor with shape " "[1, num_images_for_ip_adapter, height, width]." " Please use `IPAdapterMaskProcessor` to preprocess your mask" ) if mask.shape[1] != ip_state.shape[1]: raise ValueError( f"Number of masks ({mask.shape[1]}) does not match " f"number of ip images ({ip_state.shape[1]}) at index {index}" ) if isinstance(scale, list) and not len(scale) == mask.shape[1]: raise ValueError( f"Number of masks ({mask.shape[1]}) does not match " f"number of scales ({len(scale)}) at index {index}" ) else: ip_adapter_masks = [None] * len(self.scale) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks ): skip = False if isinstance(scale, list): if all(s == 0 for s in scale): skip = True elif scale == 0: skip = True if not skip: if mask is not None: if not isinstance(scale, list): scale = [scale] * mask.shape[1] current_num_images = mask.shape[1] for i in range(current_num_images): ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :]) ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :]) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 _current_ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) _current_ip_hidden_states = _current_ip_hidden_states.transpose(1, 2).reshape( batch_size, -1, attn.heads * head_dim ) _current_ip_hidden_states = _current_ip_hidden_states.to(query.dtype) mask_downsample = IPAdapterMaskProcessor.downsample( mask[:, i, :, :], batch_size, _current_ip_hidden_states.shape[1], _current_ip_hidden_states.shape[2], ) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample) else: ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 current_ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape( batch_size, -1, attn.heads * head_dim ) current_ip_hidden_states = current_ip_hidden_states.to(query.dtype) hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = 
attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGIdentitySelfAttnProcessor2_0: r""" Processor for implementing PAG using scaled dot-product attention (enabled by default if you're using PyTorch 2.0). PAG reference: https://arxiv.org/abs/2403.17377 """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "PAGIdentitySelfAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) # chunk hidden_states_org, hidden_states_ptb = hidden_states.chunk(2) # original path batch_size, sequence_length, _ = hidden_states_org.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) key = attn.to_k(hidden_states_org) value = attn.to_v(hidden_states_org) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states_org = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) # linear proj hidden_states_org = attn.to_out[0](hidden_states_org) # dropout hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) # perturbed path (identity attention) batch_size, sequence_length, _ = hidden_states_ptb.shape if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) hidden_states_ptb = attn.to_v(hidden_states_ptb) hidden_states_ptb = hidden_states_ptb.to(query.dtype) # linear proj hidden_states_ptb = attn.to_out[0](hidden_states_ptb) # dropout hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) # cat hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: 
hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGCFGIdentitySelfAttnProcessor2_0: r""" Processor for implementing PAG using scaled dot-product attention (enabled by default if you're using PyTorch 2.0). PAG reference: https://arxiv.org/abs/2403.17377 """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "PAGCFGIdentitySelfAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) # chunk hidden_states_uncond, hidden_states_org, hidden_states_ptb = hidden_states.chunk(3) hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org]) # original path batch_size, sequence_length, _ = hidden_states_org.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) key = attn.to_k(hidden_states_org) value = attn.to_v(hidden_states_org) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states_org = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) # linear proj hidden_states_org = attn.to_out[0](hidden_states_org) # dropout hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) # perturbed path (identity attention) batch_size, sequence_length, _ = hidden_states_ptb.shape if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) value = attn.to_v(hidden_states_ptb) hidden_states_ptb = value hidden_states_ptb = hidden_states_ptb.to(query.dtype) # linear proj hidden_states_ptb = attn.to_out[0](hidden_states_ptb) # dropout hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) # cat hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = 
hidden_states / attn.rescale_output_factor

        return hidden_states


# Deprecated LoRA processors: retained as empty stubs so that existing imports
# keep working (LoRA handling has moved out of the attention processors).
class LoRAAttnProcessor:
    def __init__(self):
        pass


class LoRAAttnProcessor2_0:
    def __init__(self):
        pass


class LoRAXFormersAttnProcessor:
    def __init__(self):
        pass


class LoRAAttnAddedKVProcessor:
    def __init__(self):
        pass


class FluxSingleAttnProcessor2_0(FluxAttnProcessor2_0):
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """

    def __init__(self):
        deprecation_message = "`FluxSingleAttnProcessor2_0` is deprecated and will be removed in a future version. Please use `FluxAttnProcessor2_0` instead."
        deprecate("FluxSingleAttnProcessor2_0", "0.32.0", deprecation_message)
        super().__init__()


ADDED_KV_ATTENTION_PROCESSORS = (
    AttnAddedKVProcessor,
    SlicedAttnAddedKVProcessor,
    AttnAddedKVProcessor2_0,
    XFormersAttnAddedKVProcessor,
)

CROSS_ATTENTION_PROCESSORS = (
    AttnProcessor,
    AttnProcessor2_0,
    XFormersAttnProcessor,
    SlicedAttnProcessor,
    IPAdapterAttnProcessor,
    IPAdapterAttnProcessor2_0,
)

AttentionProcessor = Union[
    AttnProcessor,
    AttnProcessor2_0,
    FusedAttnProcessor2_0,
    XFormersAttnProcessor,
    SlicedAttnProcessor,
    AttnAddedKVProcessor,
    SlicedAttnAddedKVProcessor,
    AttnAddedKVProcessor2_0,
    XFormersAttnAddedKVProcessor,
    CustomDiffusionAttnProcessor,
    CustomDiffusionXFormersAttnProcessor,
    CustomDiffusionAttnProcessor2_0,
    PAGCFGIdentitySelfAttnProcessor2_0,
    PAGIdentitySelfAttnProcessor2_0,
    PAGCFGHunyuanAttnProcessor2_0,
    PAGHunyuanAttnProcessor2_0,
]
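# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library source). The processor
# classes above are interchangeable callables that `Attention` dispatches to;
# a model swaps them globally via `set_attn_processor`. The checkpoint id
# below is an assumption for illustration only.
#
#     import torch
#     from diffusers import UNet2DConditionModel
#     from diffusers.models.attention_processor import AttnProcessor2_0, SlicedAttnProcessor
#
#     unet = UNet2DConditionModel.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
#     )
#     unet.set_attn_processor(AttnProcessor2_0())      # PyTorch 2.0 fused SDPA path
#     unet.set_attn_processor(SlicedAttnProcessor(2))  # slice attention to save memory
#     unet.set_default_attn_processor()                # restore the default processor
# ---------------------------------------------------------------------------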
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from ..configuration_utils import ConfigMixin, register_to_config
from ..loaders import FromOriginalModelMixin
from ..utils import BaseOutput, logging
from .attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unets.unet_2d_blocks import UNetMidBlock2DCrossAttn
from .unets.unet_2d_condition import UNet2DConditionModel
from .unets.unet_motion_model import CrossAttnDownBlockMotion, DownBlockMotion


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class SparseControlNetOutput(BaseOutput):
    """
    The output of [`SparseControlNetModel`].

    Args:
        down_block_res_samples (`tuple[torch.Tensor]`):
            A tuple of downsample activations at different resolutions for each downsampling block. Each tensor
            should be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`.
            Output can be used to condition the original UNet's downsampling activations.
        mid_block_res_sample (`torch.Tensor`):
            The activation of the middle block (the lowest sample resolution). The tensor should be of shape
            `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
            Output can be used to condition the original UNet's middle block activation.
    """

    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor


class SparseControlNetConditioningEmbedding(nn.Module):
    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
    ):
        super().__init__()

        self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
        self.blocks = nn.ModuleList([])

        for i in range(len(block_out_channels) - 1):
            channel_in = block_out_channels[i]
            channel_out = block_out_channels[i + 1]
            self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
            self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))

        self.conv_out = zero_module(
            nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
        )

    def forward(self, conditioning: torch.Tensor) -> torch.Tensor:
        embedding = self.conv_in(conditioning)
        embedding = F.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = F.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


class SparseControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    """
    A SparseControlNet model as described in [SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion
    Models](https://arxiv.org/abs/2311.16933).
    Args:
        in_channels (`int`, defaults to 4):
            The number of channels in the input sample.
        conditioning_channels (`int`, defaults to 4):
            The number of input channels in the controlnet conditional embedding module. If
            `concat_conditioning_mask` is `True`, the value provided here is incremented by 1.
        flip_sin_to_cos (`bool`, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, defaults to 0):
            The frequency shift to apply to the time embedding.
        down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockMotion")`):
            The tuple of downsample blocks to use.
        only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
            Whether the down blocks should use only cross-attention.
        block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, defaults to 2):
            The number of layers per block.
        downsample_padding (`int`, defaults to 1):
            The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, defaults to 1):
            The scale factor to use for the mid block.
        act_fn (`str`, defaults to "silu"):
            The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups to use for the normalization. If None, normalization and activation layers are
            skipped in post-processing.
        norm_eps (`float`, defaults to 1e-5):
            The epsilon to use for the normalization.
        cross_attention_dim (`int`, defaults to 768):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        transformer_layers_per_mid_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer layers to use in each layer in the middle block.
        temporal_transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer layers to use in each temporal (motion) block.
        attention_head_dim (`int` or `Tuple[int]`, defaults to 8):
            The dimension of the attention heads.
        num_attention_heads (`int` or `Tuple[int]`, *optional*):
            The number of heads to use for multi-head attention.
        use_linear_projection (`bool`, defaults to `False`):
            Whether to use a linear projection instead of a 1x1 convolution for the transformer input and output.
        upcast_attention (`bool`, defaults to `False`):
            Whether the attention computation should be upcast to float32.
        resnet_time_scale_shift (`str`, defaults to `"default"`):
            Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
        conditioning_embedding_out_channels (`Tuple[int]`, defaults to `(16, 32, 96, 256)`):
            The tuple of output channel for each block in the `conditioning_embedding` layer.
        global_pool_conditions (`bool`, defaults to `False`):
            TODO(Patrick) - unused parameter
        controlnet_conditioning_channel_order (`str`, defaults to `rgb`):
            The channel order of the conditioning image, e.g. `rgb` or `bgr`.
        motion_max_seq_length (`int`, defaults to `32`):
            The maximum sequence length to use in the motion module.
        motion_num_attention_heads (`int` or `Tuple[int]`, defaults to `8`):
            The number of heads to use in each attention layer of the motion module.
        concat_conditioning_mask (`bool`, defaults to `True`):
            Whether to concatenate the conditioning mask to the conditioning input before computing the embedding.
        use_simplified_condition_embedding (`bool`, defaults to `True`):
            Whether to replace the conditioning embedding network with a single zero-initialized convolution.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 4,
        conditioning_channels: int = 4,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str, ...]
= ( "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockMotion", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 768, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None, temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, controlnet_conditioning_channel_order: str = "rgb", motion_max_seq_length: int = 32, motion_num_attention_heads: int = 8, concat_conditioning_mask: bool = True, use_simplified_condition_embedding: bool = True, ): super().__init__() self.use_simplified_condition_embedding = use_simplified_condition_embedding # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types) # input conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if concat_conditioning_mask: conditioning_channels = conditioning_channels + 1 self.concat_conditioning_mask = concat_conditioning_mask # control net conditioning embedding if use_simplified_condition_embedding: self.controlnet_cond_embedding = zero_module( nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) ) else: self.controlnet_cond_embedding = SparseControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, ) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "CrossAttnDownBlockMotion": down_block = CrossAttnDownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], add_downsample=not is_final_block, dual_cross_attention=False, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False, ) elif down_block_type == "DownBlockMotion": down_block = DownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, resnet_eps=norm_eps, 
resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, add_downsample=not is_final_block, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False, ) else: raise ValueError( "Invalid `block_type` encountered. Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`" ) self.down_blocks.append(down_block) for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) # mid mid_block_channels = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channels, mid_block_channels, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if transformer_layers_per_mid_block is None: transformer_layers_per_mid_block = ( transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1 ) self.mid_block = UNetMidBlock2DCrossAttn( in_channels=mid_block_channels, temb_channels=time_embed_dim, dropout=0, num_layers=1, transformer_layers_per_block=transformer_layers_per_mid_block, resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[-1], output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], dual_cross_attention=False, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type="default", ) @classmethod def from_unet( cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), load_weights_from_unet: bool = True, conditioning_channels: int = 3, ) -> "SparseControlNetModel": r""" Instantiate a [`SparseControlNetModel`] from [`UNet2DConditionModel`]. Parameters: unet (`UNet2DConditionModel`): The UNet model weights to copy to the [`SparseControlNetModel`]. All configuration options are also copied where applicable. """ transformer_layers_per_block = ( unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 ) down_block_types = unet.config.down_block_types for i in range(len(down_block_types)): if "CrossAttn" in down_block_types[i]: down_block_types[i] = "CrossAttnDownBlockMotion" elif "Down" in down_block_types[i]: down_block_types[i] = "DownBlockMotion" else: raise ValueError("Invalid `block_type` encountered. 
Must be a cross-attention or down block") controlnet = cls( in_channels=unet.config.in_channels, conditioning_channels=conditioning_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, conditioning_embedding_out_channels=conditioning_embedding_out_channels, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, ) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict(), strict=False) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict(), strict=False) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict(), strict=False) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False) return controlnet @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. 
        # Any children which exposes the set_attention_slice method
        # gets the message
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
        if isinstance(module, (CrossAttnDownBlockMotion, DownBlockMotion, UNetMidBlock2DCrossAttn)):
            module.gradient_checkpointing = value

    def forward(
        self,
        sample: torch.Tensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: torch.Tensor,
        conditioning_scale: float = 1.0,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        conditioning_mask: Optional[torch.Tensor] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[SparseControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
        """
        The [`SparseControlNetModel`] forward method.

        Args:
            sample (`torch.Tensor`):
                The noisy input tensor.
            timestep (`Union[torch.Tensor, float, int]`):
                The number of timesteps to denoise an input.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states.
            controlnet_cond (`torch.Tensor`):
                The conditional input tensor of shape `(batch_size, num_channels, num_frames, height, width)`.
            conditioning_scale (`float`, defaults to `1.0`):
                The scale factor for ControlNet outputs.
            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
                embeddings.
            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the
                mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
            conditioning_mask (`torch.Tensor`, *optional*, defaults to `None`):
                A single-channel mask marking which frames of `controlnet_cond` carry conditioning. When
                `concat_conditioning_mask` is enabled, it is concatenated to `controlnet_cond` along the channel
                dimension.
            guess_mode (`bool`, defaults to `False`):
                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if you
                remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            return_dict (`bool`, defaults to `True`):
                Whether or not to return a [`SparseControlNetOutput`] instead of a plain tuple.

        Returns:
            [`SparseControlNetOutput`] **or** `tuple`:
                If `return_dict` is `True`, a [`SparseControlNetOutput`] is returned, otherwise a tuple is returned
                where the first element is the sample tensor.
        """
        sample_batch_size, sample_channels, sample_num_frames, sample_height, sample_width = sample.shape

        # NOTE: SparseCtrl discards the noisy latents; the control signal enters only through `controlnet_cond`.
        sample = torch.zeros_like(sample)

        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # in rgb order by default
            ...
elif channel_order == "bgr": controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(sample_num_frames, dim=0) # 2. pre-process batch_size, channels, num_frames, height, width = sample.shape sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) sample = self.conv_in(sample) batch_frames, channels, height, width = sample.shape sample = sample[:, None].reshape(sample_batch_size, sample_num_frames, channels, height, width) if self.concat_conditioning_mask: controlnet_cond = torch.cat([controlnet_cond, conditioning_mask], dim=1) batch_size, channels, num_frames, height, width = controlnet_cond.shape controlnet_cond = controlnet_cond.permute(0, 2, 1, 3, 4).reshape( batch_size * num_frames, channels, height, width ) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) batch_frames, channels, height, width = controlnet_cond.shape controlnet_cond = controlnet_cond[:, None].reshape(batch_size, num_frames, channels, height, width) sample = sample + controlnet_cond batch_size, num_frames, channels, height, width = sample.shape sample = sample.reshape(sample_batch_size * sample_num_frames, channels, height, width) # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block(sample, emb) # 5. 
Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return SparseControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample ) # Copied from diffusers.models.controlnet.zero_module def zero_module(module: nn.Module) -> nn.Module: for p in module.parameters(): nn.init.zeros_(p) return module
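# Editor's sketch (not part of the library source): why `zero_module` wraps
# every controlnet projection above: a zero-initialized layer outputs exactly
# zero, so at the start of fine-tuning the ControlNet contributes nothing to
# the UNet activations and training departs smoothly from the base model.
if __name__ == "__main__":
    demo_conv = zero_module(nn.Conv2d(4, 4, kernel_size=1))
    demo_x = torch.randn(1, 4, 8, 8)
    assert torch.all(demo_conv(demo_x) == 0)  # residual contribution starts at zero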
diffusers/src/diffusers/models/controlnet_sparsectrl.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet_sparsectrl.py", "repo_id": "diffusers", "token_count": 16729 }
130
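# Editor's note: a standalone worked example (not library code) of the
# guess-mode scale schedule in `SparseControlNetModel.forward` above. With the
# default four down blocks and `layers_per_block=2` there are 12 down-block
# residuals, so `torch.logspace(-1, 0, 13)` ramps the conditioning strength
# geometrically from 0.1 on the shallowest features to 1.0 on the mid block.
import torch

scales = torch.logspace(-1, 0, steps=13)
print(round(scales[0].item(), 3), round(scales[-1].item(), 3))  # 0.1 1.0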
# Copyright 2024 AuraFlow Authors, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Union import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version, logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention_processor import ( Attention, AttentionProcessor, AuraFlowAttnProcessor2_0, FusedAuraFlowAttnProcessor2_0, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormZero, FP32LayerNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Taken from the original aura flow inference code. def find_multiple(n: int, k: int) -> int: if n % k == 0: return n return n + k - (n % k) # Aura Flow patch embed doesn't use convs for projections. # Additionally, it uses learned positional embeddings. class AuraFlowPatchEmbed(nn.Module): def __init__( self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, pos_embed_max_size=None, ): super().__init__() self.num_patches = (height // patch_size) * (width // patch_size) self.pos_embed_max_size = pos_embed_max_size self.proj = nn.Linear(patch_size * patch_size * in_channels, embed_dim) self.pos_embed = nn.Parameter(torch.randn(1, pos_embed_max_size, embed_dim) * 0.1) self.patch_size = patch_size self.height, self.width = height // patch_size, width // patch_size self.base_size = height // patch_size def pe_selection_index_based_on_dim(self, h, w): # select subset of positional embedding based on H, W, where H, W is size of latent # PE will be viewed as 2d-grid, and H/p x W/p of the PE will be selected # because original input are in flattened format, we have to flatten this 2d grid as well. h_p, w_p = h // self.patch_size, w // self.patch_size original_pe_indexes = torch.arange(self.pos_embed.shape[1]) h_max, w_max = int(self.pos_embed_max_size**0.5), int(self.pos_embed_max_size**0.5) original_pe_indexes = original_pe_indexes.view(h_max, w_max) starth = h_max // 2 - h_p // 2 endh = starth + h_p startw = w_max // 2 - w_p // 2 endw = startw + w_p original_pe_indexes = original_pe_indexes[starth:endh, startw:endw] return original_pe_indexes.flatten() def forward(self, latent): batch_size, num_channels, height, width = latent.size() latent = latent.view( batch_size, num_channels, height // self.patch_size, self.patch_size, width // self.patch_size, self.patch_size, ) latent = latent.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2) latent = self.proj(latent) pe_index = self.pe_selection_index_based_on_dim(height, width) return latent + self.pos_embed[:, pe_index] # Taken from the original Aura flow inference code. # Our feedforward only has GELU but Aura uses SiLU. 
class AuraFlowFeedForward(nn.Module): def __init__(self, dim, hidden_dim=None) -> None: super().__init__() if hidden_dim is None: hidden_dim = 4 * dim final_hidden_dim = int(2 * hidden_dim / 3) final_hidden_dim = find_multiple(final_hidden_dim, 256) self.linear_1 = nn.Linear(dim, final_hidden_dim, bias=False) self.linear_2 = nn.Linear(dim, final_hidden_dim, bias=False) self.out_projection = nn.Linear(final_hidden_dim, dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.silu(self.linear_1(x)) * self.linear_2(x) x = self.out_projection(x) return x class AuraFlowPreFinalBlock(nn.Module): def __init__(self, embedding_dim: int, conditioning_embedding_dim: int): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=False) def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) scale, shift = torch.chunk(emb, 2, dim=1) x = x * (1 + scale)[:, None, :] + shift[:, None, :] return x @maybe_allow_in_graph class AuraFlowSingleTransformerBlock(nn.Module): """Similar to `AuraFlowJointTransformerBlock` with a single DiT instead of an MMDiT.""" def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") processor = AuraFlowAttnProcessor2_0() self.attn = Attention( query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="fp32_layer_norm", out_dim=dim, bias=False, out_bias=False, processor=processor, ) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor): residual = hidden_states # Norm + Projection. norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # Attention. attn_output = self.attn(hidden_states=norm_hidden_states) # Process attention outputs for the `hidden_states`. hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(hidden_states) hidden_states = gate_mlp.unsqueeze(1) * ff_output hidden_states = residual + hidden_states return hidden_states @maybe_allow_in_graph class AuraFlowJointTransformerBlock(nn.Module): r""" Transformer block for Aura Flow. Similar to SD3 MMDiT. Differences (non-exhaustive): * QK Norm in the attention blocks * No bias in the attention blocks * Most LayerNorms are in FP32 Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. is_last (`bool`): Boolean to determine if this is the last block in the model. 
""" def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") self.norm1_context = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") processor = AuraFlowAttnProcessor2_0() self.attn = Attention( query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, added_proj_bias=False, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="fp32_layer_norm", out_dim=dim, bias=False, out_bias=False, processor=processor, context_pre_only=False, ) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) self.norm2_context = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff_context = AuraFlowFeedForward(dim, dim * 4) def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor ): residual = hidden_states residual_context = encoder_hidden_states # Norm + Projection. norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # Attention. attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states ) # Process attention outputs for the `hidden_states`. hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] hidden_states = gate_mlp.unsqueeze(1) * self.ff(hidden_states) hidden_states = residual + hidden_states # Process attention outputs for the `encoder_hidden_states`. encoder_hidden_states = self.norm2_context(residual_context + c_gate_msa.unsqueeze(1) * context_attn_output) encoder_hidden_states = encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] encoder_hidden_states = c_gate_mlp.unsqueeze(1) * self.ff_context(encoder_hidden_states) encoder_hidden_states = residual_context + encoder_hidden_states return encoder_hidden_states, hidden_states class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin): r""" A 2D Transformer model as introduced in AuraFlow (https://blog.fal.ai/auraflow/). Parameters: sample_size (`int`): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. patch_size (`int`): Patch size to turn the input data into small patches. in_channels (`int`, *optional*, defaults to 16): The number of channels in the input. num_mmdit_layers (`int`, *optional*, defaults to 4): The number of layers of MMDiT Transformer blocks to use. num_single_dit_layers (`int`, *optional*, defaults to 4): The number of layers of Transformer blocks to use. These blocks use concatenated image and text representations. attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head. num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention. joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`. out_channels (`int`, defaults to 16): Number of output channels. pos_embed_max_size (`int`, defaults to 4096): Maximum positions to embed from the image latents. 
""" _no_split_modules = ["AuraFlowJointTransformerBlock", "AuraFlowSingleTransformerBlock", "AuraFlowPatchEmbed"] _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: int = 64, patch_size: int = 2, in_channels: int = 4, num_mmdit_layers: int = 4, num_single_dit_layers: int = 32, attention_head_dim: int = 256, num_attention_heads: int = 12, joint_attention_dim: int = 2048, caption_projection_dim: int = 3072, out_channels: int = 4, pos_embed_max_size: int = 1024, ): super().__init__() default_out_channels = in_channels self.out_channels = out_channels if out_channels is not None else default_out_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = AuraFlowPatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, pos_embed_max_size=pos_embed_max_size, ) self.context_embedder = nn.Linear( self.config.joint_attention_dim, self.config.caption_projection_dim, bias=False ) self.time_step_embed = Timesteps(num_channels=256, downscale_freq_shift=0, scale=1000, flip_sin_to_cos=True) self.time_step_proj = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim) self.joint_transformer_blocks = nn.ModuleList( [ AuraFlowJointTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for i in range(self.config.num_mmdit_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ AuraFlowSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for _ in range(self.config.num_single_dit_layers) ] ) self.norm_out = AuraFlowPreFinalBlock(self.inner_dim, self.inner_dim) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False) # https://arxiv.org/abs/2309.16588 # prevents artifacts in the attention maps self.register_tokens = nn.Parameter(torch.randn(1, 8, self.inner_dim) * 0.02) self.gradient_checkpointing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. 
This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedAuraFlowAttnProcessor2_0 def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAuraFlowAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, timestep: torch.LongTensor = None, return_dict: bool = True, ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: height, width = hidden_states.shape[-2:] # Apply patch embedding, timestep embedding, and project the caption embeddings. hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too. temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype) temb = self.time_step_proj(temb) encoder_hidden_states = self.context_embedder(encoder_hidden_states) encoder_hidden_states = torch.cat( [self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1 ) # MMDiT blocks. 
for index_block, block in enumerate(self.joint_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, **ckpt_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb ) # Single DiT blocks that combine the `hidden_states` (image) and `encoder_hidden_states` (text) if len(self.single_transformer_blocks) > 0: encoder_seq_len = encoder_hidden_states.size(1) combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for index_block, block in enumerate(self.single_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} combined_hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), combined_hidden_states, temb, **ckpt_kwargs, ) else: combined_hidden_states = block(hidden_states=combined_hidden_states, temb=temb) hidden_states = combined_hidden_states[:, encoder_seq_len:] hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) # unpatchify patch_size = self.config.patch_size out_channels = self.config.out_channels height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape( shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
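# Editor's sketch (not part of the library source): the unpatchify step at the
# end of `forward` interprets each token's features as (patch_row, patch_col,
# channel), while `AuraFlowPatchEmbed.forward` packs them as (channel,
# patch_row, patch_col). The mismatch is harmless because learned linear
# projections sit on both sides, but it is worth knowing when porting weights.
# The round trip below uses the (p, q, c) layout the unpatchify expects and
# recovers the input exactly:
if __name__ == "__main__":
    B, C, H, W, p = 2, 4, 8, 8, 2
    latent = torch.randn(B, C, H, W)
    tokens = latent.view(B, C, H // p, p, W // p, p)  # n c h p w q
    tokens = tokens.permute(0, 2, 4, 3, 5, 1).reshape(B, (H // p) * (W // p), p * p * C)
    out = tokens.reshape(B, H // p, W // p, p, p, C)  # the model's unpatchify
    out = torch.einsum("nhwpqc->nchpwq", out).reshape(B, C, H, W)
    assert torch.equal(latent, out)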
diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py", "repo_id": "diffusers", "token_count": 9942 }
131
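# Editor's note: a worked example (not library code) of the feed-forward sizing
# in `AuraFlowFeedForward` above, which shrinks the 4x expansion to 2/3 and
# rounds up to a multiple of 256 via `find_multiple` (the SwiGLU-style
# convention). For dim = 3072: 4 * 3072 = 12288, and 2/3 of that is 8192,
# already a multiple of 256, so the hidden width is 8192.
def find_multiple(n: int, k: int) -> int:
    if n % k == 0:
        return n
    return n + k - (n % k)

assert find_multiple(int(2 * (4 * 3072) / 3), 256) == 8192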
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput
from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from ..modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    The output of [`UNet1DModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.Tensor


class UNet1DModel(ModelMixin, ConfigMixin):
    r"""
    A 1D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime.
        sample_rate (`int`, *optional*):
            The sample rate of the audio the model is intended for. Stored in the config and unused by the forward
            pass.
        in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 2): Number of channels in the output.
        extra_in_channels (`int`, *optional*, defaults to 0):
            Number of additional channels to be added to the input of the first down block. Useful for cases where the
            input data has more channels than what the model was initially designed for.
        time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use.
        use_timestep_embedding (`bool`, *optional*, defaults to `False`):
            Whether to pass the time projection through an additional [`TimestepEmbedding`] MLP.
        freq_shift (`float`, *optional*, defaults to 0.0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D")`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(32, 32, 64)`):
            Tuple of block output channels.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock1D"`): Block type for middle of UNet.
        out_block_type (`str`, *optional*, defaults to `None`): Optional output processing block of UNet.
        act_fn (`str`, *optional*, defaults to `None`): Optional activation function in UNet blocks.
        norm_num_groups (`int`, *optional*, defaults to 8): The number of groups for normalization.
        layers_per_block (`int`, *optional*, defaults to 1): The number of layers per block.
        downsample_each_block (`bool`, *optional*, defaults to `False`):
            Experimental feature for using a UNet without upsampling.
""" @register_to_config def __init__( self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: Tuple[str] = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False, ): super().__init__() self.sample_size = sample_size # time if time_embedding_type == "fourier": self.time_proj = GaussianFourierProjection( embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == "positional": self.time_proj = Timesteps( block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift ) timestep_input_dim = block_out_channels[0] if use_timestep_embedding: time_embed_dim = block_out_channels[0] * 4 self.time_mlp = TimestepEmbedding( in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], ) self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) self.out_block = None # down output_channel = in_channels for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] if i == 0: input_channel += extra_in_channels is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, ) self.down_blocks.append(down_block) # mid self.mid_block = get_mid_block( mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, ) # up reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] if out_block_type is None: final_upsample_channels = out_channels else: final_upsample_channels = block_out_channels[0] for i, up_block_type in enumerate(up_block_types): prev_output_channel = output_channel output_channel = ( reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels ) is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block( up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.out_block = get_out_block( out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, ) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, ) -> Union[UNet1DOutput, Tuple]: r""" The 
[`UNet1DModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch_size, num_channels, sample_size)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_1d.UNet1DOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_1d.UNet1DOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 1. time timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timestep_embed = self.time_proj(timesteps) if self.config.use_timestep_embedding: timestep_embed = self.time_mlp(timestep_embed) else: timestep_embed = timestep_embed[..., None] timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down down_block_res_samples = () for downsample_block in self.down_blocks: sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) down_block_res_samples += res_samples # 3. mid if self.mid_block: sample = self.mid_block(sample, timestep_embed) # 4. up for i, upsample_block in enumerate(self.up_blocks): res_samples = down_block_res_samples[-1:] down_block_res_samples = down_block_res_samples[:-1] sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) # 5. post-process if self.out_block: sample = self.out_block(sample, timestep_embed) if not return_dict: return (sample,) return UNet1DOutput(sample=sample)
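# Editor's sketch (not part of the library source): a minimal smoke test in
# the dance-diffusion style this model was written for. `extra_in_channels=16`
# leaves room for the 16-channel Fourier time embedding that
# `DownBlock1DNoSkip` concatenates onto the input; all values below are
# illustrative assumptions, not a documented contract.
if __name__ == "__main__":
    model = UNet1DModel(
        sample_size=2048,
        in_channels=2,
        out_channels=2,
        extra_in_channels=16,
        block_out_channels=(32, 32, 64),
    )
    noisy = torch.randn(1, 2, 2048)  # (batch, channels, length)
    denoised = model(noisy, timestep=10).sample
    assert denoised.shape == noisy.shape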
diffusers/src/diffusers/models/unets/unet_1d.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_1d.py", "repo_id": "diffusers", "token_count": 4862 }
132
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # JAX implementation of VQGAN from taming-transformers https://github.com/CompVis/taming-transformers import math from functools import partial from typing import Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .modeling_flax_utils import FlaxModelMixin @flax.struct.dataclass class FlaxDecoderOutput(BaseOutput): """ Output of decoding method. Args: sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): The decoded output sample from the last layer of the model. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): The `dtype` of the parameters. """ sample: jnp.ndarray @flax.struct.dataclass class FlaxAutoencoderKLOutput(BaseOutput): """ Output of AutoencoderKL encoding method. Args: latent_dist (`FlaxDiagonalGaussianDistribution`): Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`. `FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution. """ latent_dist: "FlaxDiagonalGaussianDistribution" class FlaxUpsample2D(nn.Module): """ Flax implementation of 2D Upsample layer Args: in_channels (`int`): Input channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, hidden_states): batch, height, width, channels = hidden_states.shape hidden_states = jax.image.resize( hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", ) hidden_states = self.conv(hidden_states) return hidden_states class FlaxDownsample2D(nn.Module): """ Flax implementation of 2D Downsample layer Args: in_channels (`int`): Input channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), strides=(2, 2), padding="VALID", dtype=self.dtype, ) def __call__(self, hidden_states): pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim hidden_states = jnp.pad(hidden_states, pad_width=pad) hidden_states = self.conv(hidden_states) return hidden_states class FlaxResnetBlock2D(nn.Module): """ Flax implementation of 2D Resnet Block. Args: in_channels (`int`): Input channels out_channels (`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for group norm. use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`): Whether to use `nin_shortcut`. 
This activates a new layer inside ResNet block dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int = None dropout: float = 0.0 groups: int = 32 use_nin_shortcut: bool = None dtype: jnp.dtype = jnp.float32 def setup(self): out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) self.conv1 = nn.Conv( out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) self.dropout_layer = nn.Dropout(self.dropout) self.conv2 = nn.Conv( out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut self.conv_shortcut = None if use_nin_shortcut: self.conv_shortcut = nn.Conv( out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) def __call__(self, hidden_states, deterministic=True): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: residual = self.conv_shortcut(residual) return hidden_states + residual class FlaxAttentionBlock(nn.Module): r""" Flax Convolutional based multi-head attention block for diffusion-based VAE. Parameters: channels (:obj:`int`): Input channels num_head_channels (:obj:`int`, *optional*, defaults to `None`): Number of attention heads num_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for group norm dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ channels: int num_head_channels: int = None num_groups: int = 32 dtype: jnp.dtype = jnp.float32 def setup(self): self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 dense = partial(nn.Dense, self.channels, dtype=self.dtype) self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6) self.query, self.key, self.value = dense(), dense(), dense() self.proj_attn = dense() def transpose_for_scores(self, projection): new_projection_shape = projection.shape[:-1] + (self.num_heads, -1) # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) new_projection = projection.reshape(new_projection_shape) # (B, T, H, D) -> (B, H, T, D) new_projection = jnp.transpose(new_projection, (0, 2, 1, 3)) return new_projection def __call__(self, hidden_states): residual = hidden_states batch, height, width, channels = hidden_states.shape hidden_states = self.group_norm(hidden_states) hidden_states = hidden_states.reshape((batch, height * width, channels)) query = self.query(hidden_states) key = self.key(hidden_states) value = self.value(hidden_states) # transpose query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) # compute attentions scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads)) attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale) attn_weights = nn.softmax(attn_weights, axis=-1) # attend to values hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights) 
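        # merge heads back: (B, H, T, D) -> (B, T, H, D) -> (B, T, H * D)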
hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3)) new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,) hidden_states = hidden_states.reshape(new_hidden_states_shape) hidden_states = self.proj_attn(hidden_states) hidden_states = hidden_states.reshape((batch, height, width, channels)) hidden_states = hidden_states + residual return hidden_states class FlaxDownEncoderBlock2D(nn.Module): r""" Flax Resnet blocks-based Encoder block for diffusion-based VAE. Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet block group norm add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsample layer dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) return hidden_states class FlaxUpDecoderBlock2D(nn.Module): r""" Flax Resnet blocks-based Decoder block for diffusion-based VAE. Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet block group norm add_upsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add upsample layer dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2D(nn.Module): r""" Flax Unet Mid-Block module. 
Parameters: in_channels (:obj:`int`): Input channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet and Attention block group norm num_attention_heads (:obj:`int`, *optional*, defaults to `1`): Number of attention heads for each attention block dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 num_attention_heads: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) # there is always at least one resnet resnets = [ FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype, ) ] attentions = [] for _ in range(self.num_layers): attn_block = FlaxAttentionBlock( channels=self.in_channels, num_head_channels=self.num_attention_heads, num_groups=resnet_groups, dtype=self.dtype, ) attentions.append(attn_block) res_block = FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, deterministic=deterministic) for attn, resnet in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states, deterministic=deterministic) return hidden_states class FlaxEncoder(nn.Module): r""" Flax Implementation of VAE Encoder. This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: in_channels (:obj:`int`, *optional*, defaults to 3): Input channels out_channels (:obj:`int`, *optional*, defaults to 3): Output channels down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`): DownEncoder block type block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): Tuple containing the number of output channels for each block layers_per_block (:obj:`int`, *optional*, defaults to `2`): Number of Resnet layer for each block norm_num_groups (:obj:`int`, *optional*, defaults to `32`): norm num group act_fn (:obj:`str`, *optional*, defaults to `silu`): Activation function double_z (:obj:`bool`, *optional*, defaults to `False`): Whether to double the last output channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ("DownEncoderBlock2D",) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = "silu" double_z: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels # in self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # downsampling down_blocks = [] output_channel = block_out_channels[0] for i, _ in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = FlaxDownEncoderBlock2D( in_channels=input_channel, out_channels=output_channel, num_layers=self.layers_per_block, resnet_groups=self.norm_num_groups, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(down_block) self.down_blocks = down_blocks # middle self.mid_block = FlaxUNetMidBlock2D( in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype, ) # end conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) self.conv_out = nn.Conv( conv_out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, sample, deterministic: bool = True): # in sample = self.conv_in(sample) # downsampling for block in self.down_blocks: sample = block(sample, deterministic=deterministic) # middle sample = self.mid_block(sample, deterministic=deterministic) # end sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDecoder(nn.Module): r""" Flax Implementation of VAE Decoder. This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: in_channels (:obj:`int`, *optional*, defaults to 3): Input channels out_channels (:obj:`int`, *optional*, defaults to 3): Output channels up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`): UpDecoder block type block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): Tuple containing the number of output channels for each block layers_per_block (:obj:`int`, *optional*, defaults to `2`): Number of Resnet layer for each block norm_num_groups (:obj:`int`, *optional*, defaults to `32`): norm num group act_fn (:obj:`str`, *optional*, defaults to `silu`): Activation function double_z (:obj:`bool`, *optional*, defaults to `False`): Whether to double the last output channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): parameters `dtype` """ in_channels: int = 3 out_channels: int = 3 up_block_types: Tuple[str] = ("UpDecoderBlock2D",) block_out_channels: int = (64,) layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = "silu" dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels # z to block_in self.conv_in = nn.Conv( block_out_channels[-1], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # middle self.mid_block = FlaxUNetMidBlock2D( in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype, ) # upsampling reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] up_blocks = [] for i, _ in enumerate(self.up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = FlaxUpDecoderBlock2D( in_channels=prev_output_channel, out_channels=output_channel, num_layers=self.layers_per_block + 1, resnet_groups=self.norm_num_groups, add_upsample=not is_final_block, dtype=self.dtype, ) up_blocks.append(up_block) prev_output_channel = output_channel self.up_blocks = up_blocks # end self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) self.conv_out = nn.Conv( self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, sample, deterministic: bool = True): # z to block_in sample = self.conv_in(sample) # middle sample = self.mid_block(sample, deterministic=deterministic) # upsampling for block in self.up_blocks: sample = block(sample, deterministic=deterministic) sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDiagonalGaussianDistribution(object): def __init__(self, parameters, deterministic=False): # Last axis to account for channels-last self.mean, self.logvar = jnp.split(parameters, 2, axis=-1) self.logvar = jnp.clip(self.logvar, -30.0, 20.0) self.deterministic = deterministic self.std = jnp.exp(0.5 * self.logvar) self.var = jnp.exp(self.logvar) if self.deterministic: self.var = self.std = jnp.zeros_like(self.mean) def sample(self, key): return self.mean + 
self.std * jax.random.normal(key, self.mean.shape)

    def kl(self, other=None):
        if self.deterministic:
            return jnp.array([0.0])

        if other is None:
            return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3])

        return 0.5 * jnp.sum(
            jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar,
            axis=[1, 2, 3],
        )

    def nll(self, sample, axis=[1, 2, 3]):
        if self.deterministic:
            return jnp.array([0.0])

        logtwopi = jnp.log(2.0 * jnp.pi)
        return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis)

    def mode(self):
        return self.mean


@flax_register_to_config
class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    Flax implementation of a VAE model with KL loss for decoding latent representations.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
    implemented for all models (such as downloading or saving).

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matter related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3):
            Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[str]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`):
            Number of ResNet layers for each block.
        act_fn (`str`, *optional*, defaults to `silu`):
            The activation function to use.
        latent_channels (`int`, *optional*, defaults to `4`):
            Number of channels in the latent space.
        norm_num_groups (`int`, *optional*, defaults to `32`):
            The number of groups for normalization.
        sample_size (`int`, *optional*, defaults to 32):
            Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            The `dtype` of the parameters.
""" in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ("DownEncoderBlock2D",) up_block_types: Tuple[str] = ("UpDecoderBlock2D",) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 1 act_fn: str = "silu" latent_channels: int = 4 norm_num_groups: int = 32 sample_size: int = 32 scaling_factor: float = 0.18215 dtype: jnp.dtype = jnp.float32 def setup(self): self.encoder = FlaxEncoder( in_channels=self.config.in_channels, out_channels=self.config.latent_channels, down_block_types=self.config.down_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, act_fn=self.config.act_fn, norm_num_groups=self.config.norm_num_groups, double_z=True, dtype=self.dtype, ) self.decoder = FlaxDecoder( in_channels=self.config.latent_channels, out_channels=self.config.out_channels, up_block_types=self.config.up_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, norm_num_groups=self.config.norm_num_groups, act_fn=self.config.act_fn, dtype=self.dtype, ) self.quant_conv = nn.Conv( 2 * self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) self.post_quant_conv = nn.Conv( self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) params_rng, dropout_rng, gaussian_rng = jax.random.split(rng, 3) rngs = {"params": params_rng, "dropout": dropout_rng, "gaussian": gaussian_rng} return self.init(rngs, sample)["params"] def encode(self, sample, deterministic: bool = True, return_dict: bool = True): sample = jnp.transpose(sample, (0, 2, 3, 1)) hidden_states = self.encoder(sample, deterministic=deterministic) moments = self.quant_conv(hidden_states) posterior = FlaxDiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return FlaxAutoencoderKLOutput(latent_dist=posterior) def decode(self, latents, deterministic: bool = True, return_dict: bool = True): if latents.shape[-1] != self.config.latent_channels: latents = jnp.transpose(latents, (0, 2, 3, 1)) hidden_states = self.post_quant_conv(latents) hidden_states = self.decoder(hidden_states, deterministic=deterministic) hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2)) if not return_dict: return (hidden_states,) return FlaxDecoderOutput(sample=hidden_states) def __call__(self, sample, sample_posterior=False, deterministic: bool = True, return_dict: bool = True): posterior = self.encode(sample, deterministic=deterministic, return_dict=return_dict) if sample_posterior: rng = self.make_rng("gaussian") hidden_states = posterior.latent_dist.sample(rng) else: hidden_states = posterior.latent_dist.mode() sample = self.decode(hidden_states, return_dict=return_dict).sample if not return_dict: return (sample,) return FlaxDecoderOutput(sample=sample)
diffusers/src/diffusers/models/vae_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/vae_flax.py", "repo_id": "diffusers", "token_count": 14479 }
133
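A note on the posterior class in the file above: `encode` packs the mean and log-variance into a single channels-last tensor (via `quant_conv`), and `FlaxDiagonalGaussianDistribution` splits it and samples with the reparameterization trick. A minimal sketch of that contract, using only the class defined in this file (the shapes are illustrative):

```py
import jax
import jax.numpy as jnp

# `quant_conv` in `encode` outputs 2 * latent_channels along the last
# (channels-last) axis: here 4 latent channels -> 8 moment channels.
moments = jnp.zeros((1, 8, 8, 8))
dist = FlaxDiagonalGaussianDistribution(moments)

z = dist.sample(jax.random.PRNGKey(0))  # reparameterized: mean + std * eps, shape (1, 8, 8, 4)
kl = dist.kl()                          # KL divergence to N(0, I), reduced over axes (1, 2, 3)
mode = dist.mode()                      # deterministic latent: just the mean
```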
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) else: _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) else: from .pipeline_audioldm import AudioLDMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/audioldm/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm/__init__.py", "repo_id": "diffusers", "token_count": 581 }
134
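The guard in the `__init__` above keeps `import diffusers` lightweight: `pipeline_audioldm` is only executed when the symbol is first resolved through `_LazyModule`, and on a missing or too-old dependency the same name maps to a dummy object that raises an informative error at use time. A hedged usage sketch (the checkpoint name is illustrative):

```py
# Resolving the attribute is what actually imports the heavy module:
from diffusers.pipelines.audioldm import AudioLDMPipeline

# Requires torch and transformers >= 4.27.0; otherwise the dummy
# object raises an explanatory error here instead.
pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2")
```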
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import torch from ...models import UNet2DModel from ...schedulers import CMStochasticIterativeScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ConsistencyModelPipeline >>> device = "cuda" >>> # Load the cd_imagenet64_l2 checkpoint. >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2" >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) >>> pipe.to(device) >>> # Onestep Sampling >>> image = pipe(num_inference_steps=1).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample.png") >>> # Onestep sampling, class-conditional image generation >>> # ImageNet-64 class label 145 corresponds to king penguins >>> image = pipe(num_inference_steps=1, class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png") >>> # Multistep sampling, class-conditional image generation >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original GitHub repo: >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77 >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png") ``` """ class ConsistencyModelPipeline(DiffusionPipeline): r""" Pipeline for unconditional or class-conditional image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only compatible with [`CMStochasticIterativeScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: super().__init__() self.register_modules( unet=unet, scheduler=scheduler, ) self.safety_checker = None def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Follows diffusers.VaeImageProcessor.postprocess def postprocess_image(self, sample: torch.Tensor, output_type: str = "pil"): if output_type not in ["pt", "np", "pil"]: raise ValueError( f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" ) # Equivalent to diffusers.VaeImageProcessor.denormalize sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == "pt": return sample # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "np": return sample # Output_type must be 'pil' sample = self.numpy_to_pil(sample) return sample def prepare_class_labels(self, batch_size, device, class_labels=None): if self.unet.config.num_class_embeds is not None: if isinstance(class_labels, list): class_labels = torch.tensor(class_labels, dtype=torch.int) elif isinstance(class_labels, int): assert batch_size == 1, "Batch size must be 1 if classes is an int" class_labels = torch.tensor([class_labels], dtype=torch.int) elif class_labels is None: # Randomly generate batch_size class labels # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) class_labels = class_labels.to(device) else: class_labels = None return class_labels def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): if num_inference_steps is None and timesteps is None: raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") if num_inference_steps is not None and timesteps is not None: logger.warning( f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" " `timesteps` will be used over `num_inference_steps`." ) if latents is not None: expected_shape = (batch_size, 3, img_size, img_size) if latents.shape != expected_shape: raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, batch_size: int = 1, class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, num_inference_steps: int = 1, timesteps: List[int] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): Optional class labels for conditioning class-conditional consistency models. Not used if the model is not class-conditional. num_inference_steps (`int`, *optional*, defaults to 1): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Prepare call parameters img_size = self.unet.config.sample_size device = self._execution_device # 1. Check inputs self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) # 2. Prepare image latents # Sample image latents x_0 ~ N(0, sigma_0^2 * I) sample = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents, ) # 3. Handle class_labels for class-conditional models class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) # 4. Prepare timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps # 5. Denoising loop # Multistep sampling: implements Algorithm 1 in the paper with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): scaled_sample = self.scheduler.scale_model_input(sample, t) model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] # call the callback, if provided progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, sample) # 6. Post-process image sample image = self.postprocess_image(sample, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py", "repo_id": "diffusers", "token_count": 5187 }
135
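`prepare_latents` in the pipeline above accepts either one `torch.Generator` or a list with exactly one generator per batch element; the list form makes each image in the batch independently reproducible. A short usage sketch, assuming `pipe` is the `ConsistencyModelPipeline` from the docstring example:

```py
import torch

# One generator per batch element; a length mismatch raises the
# ValueError shown in prepare_latents above.
generators = [torch.Generator("cpu").manual_seed(seed) for seed in (0, 1, 2, 3)]
images = pipe(batch_size=4, num_inference_steps=1, generator=generators).images
```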
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_controlnet_xs"] = ["StableDiffusionControlNetXSPipeline"] _import_structure["pipeline_controlnet_xs_sd_xl"] = ["StableDiffusionXLControlNetXSPipeline"] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: pass # _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet_xs/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet_xs/__init__.py", "repo_id": "diffusers", "token_count": 894 }
136
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...utils import BaseOutput


@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for DeepFloyd IF pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array represent the denoised images of the diffusion pipeline.
        nsfw_detected (`List[bool]`):
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content or a watermark. `None` if safety checking could not be performed.
        watermark_detected (`List[bool]`):
            List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety
            checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py", "repo_id": "diffusers", "token_count": 409 }
137
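Like all `BaseOutput` subclasses, `IFPipelineOutput` supports both attribute and positional access (fields set to `None` are dropped from the tuple view). A small hand-constructed sketch:

```py
import numpy as np

output = IFPipelineOutput(
    images=np.zeros((1, 64, 64, 3)),
    nsfw_detected=[False],
    watermark_detected=[False],
)
assert output.images is output[0]            # attribute and index access agree
images, nsfw, watermark = output.to_tuple()  # plain-tuple view of the fields
```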
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_pndm": ["PNDMPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_pndm import PNDMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/pndm/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/pndm/__init__.py", "repo_id": "diffusers", "token_count": 182 }
138
# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DDPMParallelScheduler >>> from diffusers import StableDiffusionParadigmsPipeline >>> scheduler = DDPMParallelScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler") >>> pipe = StableDiffusionParadigmsPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> ngpu, batch_per_device = torch.cuda.device_count(), 5 >>> pipe.wrapped_unet = torch.nn.DataParallel(pipe.unet, device_ids=[d for d in range(ngpu)]) >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt, parallel=ngpu * batch_per_device, num_inference_steps=1000).images[0] ``` """ class StableDiffusionParadigmsPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using a parallelized version of Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # attribute to wrap the unet with torch.nn.DataParallel when running multiple denoising steps on multiple GPUs self.wrapped_unet = self.unet # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _cumsum(self, input, dim, debug=False): if debug: # cumsum_cuda_kernel does not have a deterministic implementation # so perform cumsum on cpu for debugging purposes return torch.cumsum(input.cpu().float(), dim=dim).to(input.device) else: return torch.cumsum(input, dim=dim) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, parallel: int = 10, tolerance: float = 0.1, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, debug: bool = False, clip_skip: int = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. parallel (`int`, *optional*, defaults to 10): The batch size to use when doing parallel sampling. More parallelism may lead to faster inference but requires higher memory usage and can also require more total FLOPs. tolerance (`float`, *optional*, defaults to 0.1): The error tolerance for determining when to slide the batch window forward for parallel sampling. Lower tolerance usually leads to less or no degradation. Higher tolerance is faster but can risk degradation of sample quality. The tolerance is specified as a ratio of the scheduler's noise magnitude. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            debug (`bool`, *optional*, defaults to `False`):
                Whether or not to run in debug mode. In debug mode, `torch.cumsum` is evaluated using the CPU.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf. `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) extra_step_kwargs.pop("generator", None) # # 7. Denoising loop scheduler = self.scheduler parallel = min(parallel, len(scheduler.timesteps)) begin_idx = 0 end_idx = parallel latents_time_evolution_buffer = torch.stack([latents] * (len(scheduler.timesteps) + 1)) # We must make sure the noise of stochastic schedulers such as DDPM is sampled only once per timestep. # Sampling inside the parallel denoising loop will mess this up, so we pre-sample the noise vectors outside the denoising loop. noise_array = torch.zeros_like(latents_time_evolution_buffer) for j in range(len(scheduler.timesteps)): base_noise = randn_tensor( shape=latents.shape, generator=generator, device=latents.device, dtype=prompt_embeds.dtype ) noise = (self.scheduler._get_variance(scheduler.timesteps[j]) ** 0.5) * base_noise noise_array[j] = noise.clone() # We specify the error tolerance as a ratio of the scheduler's noise magnitude. We similarly compute the error tolerance # outside of the denoising loop to avoid recomputing it at every step. # We will be dividing the norm of the noise, so we store its inverse here to avoid a division at every step. inverse_variance_norm = 1.0 / torch.tensor( [scheduler._get_variance(scheduler.timesteps[j]) for j in range(len(scheduler.timesteps))] + [0] ).to(noise_array.device) latent_dim = noise_array[0, 0].numel() inverse_variance_norm = inverse_variance_norm[:, None] / latent_dim scaled_tolerance = tolerance**2 with self.progress_bar(total=num_inference_steps) as progress_bar: steps = 0 while begin_idx < len(scheduler.timesteps): # these have shape (parallel_dim, 2*batch_size, ...) 
# parallel_len is at most parallel, but could be less if we are at the end of the timesteps # we are processing batch window of timesteps spanning [begin_idx, end_idx) parallel_len = end_idx - begin_idx block_prompt_embeds = torch.stack([prompt_embeds] * parallel_len) block_latents = latents_time_evolution_buffer[begin_idx:end_idx] block_t = scheduler.timesteps[begin_idx:end_idx, None].repeat(1, batch_size * num_images_per_prompt) t_vec = block_t if do_classifier_free_guidance: t_vec = t_vec.repeat(1, 2) # expand the latents if we are doing classifier free guidance latent_model_input = ( torch.cat([block_latents] * 2, dim=1) if do_classifier_free_guidance else block_latents ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t_vec) # if parallel_len is small, no need to use multiple GPUs net = self.wrapped_unet if parallel_len > 3 else self.unet # predict the noise residual, shape is now [parallel_len * 2 * batch_size * num_images_per_prompt, ...] model_output = net( latent_model_input.flatten(0, 1), t_vec.flatten(0, 1), encoder_hidden_states=block_prompt_embeds.flatten(0, 1), cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] per_latent_shape = model_output.shape[1:] if do_classifier_free_guidance: model_output = model_output.reshape( parallel_len, 2, batch_size * num_images_per_prompt, *per_latent_shape ) noise_pred_uncond, noise_pred_text = model_output[:, 0], model_output[:, 1] model_output = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) model_output = model_output.reshape( parallel_len * batch_size * num_images_per_prompt, *per_latent_shape ) block_latents_denoise = scheduler.batch_step_no_noise( model_output=model_output, timesteps=block_t.flatten(0, 1), sample=block_latents.flatten(0, 1), **extra_step_kwargs, ).reshape(block_latents.shape) # back to shape (parallel_dim, batch_size, ...) # now we want to add the pre-sampled noise # parallel sampling algorithm requires computing the cumulative drift from the beginning # of the window, so we need to compute cumulative sum of the deltas and the pre-sampled noises. 
delta = block_latents_denoise - block_latents cumulative_delta = self._cumsum(delta, dim=0, debug=debug) cumulative_noise = self._cumsum(noise_array[begin_idx:end_idx], dim=0, debug=debug) # if we are using an ODE-like scheduler (like DDIM), we don't want to add noise if scheduler._is_ode_scheduler: cumulative_noise = 0 block_latents_new = ( latents_time_evolution_buffer[begin_idx][None,] + cumulative_delta + cumulative_noise ) cur_error = torch.linalg.norm( (block_latents_new - latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1]).reshape( parallel_len, batch_size * num_images_per_prompt, -1 ), dim=-1, ).pow(2) error_ratio = cur_error * inverse_variance_norm[begin_idx + 1 : end_idx + 1] # find the first index of the vector error_ratio that is greater than error tolerance # we can shift the window for the next iteration up to this index error_ratio = torch.nn.functional.pad( error_ratio, (0, 0, 0, 1), value=1e9 ) # handle the case when everything is below ratio, by padding the end of parallel_len dimension any_error_at_time = torch.max(error_ratio > scaled_tolerance, dim=1).values.int() ind = torch.argmax(any_error_at_time).item() # compute the new begin and end idxs for the window new_begin_idx = begin_idx + min(1 + ind, parallel) new_end_idx = min(new_begin_idx + parallel, len(scheduler.timesteps)) # store the computed latents for the current window in the global buffer latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1] = block_latents_new # initialize the new sliding window latents with the end of the current window, # should be better than random initialization latents_time_evolution_buffer[end_idx : new_end_idx + 1] = latents_time_evolution_buffer[end_idx][ None, ] steps += 1 progress_bar.update(new_begin_idx - begin_idx) if callback is not None and steps % callback_steps == 0: callback(begin_idx, block_t[begin_idx], latents_time_evolution_buffer[begin_idx]) begin_idx = new_begin_idx end_idx = new_end_idx latents = latents_time_evolution_buffer[-1] if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
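
# ---------------------------------------------------------------------------
# Usage sketch (not part of the pipeline above; a hedged illustration only).
# It assumes this module's public entry point is `StableDiffusionParadigmsPipeline`
# and that a parallel scheduler such as `DDPMParallelScheduler` is available in
# diffusers; the checkpoint id, window width `parallel`, and step count are
# illustrative values, not requirements of the implementation.
if __name__ == "__main__":
    import torch

    from diffusers import DDPMParallelScheduler, StableDiffusionParadigmsPipeline

    # ParaDiGMS needs a scheduler that can step a whole window of timesteps at
    # once (see `batch_step_no_noise` in the denoising loop above).
    scheduler = DDPMParallelScheduler.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
    )
    pipe = StableDiffusionParadigmsPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16
    ).to("cuda")

    # Optionally shard the UNet across GPUs: the loop above switches to
    # `wrapped_unet` whenever the batch window is large enough to amortize it.
    ngpu = torch.cuda.device_count()
    pipe.wrapped_unet = torch.nn.DataParallel(pipe.unet, device_ids=list(range(ngpu)))

    image = pipe(
        "a photo of an astronaut riding a horse on mars",
        parallel=ngpu * 10,
        num_inference_steps=1000,
        tolerance=0.1,
    ).images[0]
    image.save("parallel_sample.png")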
diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py", "repo_id": "diffusers", "token_count": 17953 }
139
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Union import torch from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` """ def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 new_width = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class KandinskyV22Pipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. 
""" model_cpu_offload_seq = "unet->movq" _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds"] def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for negative text prompt, will be used to condition the image generation. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) device = self._execution_device self._guidance_scale = guidance_scale if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if self.do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=self.unet.dtype, device=device ) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels height, width = downscale_height_and_width(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, ) self._num_timesteps = len(timesteps) for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents added_cond_kwargs = {"image_embeds": image_embeds} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = 
noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, )[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) image_embeds = callback_outputs.pop("image_embeds", image_embeds) negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") if not output_type == "latent": # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
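
# ---------------------------------------------------------------------------
# Illustrative check (not part of the pipeline above): how the rounding in
# `downscale_height_and_width` behaves. With the default scale_factor=8, a
# requested size is rounded up to the next multiple of 64 (= 8**2) and then
# divided by 8, which keeps the movq latent grid integral. The sizes below
# are arbitrary examples.
if __name__ == "__main__":
    for h, w in [(512, 512), (768, 768), (500, 300)]:
        print((h, w), "->", downscale_height_and_width(h, w))
    # (512, 512) -> (64, 64)
    # (768, 768) -> (96, 96)
    # (500, 300) -> (64, 40)  # 500 -> 512 and 300 -> 320, then divided by 8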
diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py", "repo_id": "diffusers", "token_count": 6259 }
140
# Copyright 2024 ChatGLM3-6B Model Team, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Optional, Tuple import torch import torch.nn.functional as F from torch import nn from torch.nn import LayerNorm from torch.nn.utils import skip_init from transformers import PretrainedConfig, PreTrainedModel from transformers.modeling_outputs import BaseModelOutputWithPast from ...utils import logging logger = logging.get_logger(__name__) class ChatGLMConfig(PretrainedConfig): model_type = "chatglm" def __init__( self, num_layers=28, padded_vocab_size=65024, hidden_size=4096, ffn_hidden_size=13696, kv_channels=128, num_attention_heads=32, seq_length=2048, hidden_dropout=0.0, classifier_dropout=None, attention_dropout=0.0, layernorm_epsilon=1e-5, rmsnorm=True, apply_residual_connection_post_layernorm=False, post_layer_norm=True, add_bias_linear=False, add_qkv_bias=False, bias_dropout_fusion=True, multi_query_attention=False, multi_query_group_num=1, apply_query_key_layer_scaling=True, attention_softmax_in_fp32=True, fp32_residual_connection=False, quantization_bit=0, pre_seq_len=None, prefix_projection=False, **kwargs, ): self.num_layers = num_layers self.vocab_size = padded_vocab_size self.padded_vocab_size = padded_vocab_size self.hidden_size = hidden_size self.ffn_hidden_size = ffn_hidden_size self.kv_channels = kv_channels self.num_attention_heads = num_attention_heads self.seq_length = seq_length self.hidden_dropout = hidden_dropout self.classifier_dropout = classifier_dropout self.attention_dropout = attention_dropout self.layernorm_epsilon = layernorm_epsilon self.rmsnorm = rmsnorm self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm self.post_layer_norm = post_layer_norm self.add_bias_linear = add_bias_linear self.add_qkv_bias = add_qkv_bias self.bias_dropout_fusion = bias_dropout_fusion self.multi_query_attention = multi_query_attention self.multi_query_group_num = multi_query_group_num self.apply_query_key_layer_scaling = apply_query_key_layer_scaling self.attention_softmax_in_fp32 = attention_softmax_in_fp32 self.fp32_residual_connection = fp32_residual_connection self.quantization_bit = quantization_bit self.pre_seq_len = pre_seq_len self.prefix_projection = prefix_projection super().__init__(**kwargs) class RMSNorm(torch.nn.Module): def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): super().__init__() self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) self.eps = eps def forward(self, hidden_states: torch.Tensor): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) return (self.weight * hidden_states).to(input_dtype) def _config_to_kwargs(args): common_kwargs = { "dtype": args.torch_dtype, } return common_kwargs class CoreAttention(torch.nn.Module): def __init__(self, config: 
ChatGLMConfig, layer_number):
        super(CoreAttention, self).__init__()

        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
        if self.apply_query_key_layer_scaling:
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)

        projection_size = config.kv_channels * config.num_attention_heads

        # Per attention head and per partition values.
        self.hidden_size_per_partition = projection_size
        self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
        self.num_attention_heads_per_partition = config.num_attention_heads

        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            coeff = self.layer_number
            self.norm_factor *= coeff
        self.coeff = coeff

        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)

    def forward(self, query_layer, key_layer, value_layer, attention_mask):
        pytorch_major_version = int(torch.__version__.split(".")[0])
        if pytorch_major_version >= 2:
            query_layer, key_layer, value_layer = [
                k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]
            ]
            if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
                context_layer = torch.nn.functional.scaled_dot_product_attention(
                    query_layer, key_layer, value_layer, is_causal=True
                )
            else:
                if attention_mask is not None:
                    attention_mask = ~attention_mask
                context_layer = torch.nn.functional.scaled_dot_product_attention(
                    query_layer, key_layer, value_layer, attention_mask
                )
            context_layer = context_layer.permute(2, 0, 1, 3)
            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
            context_layer = context_layer.reshape(*new_context_layer_shape)
        else:
            # Raw attention scores

            # [b, np, sq, sk]
            output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

            # [sq, b, np, hn] -> [sq, b * np, hn]
            query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
            # [sk, b, np, hn] -> [sk, b * np, hn]
            key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

            # preallocating input tensor: [b * np, sq, sk]
            matmul_input_buffer = torch.empty(
                output_size[0] * output_size[1],
                output_size[2],
                output_size[3],
                dtype=query_layer.dtype,
                device=query_layer.device,
            )

            # Raw attention scores.
[b * np, sq, sk] matmul_result = torch.baddbmm( matmul_input_buffer, query_layer.transpose(0, 1), # [b * np, sq, hn] key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] beta=0.0, alpha=(1.0 / self.norm_factor), ) # change view to [b, np, sq, sk] attention_scores = matmul_result.view(*output_size) # =========================== # Attention probs and dropout # =========================== # attention scores and attention mask [b, np, sq, sk] if self.attention_softmax_in_fp32: attention_scores = attention_scores.float() if self.coeff is not None: attention_scores = attention_scores * self.coeff if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: attention_mask = torch.ones( output_size[0], 1, output_size[2], output_size[3], device=attention_scores.device, dtype=torch.bool ) attention_mask.tril_() attention_mask = ~attention_mask if attention_mask is not None: attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = attention_probs.type_as(value_layer) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attention_dropout(attention_probs) # ========================= # Context layer. [sq, b, hp] # ========================= # value_layer -> context layer. # [sk, b, np, hn] --> [b, np, sq, hn] # context layer shape: [b, np, sq, hn] output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) # change view [sk, b * np, hn] value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) # change view [b * np, sq, sk] attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) # matmul: [b * np, sq, hn] context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) # change view [b, np, sq, hn] context_layer = context_layer.view(*output_size) # [b, np, sq, hn] --> [sq, b, np, hn] context_layer = context_layer.permute(2, 0, 1, 3).contiguous() # [sq, b, np, hn] --> [sq, b, hp] new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def split_tensor_along_last_dim( tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False, ) -> List[torch.Tensor]: """Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. Returns: A list of Tensors """ # Get the size and dimension. last_dim = tensor.dim() - 1 last_dim_size = tensor.size()[last_dim] // num_partitions # Split. tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) # Note: torch.split does not create contiguous tensors by default. 
if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list @torch.jit.script def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: # x: [sq, b, np, hn] sq, _b, np, _hn = x.size(0), x.size(1), x.size(2), x.size(3) rot_dim = rope_cache.shape[-2] * 2 x, x_pass = x[..., :rot_dim], x[..., rot_dim:] # truncate to support variable sizes rope_cache = rope_cache[:sq] xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2) rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2) x_out2 = torch.stack( [ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], ], -1, ) x_out2 = x_out2.flatten(3) return torch.cat((x_out2, x_pass), dim=-1) class SelfAttention(torch.nn.Module): """Parallel self-attention layer abstract class. Self-attention layer takes input with size [s, b, h] and returns output of the same size. """ def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(SelfAttention, self).__init__() self.layer_number = max(1, layer_number) self.projection_size = config.kv_channels * config.num_attention_heads # Per attention head and per partition values. self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads self.num_attention_heads_per_partition = config.num_attention_heads self.multi_query_attention = config.multi_query_attention self.qkv_hidden_size = 3 * self.projection_size if self.multi_query_attention: self.num_multi_query_groups_per_partition = config.multi_query_group_num self.qkv_hidden_size = ( self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num ) self.query_key_value = nn.Linear( config.hidden_size, self.qkv_hidden_size, bias=config.add_bias_linear or config.add_qkv_bias, device=device, **_config_to_kwargs(config), ) self.core_attention = CoreAttention(config, self.layer_number) # Output. self.dense = nn.Linear( self.projection_size, config.hidden_size, bias=config.add_bias_linear, device=device, **_config_to_kwargs(config), ) def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): if self.multi_query_attention: num_attention_heads = self.num_multi_query_groups_per_partition else: num_attention_heads = self.num_attention_heads_per_partition return torch.empty( inference_max_sequence_len, batch_size, num_attention_heads, self.hidden_size_per_attention_head, dtype=dtype, device=device, ) def forward(self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True): # hidden_states: [sq, b, h] # ================================================= # Pre-allocate memory for key-values for inference. 
# ================================================= # ===================== # Query, Key, and Value # ===================== # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] mixed_x_layer = self.query_key_value(hidden_states) if self.multi_query_attention: (query_layer, key_layer, value_layer) = mixed_x_layer.split( [ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, ], dim=-1, ) query_layer = query_layer.view( query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) key_layer = key_layer.view( key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) ) value_layer = value_layer.view( value_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) ) else: new_tensor_shape = mixed_x_layer.size()[:-1] + ( self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head, ) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) # apply relative positional encoding (rotary embedding) if rotary_pos_emb is not None: query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) # adjust key and value for inference if kv_cache is not None: cache_k, cache_v = kv_cache key_layer = torch.cat((cache_k, key_layer), dim=0) value_layer = torch.cat((cache_v, value_layer), dim=0) if use_cache: kv_cache = (key_layer, value_layer) else: kv_cache = None if self.multi_query_attention: key_layer = key_layer.unsqueeze(-2) key_layer = key_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 ) key_layer = key_layer.contiguous().view( key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) value_layer = value_layer.unsqueeze(-2) value_layer = value_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 ) value_layer = value_layer.contiguous().view( value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) # ================================== # core attention computation # ================================== context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) # ================= # Output. [sq, b, h] # ================= output = self.dense(context_layer) return output, kv_cache class MLP(torch.nn.Module): """MLP. MLP will take the input with h hidden state, project it to 4*h hidden dimension, perform nonlinear transformation, and project the state back into h hidden dimension. """ def __init__(self, config: ChatGLMConfig, device=None): super(MLP, self).__init__() self.add_bias = config.add_bias_linear # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf self.dense_h_to_4h = nn.Linear( config.hidden_size, config.ffn_hidden_size * 2, bias=self.add_bias, device=device, **_config_to_kwargs(config), ) def swiglu(x): x = torch.chunk(x, 2, dim=-1) return F.silu(x[0]) * x[1] self.activation_func = swiglu # Project back to h. 
self.dense_4h_to_h = nn.Linear( config.ffn_hidden_size, config.hidden_size, bias=self.add_bias, device=device, **_config_to_kwargs(config) ) def forward(self, hidden_states): # [s, b, 4hp] intermediate_parallel = self.dense_h_to_4h(hidden_states) intermediate_parallel = self.activation_func(intermediate_parallel) # [s, b, h] output = self.dense_4h_to_h(intermediate_parallel) return output class GLMBlock(torch.nn.Module): """A single transformer layer. Transformer layer takes input with size [s, b, h] and returns an output of the same size. """ def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(GLMBlock, self).__init__() self.layer_number = layer_number self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.fp32_residual_connection = config.fp32_residual_connection LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm # Layernorm on the input data. self.input_layernorm = LayerNormFunc( config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype ) # Self attention. self.self_attention = SelfAttention(config, layer_number, device=device) self.hidden_dropout = config.hidden_dropout # Layernorm on the attention output self.post_attention_layernorm = LayerNormFunc( config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype ) # MLP self.mlp = MLP(config, device=device) def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, ): # hidden_states: [s, b, h] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Self attention. attention_output, kv_cache = self.self_attention( layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache ) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) layernorm_input = residual + layernorm_input # Layer norm post the self attention. layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output = self.mlp(layernorm_output) # Second residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) output = residual + output return output, kv_cache class GLMTransformer(torch.nn.Module): """Transformer class.""" def __init__(self, config: ChatGLMConfig, device=None): super(GLMTransformer, self).__init__() self.fp32_residual_connection = config.fp32_residual_connection self.post_layer_norm = config.post_layer_norm # Number of layers. self.num_layers = config.num_layers # Transformer layers. def build_layer(layer_number): return GLMBlock(config, layer_number, device=device) self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) if self.post_layer_norm: LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm # Final layer norm before output. 
self.final_layernorm = LayerNormFunc( config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype ) self.gradient_checkpointing = False def _get_layer(self, layer_number): return self.layers[layer_number] def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, use_cache: Optional[bool] = True, output_hidden_states: Optional[bool] = False, ): if not kv_caches: kv_caches = [None for _ in range(self.num_layers)] presents = () if use_cache else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_self_attentions = None all_hidden_states = () if output_hidden_states else None for index in range(self.num_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer = self._get_layer(index) if self.gradient_checkpointing and self.training: layer_ret = torch.utils.checkpoint.checkpoint( layer, hidden_states, attention_mask, rotary_pos_emb, kv_caches[index], use_cache ) else: layer_ret = layer( hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache ) hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # Final layer norm. if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states, presents, all_hidden_states, all_self_attentions class ChatGLMPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ is_parallelizable = False supports_gradient_checkpointing = True config_class = ChatGLMConfig base_model_prefix = "transformer" _no_split_modules = ["GLMBlock"] def _init_weights(self, module: nn.Module): """Initialize the weights.""" return def get_masks(self, input_ids, past_key_values, padding_mask=None): batch_size, seq_length = input_ids.shape full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) full_attention_mask.tril_() past_length = 0 if past_key_values: past_length = past_key_values[0][0].shape[0] if past_length: full_attention_mask = torch.cat( (torch.ones(batch_size, seq_length, past_length, device=input_ids.device), full_attention_mask), dim=-1 ) if padding_mask is not None: full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) if not past_length and padding_mask is not None: full_attention_mask -= padding_mask.unsqueeze(-1) - 1 full_attention_mask = (full_attention_mask < 0.5).bool() full_attention_mask.unsqueeze_(1) return full_attention_mask def get_position_ids(self, input_ids, device): batch_size, seq_length = input_ids.shape position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) return position_ids def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, GLMTransformer): module.gradient_checkpointing = value def default_init(cls, *args, **kwargs): return cls(*args, **kwargs) class Embedding(torch.nn.Module): """Language model embeddings.""" def __init__(self, config: ChatGLMConfig, device=None): super(Embedding, self).__init__() self.hidden_size = config.hidden_size # Word embeddings (parallel). 
        self.word_embeddings = nn.Embedding(
            config.padded_vocab_size, self.hidden_size, dtype=config.torch_dtype, device=device
        )
        self.fp32_residual_connection = config.fp32_residual_connection

    def forward(self, input_ids):
        # Embeddings.
        words_embeddings = self.word_embeddings(input_ids)
        embeddings = words_embeddings
        # Data format change to avoid explicit transposes: [b s h] --> [s b h].
        embeddings = embeddings.transpose(0, 1).contiguous()
        # If the input flag for fp32 residual connection is set, convert to float.
        if self.fp32_residual_connection:
            embeddings = embeddings.float()
        return embeddings


class RotaryEmbedding(nn.Module):
    def __init__(self, dim, original_impl=False, device=None, dtype=None):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.dim = dim
        self.original_impl = original_impl

    def forward_impl(self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000):
        """Enhanced Transformer with Rotary Position Embedding.

        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
        transformers/rope/__init__.py. MIT License:
        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
        """
        # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))

        # Create position indexes `[0, 1, ..., seq_len - 1]`
        seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)

        # Calculate the product of position index and $\theta_i$
        idx_theta = torch.outer(seq_idx, theta).float()

        cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)

        # this is to mimic the behaviour of complex32, else we will get different results
        if dtype in (torch.float16, torch.bfloat16, torch.int8):
            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
        return cache

    def forward(self, max_seq_len, offset=0):
        return self.forward_impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device)


class PrefixEncoder(torch.nn.Module):
    """
    The torch.nn model to encode the prefix

    Input shape: (batch-size, prefix-length)

    Output shape: (batch-size, prefix-length, 2*layers*hidden)
    """

    def __init__(self, config: ChatGLMConfig):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        if self.prefix_projection:
            # Use a two-layer MLP to encode the prefix
            kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2
            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(kv_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, kv_size),
            )
        else:
            self.embedding = torch.nn.Embedding(
                config.pre_seq_len, config.num_layers * config.kv_channels * config.multi_query_group_num * 2
            )

    def forward(self, prefix: torch.Tensor):
        if self.prefix_projection:
            prefix_tokens = self.embedding(prefix)
            past_key_values = self.trans(prefix_tokens)
        else:
            past_key_values = self.embedding(prefix)
        return past_key_values


class ChatGLMModel(ChatGLMPreTrainedModel):
    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        init_kwargs = {}
        if device is not None:
            init_kwargs["device"] = device
        self.embedding = init_method(Embedding, config, **init_kwargs)
        self.num_layers
= config.num_layers self.multi_query_group_num = config.multi_query_group_num self.kv_channels = config.kv_channels # Rotary positional embeddings self.seq_length = config.seq_length rotary_dim = ( config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels ) self.rotary_pos_emb = RotaryEmbedding( rotary_dim // 2, original_impl=config.original_rope, device=device, dtype=config.torch_dtype ) self.encoder = init_method(GLMTransformer, config, **init_kwargs) self.output_layer = init_method( nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False, dtype=config.torch_dtype, **init_kwargs, ) self.pre_seq_len = config.pre_seq_len self.prefix_projection = config.prefix_projection if self.pre_seq_len is not None: for param in self.parameters(): param.requires_grad = False self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) self.dropout = torch.nn.Dropout(0.1) def get_input_embeddings(self): return self.embedding.word_embeddings def get_prompt(self, batch_size, device, dtype=torch.half): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) past_key_values = past_key_values.view( batch_size, self.pre_seq_len, self.num_layers * 2, self.multi_query_group_num, self.kv_channels ) # seq_len, b, nh, hidden_size past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) return past_key_values def forward( self, input_ids, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, full_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, seq_length = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt( batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype ) if attention_mask is not None: attention_mask = torch.cat( [attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask], dim=-1 ) if full_attention_mask is None: if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) # Rotary positional embeddings rotary_pos_emb = self.rotary_pos_emb(self.seq_length) if position_ids is not None: rotary_pos_emb = rotary_pos_emb[position_ids] else: rotary_pos_emb = rotary_pos_emb[None, :seq_length] rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() # Run encoder. 
hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states, ) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
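
# ---------------------------------------------------------------------------
# Shape sanity-check for the rotary-embedding helpers above (illustrative
# sizes only, far smaller than the real ChatGLM configuration). The cache
# from `RotaryEmbedding` covers half of the head dimension; the scripted
# `apply_rotary_pos_emb` rotates only that half and passes the remaining
# channels through unchanged.
if __name__ == "__main__":
    sq, b, heads, hn = 6, 2, 4, 16
    rope = RotaryEmbedding(hn // 2, dtype=torch.float32)  # rotate hn // 2 = 8 channels
    cache = rope(sq)  # [sq, hn // 4, 2] cos/sin pairs
    x = torch.randn(sq, b, heads, hn)  # the [sq, b, np, hn] layout used above
    out = apply_rotary_pos_emb(x, cache)
    assert out.shape == x.shape
    # The non-rotated half is passed through untouched.
    assert torch.equal(out[..., hn // 2 :], x[..., hn // 2 :])
    print("rotary embedding shape check ok:", tuple(out.shape))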
diffusers/src/diffusers/pipelines/kolors/text_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kolors/text_encoder.py", "repo_id": "diffusers", "token_count": 16353 }
141
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["marigold_image_processing"] = ["MarigoldImageProcessor"]
    _import_structure["pipeline_marigold_depth"] = ["MarigoldDepthOutput", "MarigoldDepthPipeline"]
    _import_structure["pipeline_marigold_normals"] = ["MarigoldNormalsOutput", "MarigoldNormalsPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
        from .marigold_image_processing import MarigoldImageProcessor
        from .pipeline_marigold_depth import MarigoldDepthOutput, MarigoldDepthPipeline
        from .pipeline_marigold_normals import MarigoldNormalsOutput, MarigoldNormalsPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
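
# ---------------------------------------------------------------------------
# Behavioural note (a hedged illustration of the `_LazyModule` pattern above,
# not extra functionality): importing this subpackage is cheap, and the
# torch/transformers-backed classes only materialize on first attribute access:
#
#   import diffusers.pipelines.marigold as marigold  # fast, heavy deps untouched
#   marigold.MarigoldDepthPipeline  # first access triggers the real import
#
# If torch or transformers is missing, the same attribute instead resolves to a
# dummy object registered from `dummy_torch_and_transformers_objects`, which
# raises an informative error when used.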
diffusers/src/diffusers/pipelines/marigold/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/marigold/__init__.py", "repo_id": "diffusers", "token_count": 660 }
142