Columns: text (string, 5 to 631k chars), id (string, 14 to 178 chars), metadata (dict), __index_level_0__ (int64, 0 to 647)
//! Activation Functions //! use candle::{Result, Tensor}; #[derive(Debug, Clone, Copy, PartialEq, serde::Deserialize, serde::Serialize, Default)] #[serde(rename_all = "lowercase")] pub enum Activation { #[default] #[serde(alias = "gelu")] Gelu, #[serde(alias = "gelu_new")] NewGelu, Relu, Relu2, Relu6, Silu, Sigmoid, HardSigmoid, Swiglu, Swish, HardSwish, Elu(f64), LeakyRelu(f64), #[serde(alias = "gelu_pytorch_tanh")] GeluPytorchTanh, } impl super::Module for Activation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::Gelu => xs.gelu_erf(), // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78 Self::NewGelu => xs.gelu(), Self::Relu => xs.relu(), Self::Relu2 => xs.relu()?.sqr(), Self::Relu6 => xs.clamp(0f32, 6f32), Self::Silu => xs.silu(), Self::Sigmoid => crate::ops::sigmoid(xs), Self::HardSigmoid => crate::ops::hard_sigmoid(xs), Self::Swiglu => crate::ops::swiglu(xs), Self::Swish => xs * crate::ops::sigmoid(xs)?, Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?, &Self::Elu(alpha) => xs.elu(alpha), &Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope), Self::GeluPytorchTanh => xs.gelu(), } } } #[derive(Clone, Debug)] pub struct PReLU { weight: Tensor, is_scalar: bool, } impl PReLU { pub fn new(weight: Tensor, is_scalar: bool) -> Self { Self { weight, is_scalar } } pub fn weight(&self) -> &Tensor { &self.weight } pub fn is_scalar(&self) -> bool { self.is_scalar } } impl candle::Module for PReLU { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let weight = if self.is_scalar { self.weight.reshape(())? } else if xs.shape() == self.weight.shape() { self.weight.clone() } else if xs.rank() >= 2 { let num_channels = xs.dim(1)?; let num_weights = self.weight.elem_count(); if num_weights != num_channels { candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}") } let mut s = vec![1; xs.rank()]; s[1] = num_weights; self.weight.reshape(s)? } else { self.weight.clone() }; let zeros = xs.zeros_like()?; xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)? } } /// Create or initialize a new PReLU layer. /// /// This uses some default name for weights, namely `"weight"`. /// # Arguments /// /// * `num_channels` - The number of channels. Use `None` to have as single trainable value and /// `Some` for a 1D vector with the appropriate number of channels. When applying the `forward` /// function, the input tensor shape `s` should either be one dimension with this number of /// channels or if `s.len() >= 2` it should have `s[1]` equal to this number. pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> { let init_ws = crate::init::Init::Const(0.25); // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1. let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?; Ok(PReLU::new(ws, num_channels.is_none())) }
candle/candle-nn/src/activation.rs/0
{ "file_path": "candle/candle-nn/src/activation.rs", "repo_id": "candle", "token_count": 1702 }
51
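A minimal usage sketch for the `Activation` enum defined in the record above. The tensor shape and values are arbitrary illustration; it only assumes the public `candle` and `candle_nn` crates shown in that file.

```rust
use candle::{Device, Result, Tensor};
use candle_nn::{Activation, Module};

fn main() -> Result<()> {
    let xs = Tensor::randn(0f32, 1f32, (2, 4), &Device::Cpu)?;
    // `Activation` implements `Module`, so it can be applied like any other layer.
    let gelu = Activation::Gelu.forward(&xs)?;
    let elu = Activation::Elu(1.0).forward(&xs)?;
    println!("{gelu}\n{elu}");
    Ok(())
}
```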
//! Rotary Embeddings //! use candle::{CpuStorage, Layout, Result, Shape, Tensor, D}; use rayon::prelude::*; /// Interleaved variant of rotary embeddings. /// The x0 and x1 value are interleaved on the n_embd (= head_dim) dimension. /// The resulting y0 and y1 are also interleaved with: /// y0 = x0*cos - x1*sin /// y1 = x0*sin + x1*cos #[derive(Debug, Clone)] struct RotaryEmbI; impl candle::CustomOp3 for RotaryEmbI { fn name(&self) -> &'static str { "rotary-emb-int" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let unbatched_rope = l_cos.dims().len() == 3 && l_sin.dims().len() == 3; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .enumerate() .for_each(|(bh_i, (src, dst))| { for i_over_2 in 0..t * d / 2 { let i = 2 * i_over_2; let rope_i = if unbatched_rope { let b_i = bh_i / h; i_over_2 + b_i * t * d / 2 } else { i_over_2 }; dst[i] = src[i] * cos[rope_i] - src[i + 1] * sin[rope_i]; dst[i + 1] = src[i] * sin[rope_i] + src[i + 1] * cos[rope_i]; } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchConfig, PushKernelArg, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { (h * t * d) as u32 } else { 0u32 }; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) 
as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_i"), &kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el)? }; let mut builder = func.builder(); builder.arg(&src); builder.arg(&cos); builder.arg(&sin); builder.arg(&dst); candle::builder_arg!(builder, (b * h) as u32, (t * d) as u32, stride_b); // SAFETY: ffi. unsafe { builder.launch(cfg) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope-i {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_i_f32", candle::DType::F16 => "rope_i_f16", candle::DType::BF16 => "rope_i_bf16", dtype => candle::bail!("rope-i is not implemented for {dtype:?}"), }; let (b, h, t, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { h * t * d } else { 0usize }; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-i")?; candle_metal_kernels::call_rope_i( device.metal_device(), &command_buffer, kernels, name, b * h, t * d, stride_b, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } fn rope_check_cs(cs: &Tensor, b_sz: usize) -> Result<(usize, usize)> { match *cs.dims() { [t, d] => Ok((t, d)), [b, t, d] => { if b != b_sz { candle::bail!("inconsistent batch size in rope {b_sz} {cs:?}",) } Ok((t, d)) } _ => candle::bail!("cos/sin has to be 2D or 3D in rope {b_sz} {cs:?}"), } } pub fn rope_i(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (b_sz, _n_head, seq_len, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = rope_check_cs(cos, b_sz)?; let (sin_seq_len, sin_n_embd) = rope_check_cs(sin, b_sz)?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } 
xs.apply_op3_no_bwd(cos, sin, &RotaryEmbI) } pub fn rope_i_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (b_sz, n_head, seq_len, n_embd) = x.dims4()?; let cos = cos .narrow(0, 0, seq_len)? .reshape((seq_len, n_embd / 2, 1))?; let sin = sin .narrow(0, 0, seq_len)? .reshape((seq_len, n_embd / 2, 1))?; let cos = cos.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?; let x = x.reshape((b_sz, n_head, seq_len, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let y0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let y1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[y0, y1], D::Minus1)?; let rope = rope.flatten_from(D::Minus2)?; Ok(rope) } /// Contiguous variant of rope embeddings. #[derive(Debug, Clone)] struct RotaryEmb; impl candle::CustomOp3 for RotaryEmb { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let unbatched_rope = l_cos.dims().len() == 3 && l_sin.dims().len() == 3; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .enumerate() .for_each(|(bh_i, (src, dst))| { for i_t in 0..t { for i_d in 0..d / 2 { let i1 = i_t * d + i_d; let i2 = i1 + d / 2; let i_cs = i_t * (d / 2) + i_d; let i_cs = if unbatched_rope { let b_i = bh_i / h; i_cs + b_i * t * d / 2 } else { i_cs }; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchConfig, PushKernelArg, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be 
contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { (h * t * d) as u32 } else { 0u32 }; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope"), &kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el)? }; let mut builder = func.builder(); builder.arg(&src); builder.arg(&cos); builder.arg(&sin); builder.arg(&dst); candle::builder_arg!(builder, (b * h) as u32, (t * d) as u32, d as u32, stride_b); // SAFETY: ffi. unsafe { builder.launch(cfg) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_f32", candle::DType::F16 => "rope_f16", candle::DType::BF16 => "rope_bf16", dtype => candle::bail!("rope is not implemented for {dtype:?}"), }; let (b, h, t, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { h * t * d } else { 0usize }; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-i")?; candle_metal_kernels::call_rope( device.metal_device(), &command_buffer, kernels, name, b * h, t * d, d, stride_b, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (b_sz, _n_head, seq_len, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = rope_check_cs(cos, b_sz)?; let (sin_seq_len, sin_n_embd) = rope_check_cs(sin, b_sz)?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), 
sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmb) } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } pub fn rope_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _h, seq_len, _n_embd) = x.dims4()?; let cos = Tensor::cat(&[cos, cos], D::Minus1)?; let sin = Tensor::cat(&[sin, sin], D::Minus1)?; let cos = cos.narrow(0, 0, seq_len)?; let sin = sin.narrow(0, 0, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; let sin = sin.unsqueeze(0)?.unsqueeze(0)?; x.broadcast_mul(&cos)? + rotate_half(x)?.broadcast_mul(&sin)? } /// T (seqlen)/H (num-heads)/D (head-dim) contiguous variant of rope embeddings. #[derive(Debug, Clone)] struct RotaryEmbThd; impl candle::CustomOp3 for RotaryEmbThd { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, t, h, d) = l_src.shape().dims4()?; let unbatched_rope = l_cos.dims().len() == 3 && l_sin.dims().len() == 3; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * h * d) .zip(dst.par_chunks_mut(t * h * d)) .enumerate() .for_each(|(b_i, (src, dst))| { for i_t in 0..t { for i_d in 0..d / 2 { let i_cs = i_t * (d / 2) + i_d; let i_cs = if unbatched_rope { i_cs + b_i * t * d / 2 } else { i_cs }; for i_h in 0..h { let i1 = i_t * h * d + i_h * d + i_d; let i2 = i1 + d / 2; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, t, h, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchConfig, PushKernelArg, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: 
&CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, t, h, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { (h * t * d) as u32 } else { 0u32 }; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_thd"), &kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el)? }; let mut builder = func.builder(); builder.arg(&src); builder.arg(&cos); builder.arg(&sin); builder.arg(&dst); candle::builder_arg!(builder, b as u32, t as u32, h as u32, d as u32, stride_b); // SAFETY: ffi. unsafe { builder.launch(cfg) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_thd_f32", candle::DType::F16 => "rope_thd_f16", candle::DType::BF16 => "rope_thd_bf16", dtype => candle::bail!("rope_thd is not implemented for {dtype:?}"), }; let (b, t, h, d) = l_src.shape().dims4()?; let stride_b = if l_cos.dims().len() == 3 && l_sin.dims().len() == 3 { h * t * d } else { 0usize }; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-thd")?; candle_metal_kernels::call_rope_thd( device.metal_device(), &command_buffer, kernels, name, b, t, h, d, stride_b, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope_thd(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (b_sz, seq_len, _n_head, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = rope_check_cs(cos, b_sz)?; 
let (sin_seq_len, sin_n_embd) = rope_check_cs(sin, b_sz)?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmbThd) }
candle/candle-nn/src/rotary_emb.rs/0
{ "file_path": "candle/candle-nn/src/rotary_emb.rs", "repo_id": "candle", "token_count": 17379 }
52
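A small sketch of calling `rope` from the record above on a `(batch, heads, seq, head_dim)` tensor. The tiny shapes and the 10_000 frequency base are illustrative assumptions; the cos/sin tables follow the `(seq, head_dim / 2)` layout that `rope`/`rope_i` check for.

```rust
use candle::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let (b, h, t, d) = (1usize, 2usize, 4usize, 8usize);
    // Query/key laid out as (batch, heads, seq, head_dim), contiguous.
    let xs = Tensor::randn(0f32, 1f32, (b, h, t, d), &dev)?;
    // cos/sin tables of shape (seq, head_dim / 2).
    let theta: Vec<f32> = (0..d / 2)
        .map(|i| 1f32 / 10_000f32.powf(2. * i as f32 / d as f32))
        .collect();
    let theta = Tensor::new(theta.as_slice(), &dev)?;
    let pos = Tensor::arange(0u32, t as u32, &dev)?.to_dtype(DType::F32)?;
    let freqs = pos.reshape((t, 1))?.matmul(&theta.reshape((1, d / 2))?)?;
    let (cos, sin) = (freqs.cos()?, freqs.sin()?);
    let ys = candle_nn::rotary_emb::rope(&xs, &cos, &sin)?;
    println!("{:?}", ys.shape()); // (1, 2, 4, 8)
    Ok(())
}
```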
# candle-onnx

This crate adds ONNX support to candle.

## FAQ

#### Missing protoc installation when compiling candle-onnx

The candle-onnx dependency prost-build no longer bundles the `protoc` binary. This can cause the following error when attempting to compile candle-onnx:

```
error: failed to run custom build command for `candle-onnx`

Caused by:
  // (...)
  Could not find `protoc` installation and this build crate cannot proceed without this knowledge.
```

To fix this issue, install protoc on your system and make it available in your system `PATH`. See the [protoc documentation](https://grpc.io/docs/protoc-installation/) for more information.
candle/candle-onnx/README.md/0
{ "file_path": "candle/candle-onnx/README.md", "repo_id": "candle", "token_count": 180 }
53
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor

@staticmethod
def avg_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
    """
    Applies the 2d avg-pool function to a given tensor.#
    """
    pass

@staticmethod
def gelu(tensor: Tensor) -> Tensor:
    """
    Applies the Gaussian Error Linear Unit (GELU) function to a given tensor.
    """
    pass

@staticmethod
def max_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
    """
    Applies the 2d max-pool function to a given tensor.#
    """
    pass

@staticmethod
def relu(tensor: Tensor) -> Tensor:
    """
    Applies the Rectified Linear Unit (ReLU) function to a given tensor.
    """
    pass

@staticmethod
def silu(tensor: Tensor) -> Tensor:
    """
    Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
    """
    pass

@staticmethod
def softmax(tensor: Tensor, dim: int) -> Tensor:
    """
    Applies the Softmax function to a given tensor.#
    """
    pass

@staticmethod
def tanh(tensor: Tensor) -> Tensor:
    """
    Applies the tanh function to a given tensor.
    """
    pass
candle/candle-pyo3/py_src/candle/functional/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.pyi", "repo_id": "candle", "token_count": 484 }
54
[project]
name = 'candle-nn'
requires-python = '>=3.7'
authors = [
    {name = 'The Candle Team'},
]
dynamic = [
    'description',
    'license',
    'readme',
    'version',
]

[project.urls]
Homepage = 'https://github.com/huggingface/candle'
Source = 'https://github.com/huggingface/candle'

[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[tool.maturin]
python-source = "py_src"
module-name = "candle.candle"
bindings = 'pyo3'
features = ["pyo3/extension-module"]

[tool.black]
line-length = 119
target-version = ['py35']

[project.optional-dependencies]
testing = ["pytest", "black==22.3"]
huggingface = ["transformers>=4.33.3", "huggingface-hub>=0.17.3"]
candle/candle-pyo3/pyproject.toml/0
{ "file_path": "candle/candle-pyo3/pyproject.toml", "repo_id": "candle", "token_count": 292 }
55
[package] name = "candle-transformers" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } byteorder = { workspace = true } candle = { workspace = true } candle-flash-attn = { workspace = true, optional = true } candle-nn = { workspace = true } fancy-regex = { workspace = true } intel-mkl-src = { workspace = true, optional = true } num-traits = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_plain = { workspace = true } tracing = { workspace = true } [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"] cuda = ["candle/cuda", "candle-nn/cuda"] cudnn = ["candle/cudnn", "candle-nn/cudnn"] flash-attn = ["cuda", "dep:candle-flash-attn"] mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"] metal = ["candle/metal", "candle-nn/metal"]
candle/candle-transformers/Cargo.toml/0
{ "file_path": "candle/candle-transformers/Cargo.toml", "repo_id": "candle", "token_count": 395 }
56
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! https://github.com/openai/CLIP //! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip use candle::{Context, IndexOp, Result, Shape, Tensor, D}; use candle_nn as nn; use candle_nn::Module; use nn::Conv2dConfig; use super::{ text_model::{Activation, ClipEncoder}, EncoderConfig, }; #[derive(Debug, Clone)] pub struct ClipVisionConfig { pub embed_dim: usize, pub activation: Activation, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, #[allow(dead_code)] pub projection_dim: usize, pub num_channels: usize, pub image_size: usize, pub patch_size: usize, } impl ClipVisionConfig { // The config details can be found in the "vision_config" section of this json file: // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json pub fn vit_base_patch32() -> Self { Self { embed_dim: 768, activation: Activation::QuickGelu, intermediate_size: 3072, num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 512, num_channels: 3, image_size: 224, patch_size: 32, } } pub fn clip_vit_large_patch14_336() -> Self { Self { embed_dim: 1024, activation: Activation::QuickGelu, intermediate_size: 4096, num_hidden_layers: 24, num_attention_heads: 16, projection_dim: 768, num_channels: 3, image_size: 336, patch_size: 14, } } } // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L112 #[derive(Clone, Debug)] struct ClipVisionEmbeddings { patch_embedding: candle_nn::Conv2d, position_ids: Tensor, class_embedding: Tensor, position_embedding: candle_nn::Embedding, } impl ClipVisionEmbeddings { fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> { // originally nn.Parameter let class_embedding = if vs.contains_tensor("class_embedding") { vs.get(c.embed_dim, "class_embedding")? } else { Tensor::randn(0f32, 1f32, c.embed_dim, vs.device())? }; let num_patches = (c.image_size / c.patch_size).pow(2); let num_positions = num_patches + 1; let position_ids = Tensor::arange(0, num_positions as i64, vs.device())?; let conv2dconfig = Conv2dConfig { stride: c.patch_size, ..Default::default() }; let position_embedding = candle_nn::embedding(num_positions, c.embed_dim, vs.pp("position_embedding"))?; let patch_embedding = candle_nn::conv2d_no_bias( c.num_channels, c.embed_dim, c.patch_size, conv2dconfig, vs.pp("patch_embedding"), )?; Ok(Self { patch_embedding, position_ids, class_embedding, position_embedding, }) } } impl Module for ClipVisionEmbeddings { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let batch_size = pixel_values.shape().dims(); let patch_embeds = self .patch_embedding .forward(pixel_values)? .flatten_from(2)? 
.transpose(1, 2)?; let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?)); let class_embeds = self.class_embedding.expand(shape)?; let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; embeddings.broadcast_add(&position_embedding) } } // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L743 #[derive(Clone, Debug)] pub struct ClipVisionTransformer { embeddings: ClipVisionEmbeddings, encoder: ClipEncoder, pre_layer_norm: candle_nn::LayerNorm, final_layer_norm: candle_nn::LayerNorm, } impl ClipVisionTransformer { pub fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> { let embeddings = ClipVisionEmbeddings::new(vs.pp("embeddings"), c)?; let pre_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("pre_layrnorm"))?; let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Vision(c.clone()))?; let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, final_layer_norm, pre_layer_norm, }) } // required by LLaVA pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> { let hidden_states = pixel_values .apply(&self.embeddings)? .apply(&self.pre_layer_norm)?; let mut result = self.encoder.output_hidden_states(&hidden_states, None)?; let encoder_outputs = result.last().context("no last")?; let pooled_output = encoder_outputs.i((.., 0, ..))?; result.push(self.final_layer_norm.forward(&pooled_output)?.clone()); Ok(result) } } impl Module for ClipVisionTransformer { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let hidden_states = pixel_values .apply(&self.embeddings)? .apply(&self.pre_layer_norm)?; let encoder_outputs = self.encoder.forward(&hidden_states, None)?; // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787 // pooled_output = encoder_outputs[:, 0, :] let pooled_output = encoder_outputs.i((.., 0, ..))?; self.final_layer_norm.forward(&pooled_output) } }
candle/candle-transformers/src/models/clip/vision_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/clip/vision_model.rs", "repo_id": "candle", "token_count": 2837 }
57
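A quick illustration of the patch/position bookkeeping in `ClipVisionEmbeddings` above, using the `vit_base_patch32` configuration values from the same record (pure arithmetic, no candle calls):

```rust
fn main() {
    // Values from ClipVisionConfig::vit_base_patch32 in the record above.
    let (image_size, patch_size, embed_dim) = (224usize, 32usize, 768usize);
    let num_patches = (image_size / patch_size).pow(2); // 7 * 7 = 49 patches
    let num_positions = num_patches + 1; // + 1 class token = 50
    assert_eq!(num_positions, 50);
    // A pixel input of shape (batch, 3, 224, 224) therefore produces embeddings
    // of shape (batch, 50, 768) once the class embedding is prepended.
    println!("positions: {num_positions}, width: {embed_dim}");
}
```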
//! EVA-2 inference implementation. //! //! EVA-02 is a computer vision model that can be used as an ImageNet classifier. //! The model returns the probability for an image to belong to each of the 1000 //! ImageNet categories. //! //! - [Paper](https://arxiv.org/abs/2303.11331). EVA-02: A Visual Representation for Neon Genesis //! - [Code](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/eva2.py) //! //! # Example //! //! ```bash //! cargo run \ //! --example eva2 \ //! --release -- \ //! --image candle-examples/examples/yolo-v8/assets/bike.jpg //! //! > mountain bike, all-terrain bike, off-roader: 37.09% //! > maillot : 8.30% //! > alp : 2.13% //! > bicycle-built-for-two, tandem bicycle, tandem: 0.84% //! > crash helmet : 0.73% //! ``` //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640> //! </div> //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 448; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { q: Linear, k: Linear, v: Linear, proj: Linear, rot_pos_embed: Tensor, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, rot_pos_embed: &Tensor, ) -> Result<Self> { let q = linear(vb.pp("q_proj"), dim, dim, qkv_bias)?; let k = linear(vb.pp("k_proj"), dim, dim, false)?; // no bias for Key let v = linear(vb.pp("v_proj"), dim, dim, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let rot_pos_embed = rot_pos_embed.clone(); let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { q, k, v, proj, rot_pos_embed, num_heads, scale, }) } } impl Attention { // See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/pos_embed_sincos.py#L210 fn apply_rot_embed_cat(x: &Tensor, emb: &Tensor) -> Result<Tensor> { let cos_emb = emb.i((0.., 64..128))?; //.transpose(0, 1)?; let sin_emb = emb.i((0.., 0..64))?; //.transpose(0, 1)?; let index_even: [u32; 32] = (0u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let index_odd: [u32; 32] = (1u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let t_index_even = Tensor::new(&index_even, x.device())?; let t_index_odd = Tensor::new(&index_odd, x.device())?; let x_c = x.contiguous()?; let rot_x_even = x_c.index_select(&t_index_even, D::Minus1)?; let rot_x_odd_minus = (-1.0 * x_c.index_select(&t_index_odd, D::Minus1)?)?; let rot_x = Tensor::stack(&[&rot_x_odd_minus, &rot_x_even], D::Minus1)?.reshape(x.shape())?; x.broadcast_mul(&cos_emb)? + rot_x.broadcast_mul(&sin_emb)? } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = Tensor::cat( &[ &self.q.forward(xs)?, &self.k.forward(xs)?, &self.v.forward(xs)?, ], 2, )? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? 
// 20134 .transpose(2, 3)?; // 20314 let q = qkv.i(0)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let npt = 1; // num_prefix_tokens = 1 for CLS token let q = Tensor::cat( &[ &q.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&q.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let k = Tensor::cat( &[ &k.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&k.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let q = (q * self.scale)?; let attn = &q.matmul(&k.t()?)?; let attn = candle_nn::ops::softmax(attn, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct Mlp { fc1_g: Linear, fc1_x: Linear, norm: LayerNorm, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1_g = linear(vb.pp("fc1_g"), in_features, hidden_features, bias)?; let fc1_x = linear(vb.pp("fc1_x"), in_features, hidden_features, bias)?; let norm = layer_norm(hidden_features, 1e-6, vb.pp("norm"))?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1_g, fc1_x, norm, fc2, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs_g = self.fc1_g.forward(xs)?.silu()?; let xs = self.fc1_x.forward(xs)?; let xs = self.norm.forward(&(xs_g.mul(&xs)?))?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, norm2: LayerNorm, mlp: Mlp, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize, rot_pos_embed: &Tensor) -> Result<Self> { let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true, rot_pos_embed)?; let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?; let hidden_dim = dim * 4 * 2 / 3; // 768 * 4 * 2 / 3 = 3072 * 2 / 3 = 2048 let mlp = Mlp::new(vb.pp("mlp"), dim, hidden_dim, true)?; Ok(Self { norm1, attn, norm2, mlp, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = &self.attn.forward(&self.norm1.forward(xs)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = &self.mlp.forward(&self.norm2.forward(&xs)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct EVA2VisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl EVA2VisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let pos_embed = vb.get((1, patch_embed.num_patches + 1, embed_dim), "pos_embed")?; let rot_pos_embed = vb.get((patch_embed.num_patches, 128), "rot_pos_embed")?; let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads, &rot_pos_embed)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding( &self, xs: &Tensor, w: usize, h: usize, num_prefix_tokens: usize, ) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(self.pos_embed.clone()); } // Interpolate only local tokens, i.e. those after the CLS token let prefix_tokens_pos_embed = self.pos_embed.i((0.., ..num_prefix_tokens, 0..))?.clone(); let patch_pos_embed = &self.pos_embed.i((0.., num_prefix_tokens.., 0..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&prefix_tokens_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; if (w != IMG_SIZE) || (h != IMG_SIZE) { panic!("Error: The input tensor should have the shape: Bx3x518x518."); } let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h, 1)?)?; Ok(xs) } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? } else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for EVA2VisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?; let xs_norm = self.norm.forward(&xs_moy_local_tokens)?; self.head.forward(&xs_norm) } } pub fn vit_base(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 12, 768, 12) } pub fn vit_large(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 24, 1024, 16) }
candle/candle-transformers/src/models/eva2.rs/0
{ "file_path": "candle/candle-transformers/src/models/eva2.rs", "repo_id": "candle", "token_count": 7638 }
58
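A hedged construction sketch for the EVA-02 classifier above. The safetensors file name is a hypothetical placeholder, and it assumes the module is reachable as `candle_transformers::models::eva2`; the real example fetches the weights from the Hub.

```rust
use candle::{DType, Device, Result};
use candle_nn::VarBuilder;
use candle_transformers::models::eva2;

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Hypothetical local weight file standing in for the Hub download.
    let weights = ["eva02_base_patch14_448.safetensors"];
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&weights, DType::F32, &device)? };
    // 12 blocks, 768-dim embeddings, 12 heads, matching `vit_base` in the record above.
    let _model = eva2::vit_base(vb)?;
    Ok(())
}
```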
//! # JinaBERT inference implementation //! //! Based on implementation from huggingface for Jina BERT and its variants //! //! See: [Jina Embeddings on HuggingFace](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) use super::with_tracing::{linear, linear_no_bias, Embedding, Linear}; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] pub enum PositionEmbeddingType { Absolute, Alibi, } // https://huggingface.co/jinaai/jina-bert-implementation/blob/main/configuration_bert.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub intermediate_size: usize, pub hidden_act: candle_nn::Activation, pub max_position_embeddings: usize, pub type_vocab_size: usize, pub initializer_range: f64, pub layer_norm_eps: f64, pub pad_token_id: usize, pub position_embedding_type: PositionEmbeddingType, } impl Config { pub fn v2_base() -> Self { // https://huggingface.co/jinaai/jina-embeddings-v2-base-en/blob/main/config.json Self { vocab_size: 30528, hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: candle_nn::Activation::Gelu, max_position_embeddings: 8192, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Alibi, } } #[allow(clippy::too_many_arguments)] pub fn new( vocab_size: usize, hidden_size: usize, num_hidden_layers: usize, num_attention_heads: usize, intermediate_size: usize, hidden_act: candle_nn::Activation, max_position_embeddings: usize, type_vocab_size: usize, initializer_range: f64, layer_norm_eps: f64, pad_token_id: usize, position_embedding_type: PositionEmbeddingType, ) -> Self { Config { vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, } } } #[derive(Clone, Debug)] struct BertEmbeddings { word_embeddings: Embedding, // no position_embeddings as we only support alibi. token_type_embeddings: Embedding, layer_norm: LayerNorm, span: tracing::Span, } impl BertEmbeddings { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let word_embeddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?; let token_type_embeddings = Embedding::new( cfg.type_vocab_size, cfg.hidden_size, vb.pp("token_type_embeddings"), )?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { word_embeddings, token_type_embeddings, layer_norm, span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } } impl Module for BertEmbeddings { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let token_type_embeddings = Tensor::zeros(seq_len, DType::U32, input_ids.device())? .broadcast_left(b_size)? 
.apply(&self.token_type_embeddings)?; let embeddings = (&input_embeddings + token_type_embeddings)?; let embeddings = self.layer_norm.forward(&embeddings)?; Ok(embeddings) } } #[derive(Clone, Debug)] struct BertSelfAttention { query: Linear, key: Linear, value: Linear, num_attention_heads: usize, attention_head_size: usize, span: tracing::Span, span_softmax: tracing::Span, } impl BertSelfAttention { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attention_head_size = cfg.hidden_size / cfg.num_attention_heads; let all_head_size = cfg.num_attention_heads * attention_head_size; let hidden_size = cfg.hidden_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; Ok(Self { query, key, value, num_attention_heads: cfg.num_attention_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "self-attn"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let mut x_shape = xs.dims().to_vec(); x_shape.pop(); x_shape.push(self.num_attention_heads); x_shape.push(self.attention_head_size); xs.reshape(x_shape)?.transpose(1, 2)?.contiguous() } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let query_layer = self.query.forward(xs)?; let key_layer = self.key.forward(xs)?; let value_layer = self.value.forward(xs)?; let query_layer = self.transpose_for_scores(&query_layer)?; let key_layer = self.transpose_for_scores(&key_layer)?; let value_layer = self.transpose_for_scores(&value_layer)?; let attention_scores = query_layer.matmul(&key_layer.t()?)?; let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?; let attention_scores = attention_scores.broadcast_add(bias)?; let attention_probs = { let _enter_sm = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attention_scores)? }; let context_layer = attention_probs.matmul(&value_layer)?; let context_layer = context_layer.transpose(1, 2)?.contiguous()?; let context_layer = context_layer.flatten_from(D::Minus2)?; Ok(context_layer) } } #[derive(Clone, Debug)] struct BertSelfOutput { dense: Linear, layer_norm: LayerNorm, span: tracing::Span, } impl BertSelfOutput { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-out"), }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.dense.forward(xs)?; self.layer_norm.forward(&(xs + input_tensor)?) 
} } #[derive(Clone, Debug)] struct BertAttention { self_attention: BertSelfAttention, self_output: BertSelfOutput, span: tracing::Span, } impl BertAttention { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = BertSelfAttention::new(vb.pp("self"), cfg)?; let self_output = BertSelfOutput::new(vb.pp("output"), cfg)?; Ok(Self { self_attention, self_output, span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let self_outputs = self.self_attention.forward(xs, bias)?; let attention_output = self.self_output.forward(&self_outputs, xs)?; Ok(attention_output) } } #[derive(Clone, Debug)] struct BertGLUMLP { gated_layers: Linear, act: candle_nn::Activation, wo: Linear, layernorm: LayerNorm, intermediate_size: usize, } impl BertGLUMLP { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let gated_layers = linear_no_bias( cfg.hidden_size, cfg.intermediate_size * 2, vb.pp("gated_layers"), )?; let act = candle_nn::Activation::Gelu; // geglu let wo = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("wo"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?; Ok(Self { gated_layers, act, wo, layernorm, intermediate_size: cfg.intermediate_size, }) } } impl Module for BertGLUMLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.gated_layers)?; let gated = xs.narrow(D::Minus1, 0, self.intermediate_size)?; let non_gated = xs.narrow(D::Minus1, self.intermediate_size, self.intermediate_size)?; let xs = (gated.apply(&self.act) * non_gated)?.apply(&self.wo); (xs + residual)?.apply(&self.layernorm) } } #[derive(Clone, Debug)] struct BertLayer { attention: BertAttention, mlp: BertGLUMLP, span: tracing::Span, } impl BertLayer { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attention = BertAttention::new(vb.pp("attention"), cfg)?; let mlp = BertGLUMLP::new(vb.pp("mlp"), cfg)?; Ok(Self { attention, mlp, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.attention.forward(xs, bias)?.apply(&self.mlp) } } fn build_alibi_bias(cfg: &Config) -> Result<Tensor> { let n_heads = cfg.num_attention_heads; let seq_len = cfg.max_position_embeddings; let alibi_bias = Tensor::arange(0, seq_len as i64, &Device::Cpu)?.to_dtype(DType::F32)?; let alibi_bias = { let a1 = alibi_bias.reshape((1, seq_len))?; let a2 = alibi_bias.reshape((seq_len, 1))?; a1.broadcast_sub(&a2)?.abs()?.broadcast_left(n_heads)? 
}; let mut n_heads2 = 1; while n_heads2 < n_heads { n_heads2 *= 2 } let slopes = (1..=n_heads2) .map(|v| -1f32 / 2f32.powf((v * 8) as f32 / n_heads2 as f32)) .collect::<Vec<_>>(); let slopes = if n_heads2 == n_heads { slopes } else { slopes .iter() .skip(1) .step_by(2) .chain(slopes.iter().step_by(2)) .take(n_heads) .cloned() .collect::<Vec<f32>>() }; let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?; alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes) } #[derive(Clone, Debug)] struct BertEncoder { alibi: Tensor, layers: Vec<BertLayer>, span: tracing::Span, } impl BertEncoder { fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { if cfg.position_embedding_type != PositionEmbeddingType::Alibi { candle::bail!("only alibi is supported as a position-embedding-type") } let layers = (0..cfg.num_hidden_layers) .map(|index| BertLayer::new(vb.pp(format!("layer.{index}")), cfg)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?; Ok(Self { alibi, layers, span, }) } } impl Module for BertEncoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let seq_len = xs.dim(1)?; let alibi_bias = self.alibi.i((.., .., ..seq_len, ..seq_len))?; let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, &alibi_bias)? } Ok(xs) } } #[derive(Clone, Debug)] pub struct BertModel { embeddings: BertEmbeddings, encoder: BertEncoder, pub device: Device, span: tracing::Span, } impl BertModel { pub fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> { let embeddings = BertEmbeddings::new(vb.pp("embeddings"), cfg)?; let encoder = BertEncoder::new(vb.pp("encoder"), cfg)?; Ok(Self { embeddings, encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } } impl Module for BertModel { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids)?; let sequence_output = self.encoder.forward(&embedding_output)?; Ok(sequence_output) } }
candle/candle-transformers/src/models/jina_bert.rs/0
{ "file_path": "candle/candle-transformers/src/models/jina_bert.rs", "repo_id": "candle", "token_count": 6364 }
59
//! NV-Embed-v2 //! //! NV-Embed-v2 is a text embedding model that combines a Mistral decoder with a latent attention mechanism to produce high-quality text embeddings. //! //! This implementation is based on the [paper](https://arxiv.org/pdf/2405.17428) and [weights](https://huggingface.co/nvidia/NV-Embed-v2) //! //! # Query-Passage Retrieval Example //! ```bash //! cargo run --example nvembed_v2 --release //! ``` //! //! # Sentence Embedding Example //! ```bash //! cargo run --example nvembed_v2 --release -- --prompt "Here is a test sentence" //! ``` pub mod embedding; pub mod model;
candle/candle-transformers/src/models/nvembed_v2/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/nvembed_v2/mod.rs", "repo_id": "candle", "token_count": 211 }
60
//! Gemma 3 model implementation with quantization support. //! //! Gemma 3 is a family of multimodal language models developed by Google. //! This implementation provides quantization for reduced memory usage and faster inference. //! //! Key characteristics: //! - Group-Query Attention (GQA) with specialized key-value heads //! - RMSNorm for layer normalization //! - Specialized attention patterns with separate normalization for Q/K/V //! - Feed-forward network with SwiGLU activation //! - Support for 2/3/4/8-bit quantization //! //! References: //! - [Gemma 3 Models](https://blog.google/technology/developers/gemma-3/) //! use crate::quantized_nn::RmsNorm; use candle::quantized::gguf_file; use candle::quantized::QTensor; use candle::D; use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{Embedding, Module}; pub const MAX_SEQ_LEN: usize = 131072; // Gemma 3 supports 128K context window pub const DEFAULT_SLIDING_WINDOW_TYPE: usize = 6; pub const DEFAULT_ROPE_FREQUENCY: f32 = 1_000_000.; pub const DEFAULT_ROPE_FREQUENCY_SLIDING: f32 = 10_000.; pub const DEFAULT_ROPE_FREQUENCY_SCALE_FACTOR: f32 = 1.; #[derive(Debug, Clone)] struct QMatMul { inner: candle::quantized::QMatMul, span: tracing::Span, } impl QMatMul { fn from_qtensor(qtensor: QTensor) -> Result<Self> { let inner = candle::quantized::QMatMul::from_qtensor(qtensor)?; let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); Ok(Self { inner, span }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] struct Mlp { feed_forward_gate: QMatMul, // ffn_gate in GGUF feed_forward_up: QMatMul, // ffn_up in GGUF feed_forward_down: QMatMul, // ffn_down in GGUF } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let gate = self.feed_forward_gate.forward(xs)?; let up = self.feed_forward_up.forward(xs)?; let silu = candle_nn::ops::silu(&gate)?; let gated = (silu * up)?; self.feed_forward_down.forward(&gated) } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(head_dim: usize, rope_frequency: f32, device: &Device) -> Result<Self> { let theta: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / rope_frequency.powf(i as f32 / head_dim as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? 
.matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?; let sin = idx_theta.sin()?; Ok(Self { sin, cos }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, index_pos: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, index_pos, seq_len)?; let sin = self.sin.narrow(0, index_pos, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] struct LayerWeights { // Attention components attention_wq: QMatMul, attention_wk: QMatMul, attention_wv: QMatMul, attention_wo: QMatMul, // Specialized normalization for Q and K attention_q_norm: RmsNorm, attention_k_norm: RmsNorm, // Layer normalization attention_norm: RmsNorm, // Applied before attention post_attention_norm: RmsNorm, // Applied after attention ffn_norm: RmsNorm, // Applied before feedforward post_ffn_norm: RmsNorm, // Applied after feedforward // Feed-forward network mlp: Mlp, // Attention parameters n_head: usize, // Number of query heads n_kv_head: usize, // Number of key-value heads head_dim: usize, // Dimension of each head q_dim: usize, // Total dimension for queries sliding_window_size: Option<usize>, rotary_embedding: RotaryEmbedding, neg_inf: Tensor, // Cache kv_cache: Option<(Tensor, Tensor)>, // Tracing span_attn: tracing::Span, span_mlp: tracing::Span, } impl LayerWeights { fn mask( &self, b_sz: usize, seq_len: usize, index_pos: usize, dtype: DType, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = if let Some(sliding_window_size) = self.sliding_window_size { (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if i < j || j + sliding_window_size < i { 0u32 } else { 1u32 } }) }) .collect() } else { (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if i < j { 0u32 } else { 1u32 })) .collect() }; let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?; let mask = if index_pos > 0 { let mask0 = Tensor::zeros((seq_len, index_pos), DType::F32, device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_sz, 1, seq_len, seq_len + index_pos))? .to_dtype(dtype) } fn forward_attn( &mut self, x: &Tensor, mask: Option<&Tensor>, index_pos: usize, ) -> Result<Tensor> { let _enter = self.span_attn.enter(); let (b_sz, seq_len, _) = x.dims3()?; let q = self.attention_wq.forward(x)?; let k = self.attention_wk.forward(x)?; let v = self.attention_wv.forward(x)?; let q = q .reshape((b_sz, seq_len, self.n_head, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))? 
.transpose(1, 2)?; let q = self.attention_q_norm.forward(&q.contiguous()?)?; let k = self.attention_k_norm.forward(&k.contiguous()?)?; let (q, k) = self .rotary_embedding .apply_rotary_emb_qkv(&q, &k, index_pos)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((k_cache, v_cache)) => { if index_pos == 0 { (k, v) } else { let k = Tensor::cat(&[k_cache, &k], 2)?; // concat on seq dim let v = Tensor::cat(&[v_cache, &v], 2)?; (k, v) } } }; self.kv_cache = Some((k.clone(), v.clone())); // update cache // Repeat KV for GQA let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?; let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?; // Scaled Dot-Product Attention let scale = 1.0 / (self.head_dim as f64).sqrt(); let mut attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?; if let Some(mask) = mask { let mask = mask.broadcast_as(attn_weights.shape())?; let neg_inf = self.neg_inf.broadcast_as(attn_weights.dims())?; attn_weights = mask.eq(0u32)?.where_cond(&neg_inf, &attn_weights)?; } let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? .reshape((b_sz, seq_len, self.q_dim))?; self.attention_wo.forward(&attn_output) } } #[derive(Debug, Clone)] pub struct ModelWeights { tok_embeddings: Embedding, embedding_length: usize, layers: Vec<LayerWeights>, norm: RmsNorm, output: QMatMul, span: tracing::Span, span_output: tracing::Span, } impl ModelWeights { pub fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, ) -> Result<Self> { let md_get = |s: &str| match ct.metadata.get(s) { None => candle::bail!("cannot find {s} in metadata"), Some(v) => Ok(v), }; let head_count = md_get("gemma3.attention.head_count")?.to_u32()? as usize; let head_count_kv = md_get("gemma3.attention.head_count_kv")?.to_u32()? as usize; let block_count = md_get("gemma3.block_count")?.to_u32()? as usize; let embedding_length = md_get("gemma3.embedding_length")?.to_u32()? as usize; let key_length = md_get("gemma3.attention.key_length")?.to_u32()? as usize; let _value_length = md_get("gemma3.attention.value_length")?.to_u32()? as usize; let rms_norm_eps = md_get("gemma3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64; let sliding_window_size = md_get("gemma3.attention.sliding_window")?.to_u32()? as usize; let sliding_window_type = md_get("gemma3.attention.sliding_window_type") .and_then(|m| Ok(m.to_u32()? as usize)) .unwrap_or(DEFAULT_SLIDING_WINDOW_TYPE); let rope_freq_base = md_get("gemma3.rope.freq_base") .and_then(|m| m.to_f32()) .unwrap_or(DEFAULT_ROPE_FREQUENCY); let rope_freq_base_sliding = md_get("gemma3.rope.local_freq_base") .and_then(|m| m.to_f32()) .unwrap_or(DEFAULT_ROPE_FREQUENCY_SLIDING); // Unused in Llama.cpp so we aren't using it here. 
let _rope_freq_scaling_factor = md_get("gemma3.rope.scaling.factor") .and_then(|m| m.to_f32()) .unwrap_or(DEFAULT_ROPE_FREQUENCY_SCALE_FACTOR); // Compute the dimensions for queries, keys, and values // These are the total dimensions when projected across all heads let q_dim = head_count * key_length; let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?; // Load token embeddings and output projection let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; let norm = RmsNorm::from_qtensor( ct.tensor(reader, "output_norm.weight", device)?, rms_norm_eps, )?; let output = match ct.tensor(reader, "output.weight", device) { Ok(tensor) => tensor, Err(_) => ct.tensor(reader, "token_embd.weight", device)?, // Use tied weights if output.weight doesn't exist }; let mut layers = Vec::with_capacity(block_count); for layer_idx in 0..block_count { let prefix = format!("blk.{layer_idx}"); let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?; let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?; let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?; let attention_wo = ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?; let attention_q_norm = RmsNorm::from_qtensor( ct.tensor(reader, &format!("{prefix}.attn_q_norm.weight"), device)?, rms_norm_eps, )?; let attention_k_norm = RmsNorm::from_qtensor( ct.tensor(reader, &format!("{prefix}.attn_k_norm.weight"), device)?, rms_norm_eps, )?; let attention_norm = RmsNorm::from_qtensor( ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?, rms_norm_eps, )?; let post_attention_norm = RmsNorm::from_qtensor( ct.tensor( reader, &format!("{prefix}.post_attention_norm.weight"), device, )?, rms_norm_eps, )?; let ffn_norm = RmsNorm::from_qtensor( ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?, rms_norm_eps, )?; let post_ffn_norm = RmsNorm::from_qtensor( ct.tensor(reader, &format!("{prefix}.post_ffw_norm.weight"), device)?, rms_norm_eps, )?; let feed_forward_gate = ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?; let feed_forward_up = ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?; let feed_forward_down = ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?; let mlp = Mlp { feed_forward_gate: QMatMul::from_qtensor(feed_forward_gate)?, feed_forward_up: QMatMul::from_qtensor(feed_forward_up)?, feed_forward_down: QMatMul::from_qtensor(feed_forward_down)?, }; // Sliding window pattern hardcoded to 6 because it's not explicitly defined let is_sliding = (layer_idx + 1) % sliding_window_type > 0; let sliding_window_size = is_sliding.then_some(sliding_window_size); let layer_rope_frequency = if is_sliding { rope_freq_base_sliding } else { rope_freq_base }; let rotary_embedding = RotaryEmbedding::new(key_length, layer_rope_frequency, device)?; // Tracing spans let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp"); layers.push(LayerWeights { attention_wq: QMatMul::from_qtensor(attention_wq)?, attention_wk: QMatMul::from_qtensor(attention_wk)?, attention_wv: QMatMul::from_qtensor(attention_wv)?, attention_wo: QMatMul::from_qtensor(attention_wo)?, attention_q_norm, attention_k_norm, attention_norm, post_attention_norm, ffn_norm, post_ffn_norm, mlp, n_head: head_count, n_kv_head: head_count_kv, head_dim: key_length, q_dim, sliding_window_size, rotary_embedding, neg_inf: 
neg_inf.clone(), kv_cache: None, span_attn, span_mlp, }) } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, embedding_length), embedding_length, layers, norm, output: QMatMul::from_qtensor(output)?, span, span_output, }) } pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (b_sz, seq_len) = x.dims2()?; let _enter = self.span.enter(); let mut layer_in = self.tok_embeddings.forward(x)?; layer_in = (layer_in * (self.embedding_length as f64).sqrt())?; for layer in self.layers.iter_mut() { let attention_mask = if seq_len == 1 { None } else { Some(layer.mask(b_sz, seq_len, index_pos, x.dtype(), x.device())?) }; // Attention block let residual = &layer_in; let x = layer.attention_norm.forward(&layer_in)?; let x = layer.forward_attn(&x, attention_mask.as_ref(), index_pos)?; let x = layer.post_attention_norm.forward(&x)?; let x = (x + residual)?; // Feed-forward block let _enter = layer.span_mlp.enter(); let residual = &x; let x = layer.ffn_norm.forward(&x)?; let x = layer.mlp.forward(&x)?; let x = layer.post_ffn_norm.forward(&x)?; let x = (x + residual)?; drop(_enter); layer_in = x; } let _enter = self.span_output.enter(); let x = layer_in.i((.., seq_len - 1, ..))?; let x = self.norm.forward(&x)?; let output = self.output.forward(&x)?; Ok(output) } }
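// --- Usage sketch (editorial addition, not part of the upstream file) ---
// A minimal, hedged example of how the pieces above fit together: read a GGUF
// container, build `ModelWeights`, and run a single forward pass. The file
// name "gemma3-q4_k.gguf" and the token ids below are placeholders chosen for
// illustration; only `gguf_file::Content::read`, `ModelWeights::from_gguf`
// and `ModelWeights::forward` come from the code above.
pub fn gemma3_gguf_sketch() -> Result<Tensor> {
    let device = Device::Cpu;
    // Parse the GGUF metadata (head counts, rope frequencies, ...) and the
    // quantized tensor table.
    let mut file = std::fs::File::open("gemma3-q4_k.gguf")?;
    let content = gguf_file::Content::read(&mut file)?;
    // Instantiate the quantized weights defined in this module.
    let mut model = ModelWeights::from_gguf(content, &mut file, &device)?;
    // Prefill with a made-up prompt of token ids (shape `(batch, seq_len)`);
    // `index_pos` is 0 for the first call and advances by the number of
    // already-processed tokens on subsequent calls.
    let prompt = Tensor::new(&[[2u32, 106, 1645]], &device)?;
    let logits = model.forward(&prompt, 0)?;
    Ok(logits)
}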
candle/candle-transformers/src/models/quantized_gemma3.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_gemma3.rs", "repo_id": "candle", "token_count": 8559 }
61
// Adapted from: // https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{Conv2dConfig, Module, VarBuilder}; const MBCONV_EXPAND_RATIO: usize = 4; const MLP_RATIO: usize = 4; const LOCAL_CONV_SIZE: usize = 3; const IMG_SIZE: usize = 1024; const IN_CHANNELS: usize = 3; #[derive(Debug)] struct Conv2dBN { c: candle_nn::Conv2d, bn: candle_nn::BatchNorm, span: tracing::Span, } impl Conv2dBN { fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> { let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?; let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?; let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn"); Ok(Self { c, bn, span }) } } impl Module for Conv2dBN { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.c)?.apply_t(&self.bn, false) } } #[derive(Debug)] struct PatchEmbed { conv1: Conv2dBN, conv2: Conv2dBN, span: tracing::Span, } impl PatchEmbed { fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { stride: 2, padding: 1, ..Default::default() }; let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?; let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-embed"); Ok(Self { conv1, conv2, span }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2) } } #[derive(Debug)] struct MBConv { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, span: tracing::Span, } impl MBConv { fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> { let hidden = in_ * expand_ratio; let cfg2 = candle_nn::Conv2dConfig { padding: 1, groups: hidden, ..Default::default() }; let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "mb-conv"); Ok(Self { conv1, conv2, conv3, span, }) } } impl Module for MBConv { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut = xs; let xs = xs .apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? 
.apply(&self.conv3)?; (xs + shortcut)?.gelu() } } #[derive(Debug)] struct PatchMerging { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, input_resolution: (usize, usize), span: tracing::Span, } impl PatchMerging { fn new( input_resolution: (usize, usize), dim: usize, out: usize, vb: VarBuilder, ) -> Result<Self> { let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 }; let cfg2 = candle_nn::Conv2dConfig { padding: 1, stride, groups: out, ..Default::default() }; let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-merging"); Ok(Self { conv1, conv2, conv3, input_resolution, span, }) } } impl Module for PatchMerging { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = if xs.rank() == 3 { let (h, w) = self.input_resolution; let b = xs.dim(0)?; xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))? } else { xs.clone() }; xs.apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? .apply(&self.conv3)? .flatten_from(2)? .transpose(1, 2) } } #[derive(Debug)] struct ConvLayer { blocks: Vec<MBConv>, downsample: Option<PatchMerging>, span: tracing::Span, } impl ConvLayer { fn new( dim: usize, out: usize, input_resolution: (usize, usize), depth: usize, downsample: bool, conv_expand_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "conv-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for ConvLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? } match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] struct Mlp { norm: candle_nn::LayerNorm, fc1: super::Linear, fc2: super::Linear, span: tracing::Span, } impl Mlp { fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?; let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?; let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?; let span = tracing::span!(tracing::Level::TRACE, "mlp"); Ok(Self { norm, fc1, fc2, span, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.norm)? .apply(&self.fc1)? .gelu()? 
.apply(&self.fc2) } } #[derive(Debug)] struct Attention { norm: candle_nn::LayerNorm, qkv: super::Linear, proj: super::Linear, ab: Tensor, key_dim: usize, num_heads: usize, d: usize, dh: usize, scale: f64, span: tracing::Span, span_matmul: tracing::Span, span_softmax: tracing::Span, } impl Attention { fn new( dim: usize, key_dim: usize, num_heads: usize, attn_ratio: usize, resolution: (usize, usize), vb: VarBuilder, ) -> Result<Self> { let d = attn_ratio * key_dim; let dh = d * num_heads; let nh_kd = key_dim * num_heads; let h = dh + nh_kd * 2; let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?; let qkv = super::linear(vb.pp("qkv"), dim, h, true)?; let proj = super::linear(vb.pp("proj"), dh, dim, true)?; let points = (0..resolution.0) .flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64))) .collect::<Vec<_>>(); let mut idxs = Vec::with_capacity(points.len() * points.len()); let mut attention_offsets = std::collections::HashMap::new(); for &(x1, y1) in points.iter() { for &(x2, y2) in points.iter() { let offset = ((x2 - x1).abs(), (y2 - y1).abs()); let l = attention_offsets.len(); let idx = attention_offsets.entry(offset).or_insert(l); idxs.push(*idx as u32) } } let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?; let idxs = Tensor::new(idxs, attention_biases.device())?; let ab = attention_biases .index_select(&idxs, 1)? .reshape(((), points.len(), points.len()))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul"); let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm"); Ok(Self { norm, qkv, proj, ab, key_dim, num_heads, d, dh, scale: 1f64 / (key_dim as f64).sqrt(), span, span_matmul, span_softmax, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, n, _) = xs.dims3()?; let xs = xs.apply(&self.norm)?; let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?; let q = qkv .narrow(D::Minus1, 0, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let k = qkv .narrow(D::Minus1, self.key_dim, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let v = qkv .narrow(D::Minus1, 2 * self.key_dim, self.d)? .permute((0, 2, 1, 3))? .contiguous()?; let attn = { let _enter = self.span_matmul.enter(); (q.matmul(&k.t()?)? * self.scale)? }; let attn = attn.broadcast_add(&self.ab)?; let attn = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn)? }; let attn = { let _enter = self.span_matmul.enter(); attn.matmul(&v)? }; attn.transpose(1, 2)? .reshape((b, n, self.dh))? 
.apply(&self.proj) } } #[derive(Debug)] struct TinyViTBlock { attn: Attention, local_conv: Conv2dBN, mlp: Mlp, window_size: usize, input_resolution: (usize, usize), span: tracing::Span, } impl TinyViTBlock { fn new( dim: usize, input_resolution: (usize, usize), num_heads: usize, window_size: usize, vb: VarBuilder, ) -> Result<Self> { let head_dim = dim / num_heads; let attn = Attention::new( dim, head_dim, num_heads, 1, (window_size, window_size), vb.pp("attn"), )?; let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?; let cfg = candle_nn::Conv2dConfig { padding: LOCAL_CONV_SIZE / 2, groups: dim, ..Default::default() }; let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); Ok(Self { attn, local_conv, mlp, window_size, input_resolution, span, }) } } impl Module for TinyViTBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (h, w) = self.input_resolution; let (b, l, c) = xs.dims3()?; let res_x = xs; let xs = if h == self.window_size && w == self.window_size { self.attn.forward(xs)? } else { let xs = xs.reshape((b, h, w, c))?; let pad_b = (self.window_size - h % self.window_size) % self.window_size; let pad_r = (self.window_size - w % self.window_size) % self.window_size; let xs = if pad_b > 0 { xs.pad_with_zeros(1, 0, pad_b)? } else { xs }; let xs = if pad_r > 0 { xs.pad_with_zeros(2, 0, pad_r)? } else { xs }; let (p_h, p_w) = (h + pad_b, w + pad_r); let n_h = p_h / self.window_size; let n_w = p_w / self.window_size; let xs = xs .reshape((b, n_h, self.window_size, n_w, self.window_size, c))? .transpose(2, 3)? .reshape((b * n_h * n_w, self.window_size * self.window_size, c))?; let xs = self.attn.forward(&xs)?; let xs = xs .reshape((b, n_h, n_w, self.window_size, self.window_size, c))? .transpose(2, 3)? .reshape((b, p_h, p_w, c))?; let xs = if pad_r > 0 { xs.i((.., .., ..w))?.contiguous()? } else { xs }; let xs = if pad_b > 0 { xs.i((.., ..h, ..))?.contiguous()? } else { xs }; xs.reshape((b, l, c))? }; let xs = (xs + res_x)?; let xs = xs .transpose(1, 2)? .reshape((b, c, h, w))? .apply(&self.local_conv)? .reshape((b, c, l))? .transpose(1, 2)?; &xs + self.mlp.forward(&xs)? } } #[derive(Debug)] struct BasicLayer { blocks: Vec<TinyViTBlock>, downsample: Option<PatchMerging>, span: tracing::Span, } impl BasicLayer { #[allow(clippy::too_many_arguments)] fn new( dim: usize, input_resolution: (usize, usize), depth: usize, num_heads: usize, window_size: usize, downsample: bool, out: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = TinyViTBlock::new( dim, input_resolution, num_heads, window_size, vb_b.pp(index), )?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "basic-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for BasicLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? 
} match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] pub struct TinyViT { patch_embed: PatchEmbed, layer0: ConvLayer, layers: Vec<BasicLayer>, // norm_head: candle_nn::LayerNorm, // head: candle_nn::Linear, neck_conv1: candle_nn::Conv2d, neck_ln1: super::LayerNorm2d, neck_conv2: candle_nn::Conv2d, neck_ln2: super::LayerNorm2d, span: tracing::Span, span_neck: tracing::Span, } impl TinyViT { pub fn new( embed_dims: &[usize], depths: &[usize], num_heads: &[usize], window_sizes: &[usize], _num_classes: usize, vb: VarBuilder, ) -> Result<Self> { let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?; let patches_resolution = IMG_SIZE / 4; let vb_l = vb.pp("layers"); let layer0 = ConvLayer::new( /* dim */ embed_dims[0], /* out */ embed_dims[1], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[0], /* downsample */ true, /* conv_expand_ratio */ MBCONV_EXPAND_RATIO, vb_l.pp(0), )?; let num_layers = embed_dims.len(); let mut layers = Vec::with_capacity(num_layers - 1); for i_layer in 1..num_layers { let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2)); let layer = BasicLayer::new( /* dim */ embed_dims[i_layer], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[i_layer], /* num_heads */ num_heads[i_layer], /* window_size */ window_sizes[i_layer], /* downsample */ i_layer < num_layers - 1, /* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)], vb_l.pp(i_layer), )?; layers.push(layer) } let last_embed_dim = embed_dims[embed_dims.len() - 1]; // let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?; // let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?; let neck_conv1 = candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?; let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?; let cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?; let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?; let span = tracing::span!(tracing::Level::TRACE, "tiny-vit"); let span_neck = tracing::span!(tracing::Level::TRACE, "neck"); Ok(Self { patch_embed, layer0, layers, neck_conv1, neck_ln1, neck_conv2, neck_ln2, span, span_neck, }) } } impl Module for TinyViT { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.patch_embed.forward(xs)?; let mut xs = self.layer0.forward(&xs)?; for layer in self.layers.iter() { xs = layer.forward(&xs)? } let (b, _, c) = xs.dims3()?; let _enter = self.span_neck.enter(); xs.reshape((b, 64, 64, c))? .permute((0, 3, 1, 2))? .apply(&self.neck_conv1)? .apply(&self.neck_ln1)? .apply(&self.neck_conv2)? .apply(&self.neck_ln2) } } pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> { TinyViT::new( /* embed_dims */ &[64, 128, 160, 320], /* depths */ &[2, 2, 6, 2], /* num_heads */ &[2, 4, 5, 10], /* window_sizes */ &[7, 7, 14, 7], /* num_classes */ 1000, vb, ) }
candle/candle-transformers/src/models/segment_anything/tiny_vit.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/tiny_vit.rs", "repo_id": "candle", "token_count": 10372 }
62
use candle::{Device, Result, Tensor}; pub fn linspace(start: f64, stop: f64, steps: usize) -> Result<Tensor> { if steps == 0 { Tensor::from_vec(Vec::<f64>::new(), steps, &Device::Cpu) } else if steps == 1 { Tensor::from_vec(vec![start], steps, &Device::Cpu) } else { let delta = (stop - start) / (steps - 1) as f64; let vs = (0..steps) .map(|step| start + step as f64 * delta) .collect::<Vec<_>>(); Tensor::from_vec(vs, steps, &Device::Cpu) } } /// A linear interpolator for a sorted array of x and y values. struct LinearInterpolator<'x, 'y> { xp: &'x [f64], fp: &'y [f64], cache: usize, } impl LinearInterpolator<'_, '_> { fn accel_find(&mut self, x: f64) -> usize { let xidx = self.cache; if x < self.xp[xidx] { self.cache = self.xp[0..xidx].partition_point(|o| *o < x); self.cache = self.cache.saturating_sub(1); } else if x >= self.xp[xidx + 1] { self.cache = self.xp[xidx..self.xp.len()].partition_point(|o| *o < x) + xidx; self.cache = self.cache.saturating_sub(1); } self.cache } fn eval(&mut self, x: f64) -> f64 { if x < self.xp[0] || x > self.xp[self.xp.len() - 1] { return f64::NAN; } let idx = self.accel_find(x); let x_l = self.xp[idx]; let x_h = self.xp[idx + 1]; let y_l = self.fp[idx]; let y_h = self.fp[idx + 1]; let dx = x_h - x_l; if dx > 0.0 { y_l + (x - x_l) / dx * (y_h - y_l) } else { f64::NAN } } } pub fn interp(x: &[f64], xp: &[f64], fp: &[f64]) -> Vec<f64> { let mut interpolator = LinearInterpolator { xp, fp, cache: 0 }; x.iter().map(|&x| interpolator.eval(x)).collect() }
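// --- Usage sketch (editorial addition, not part of the upstream file) ---
// A small, hedged illustration of `interp`: with the sample points below,
// linear interpolation gives f(0.5) = 5.0 and f(1.5) = 15.0, and a query
// outside the sampled range (2.5) yields NaN, mirroring the bounds check in
// `LinearInterpolator::eval`. The numbers are made up for illustration only.
pub fn interp_example() -> Vec<f64> {
    let xp = [0.0, 1.0, 2.0]; // sorted x coordinates of the known samples
    let fp = [0.0, 10.0, 20.0]; // corresponding y values
    let queries = [0.5, 1.5, 2.5]; // points at which to evaluate the interpolant
    interp(&queries, &xp, &fp)
}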
candle/candle-transformers/src/models/stable_diffusion/utils.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/utils.rs", "repo_id": "candle", "token_count": 971 }
63
//! Apply penalty and repeat_kv use candle::{Result, Tensor}; pub fn apply_repeat_penalty(logits: &Tensor, penalty: f32, context: &[u32]) -> Result<Tensor> { let device = logits.device(); let mut logits = logits.to_dtype(candle::DType::F32)?.to_vec1::<f32>()?; let mut already_seen = std::collections::HashSet::new(); for token_id in context { if already_seen.contains(token_id) { continue; } already_seen.insert(token_id); if let Some(logit) = logits.get_mut(*token_id as usize) { if *logit >= 0. { *logit /= penalty } else { *logit *= penalty } } } let logits_len = logits.len(); Tensor::from_vec(logits, logits_len, device) } /// Repeats a key or value tensor for grouped query attention. /// The input tensor should have a shape `(batch, num_kv_heads, seq_len, head_dim)`. pub fn repeat_kv(xs: Tensor, n_rep: usize) -> Result<Tensor> { if n_rep == 1 { Ok(xs) } else { let (b_sz, n_kv_head, seq_len, head_dim) = xs.dims4()?; // Using cat is faster than a broadcast as it avoids going through a potentially // strided copy. // https://github.com/huggingface/candle/pull/2043 Tensor::cat(&vec![&xs; n_rep], 2)?.reshape((b_sz, n_kv_head * n_rep, seq_len, head_dim)) } }
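// --- Usage sketch (editorial addition, not part of the upstream file) ---
// A hedged mini-example of `apply_repeat_penalty` on a made-up logits vector.
// Token ids 1 and 2 appear in `context`, so the positive logit at index 1 is
// divided by the penalty and the negative logit at index 2 is multiplied by
// it, while index 0 is left unchanged. All values are illustrative only.
pub fn repeat_penalty_example() -> Result<Tensor> {
    let device = candle::Device::Cpu;
    // Logits over a toy 3-token vocabulary.
    let logits = Tensor::new(&[1.0f32, 2.0, -3.0], &device)?;
    // Previously generated token ids; duplicates are only penalized once.
    let context = [1u32, 1, 2];
    apply_repeat_penalty(&logits, 1.5, &context)
}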
candle/candle-transformers/src/utils.rs/0
{ "file_path": "candle/candle-transformers/src/utils.rs", "repo_id": "candle", "token_count": 642 }
64
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use candle_wasm_example_blip::console_log; use candle_wasm_example_blip::token_output_stream::TokenOutputStream; use js_sys::Date; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl SelectedModel { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor, JsError> { match self { Self::M(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), Self::Q(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), } } fn reset_kv_cache(&mut self) { match self { Self::M(m) => m.reset_kv_cache(), Self::Q(m) => m.reset_kv_cache(), } } } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: TokenOutputStream, } const SEP_TOKEN_ID: u32 = 102; #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, quantized: bool, ) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let tokenizer = TokenOutputStream::new(tokenizer); let config: blip::Config = serde_json::from_slice(&config)?; let device = Device::Cpu; let start = Date::now(); let model: SelectedModel = if quantized { let vb = quantized_blip::VarBuilder::from_gguf_buffer(&weights, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::Q(model) } else { let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, &device)?; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::M(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); Ok(Self { model, tokenizer }) } #[wasm_bindgen] pub fn generate_caption_from_image(&mut self, image: Vec<u8>) -> Result<String, JsError> { self.model.reset_kv_cache(); let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds: Tensor = match &mut self.model { SelectedModel::M(m) => image.unsqueeze(0)?.apply(m.vision_model())?, SelectedModel::Q(m) => image.unsqueeze(0)?.apply(m.vision_model())?, }; console_log!("image embedded in {:?}s", (Date::now() - start) / 1000.); let mut logits_processor = LogitsProcessor::new(299792458, None, None); let mut token_ids = vec![30522u32]; let mut text: String = "".to_string(); let start = Date::now(); for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = self.model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = self.tokenizer.next_token(token)? { text.push_str(&t); } } if let Some(rest) = self .tokenizer .decode_rest() .map_err(|m| JsError::new(&m.to_string()))? 
{ text.push_str(&rest); } console_log!("caption generated in {:?}s", (Date::now() - start) / 1000.); Ok(text) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let device = &Device::Cpu; let img = image::ImageReader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), device)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/blip/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/blip/src/bin/m.rs", "repo_id": "candle", "token_count": 2698 }
65
## Running Yolo Examples Here, we provide two examples of how to run YOLOv8 using a Candle-compiled WASM binary in different runtimes. ### Pure Rust UI To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install). From the `candle-wasm-examples/yolo` directory, first download the assets: ```bash wget -c https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg wget -c https://huggingface.co/lmz/candle-yolo-v8/resolve/main/yolov8s.safetensors ``` Then run the hot-reload server: ```bash trunk serve --release --public-url / --port 8080 ``` ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first build the WASM library: ```bash sh build-lib.sh ``` This bundles the library under `./build`, and we can then import it inside our WebWorker like a normal JS module: ```js import init, { Model, ModelPose } from "./build/m.js"; ``` The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so there is no need to download anything. Finally, you can preview the example by running a local HTTP server, for example: ```bash python -m http.server ``` Then open `http://localhost:8000/lib-example.html` in your browser.
candle/candle-wasm-examples/yolo/README.md/0
{ "file_path": "candle/candle-wasm-examples/yolo/README.md", "repo_id": "candle", "token_count": 412 }
66
#![allow(unused)] use candle::{ quantized::{self, k_quants, GgmlDType, GgmlType}, test_utils::to_vec2_round, Device, Module, Result, Tensor, }; use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn quantized_matmul_neg() -> Result<()> { let cpu = &Device::Cpu; let (m, k, n) = (3, 64, 4); let lhs = (0..(m * k)) .map(|v| v as f32 - (m * k) as f32 / 2.0) .collect::<Vec<_>>(); let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?; let mut dst = vec![42.; 3 * 4]; let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8]; let rhs = (0..k * n) .map(|v| v as f32 - (k * n) as f32 / 3.0) .collect::<Vec<_>>(); let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?; k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?; k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?; assert_eq!( dst.iter().map(|x| x.round()).collect::<Vec<_>>(), &[ 243524.0, -19596.0, -285051.0, -549815.0, 23777.0, 21651.0, 19398.0, 18367.0, -196472.0, 63012.0, 324585.0, 587902.0 ] ); let mm = tensor_lhs.matmul(&tensor_rhs)?; assert_eq!( to_vec2_round(&mm, 0)?, &[ [244064.0, -20128.0, -284320.0, -548512.0], [23563.0, 21515.0, 19467.0, 17419.0], [-196939.0, 63157.0, 323253.0, 583349.0] ] ); let qtensor = quantized::QTensor::new(quantized::QStorage::Cpu(Box::new(rhs_t)), (4, 64))?; let matmul = quantized::QMatMul::from_qtensor(qtensor)?; let res = matmul.forward(&tensor_lhs)?; assert_eq!( to_vec2_round(&res, 0)?, &[ [243524.0, -19596.0, -285051.0, -549815.0], [23777.0, 21651.0, 19398.0, 18367.0], [-196472.0, 63012.0, 324585.0, 587902.0] ] ); Ok(()) } /// Creates a vector similarly to the one used in GGML unit tests: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30 fn create_ggml_like_vector(offset: f32) -> Vec<f32> { const GGML_TEST_SIZE: usize = 32 * 128; (0..GGML_TEST_SIZE) .map(|i| 0.1 + 2.0 * (i as f32 + offset).cos()) .collect() } /// Very simple dot product implementation fn vec_dot_reference(a: &[f32], b: &[f32]) -> f32 { a.iter().zip(b).map(|(a, b)| a * b).sum() } /// Returns the error achieved by the GGML matmul unit test. fn ggml_reference_matmul_error(dtype: GgmlDType) -> Result<f32> { let err = match dtype { GgmlDType::F16 => 0.000010, GgmlDType::Q2K => 0.004086, GgmlDType::Q3K => 0.016148, GgmlDType::Q4K => 0.002425, GgmlDType::Q5K => 0.000740, GgmlDType::Q6K => 0.000952, GgmlDType::Q4_0 => 0.001143, GgmlDType::Q4_1 => 0.007784, GgmlDType::Q5_0 => 0.001353, GgmlDType::Q5_1 => 0.001363, GgmlDType::Q8_0 => 0.000092, // Not from the ggml repo. 
GgmlDType::Q8K => 0.00065, _ => candle::bail!("No GGML results for quantization type {dtype:?}",), }; Ok(err) } /// Mirrors the GGML matmul unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91 fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> { const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02; let a = create_ggml_like_vector(0.0); let b = create_ggml_like_vector(1.0); let length = a.len(); let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE]; let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE]; T::from_float(&a, &mut a_quant)?; T::VecDotType::from_float(&b, &mut b_quant)?; let result = T::vec_dot(length, &a_quant, &b_quant)?; let result_unopt = T::vec_dot_unopt(length, &a_quant, &b_quant)?; let reference_result = vec_dot_reference(&a, &b); if (result - result_unopt).abs() / length as f32 > 1e-6 { candle::bail!( "the opt and unopt vec-dot returned different values, opt {result}, unopt {result_unopt}" ) } let error = (result - reference_result).abs() / length as f32; let ggml_error = ggml_reference_matmul_error(T::DTYPE)?; if !error.is_finite() || error > GGML_MAX_DOT_PRODUCT_ERROR { candle::bail!( "Dot product error {} exceeds max error {}", error, GGML_MAX_DOT_PRODUCT_ERROR ); } // We diverge slightly due to different rounding behavior / f16 to f32 conversions in GGML // => we use a slightly higher error threshold const ERROR_LENIENCY: f32 = 0.00001; if error - ERROR_LENIENCY > ggml_error { candle::bail!( "Dot product error {} exceeds ggml reference error {}", error, ggml_error ); } Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q40() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q50() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q80() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q2k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ2K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q3k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ3K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q4k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q5k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q6k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ6K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q8k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8K>()?; Ok(()) }
candle/candle-wasm-tests/tests/quantized_tests.rs/0
{ "file_path": "candle/candle-wasm-tests/tests/quantized_tests.rs", "repo_id": "candle", "token_count": 3151 }
67
image: repository: huggingface name: chat-ui nodeSelector: role-huggingchat: "true" tolerations: - key: "huggingface.co/huggingchat" operator: "Equal" value: "true" effect: "NoSchedule" serviceAccount: enabled: true create: true name: huggingchat-prod ingress: path: "/chat" annotations: alb.ingress.kubernetes.io/healthcheck-path: "/healthcheck" alb.ingress.kubernetes.io/listen-ports: "[{\"HTTP\": 80}, {\"HTTPS\": 443}]" alb.ingress.kubernetes.io/group.name: "hub-prod" alb.ingress.kubernetes.io/scheme: "internet-facing" alb.ingress.kubernetes.io/ssl-redirect: "443" alb.ingress.kubernetes.io/tags: "Env=prod,Project=hub,Terraform=true" alb.ingress.kubernetes.io/target-node-labels: "role-hub-utils=true" kubernetes.io/ingress.class: "alb" ingressInternal: enabled: true path: "/chat" annotations: alb.ingress.kubernetes.io/group.name: hub-prod-internal-public alb.ingress.kubernetes.io/healthcheck-path: "/healthcheck" alb.ingress.kubernetes.io/listen-ports: "[{\"HTTP\": 80}, {\"HTTPS\": 443}]" alb.ingress.kubernetes.io/load-balancer-name: hub-prod-internal-public alb.ingress.kubernetes.io/target-group-attributes: deregistration_delay.timeout_seconds=30 alb.ingress.kubernetes.io/target-node-labels: role-hub-lb=true alb.ingress.kubernetes.io/target-type: ip kubernetes.io/ingress.class: "alb" envVars: ADDRESS_HEADER: 'X-Forwarded-For' ADMIN_CLI_LOGIN: "false" ALTERNATIVE_REDIRECT_URLS: '["huggingchat://login/callback"]' APP_BASE: "/chat" ALLOW_IFRAME: "false" COMMUNITY_TOOLS: "true" COOKIE_SAMESITE: "lax" COOKIE_SECURE: "true" ENABLE_ASSISTANTS: "true" ENABLE_ASSISTANTS_RAG: "true" ENABLE_CONFIG_MANAGER: "false" METRICS_PORT: 5565 LOG_LEVEL: "debug" METRICS_ENABLED: "true" MODELS: > [ { "name": "meta-llama/Llama-3.3-70B-Instruct", "id": "meta-llama/Llama-3.3-70B-Instruct", "description": "Ideal for everyday use. A fast and extremely capable model matching closed source models' capabilities. Now with the latest Llama 3.3 weights!", "modelUrl": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", "websiteUrl": "https://llama.meta.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png", "tools": true, "preprompt": "", "parameters": { "stop": ["<|endoftext|>", "<|eot_id|>"], "temperature": 0.6, "max_new_tokens": 1024, "truncate": 7167 }, "endpoints": [{"type" : "inference-client"}], "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ] }, { "name": "Qwen/Qwen3-235B-A22B", "description": "Qwen's flagship model featuring optional reasoning. Exceptional performance with benchmarks rivaling R1 and o1.", "modelUrl": "https://huggingface.co/Qwen/Qwen3-235B-A22B", "websiteUrl": "https://qwenlm.github.io/blog/qwen3/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "preprompt": "You are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.", "reasoning": { "type": "tokens", "beginToken": "<think>", "endToken": "</think>" }, "parameters": { "stop": ["<|endoftext|>", "<|im_end|>"], "temperature": 0.6, }, "tools": true, "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12) /nothink" }, { "title": "Build a website", "prompt": "Generate a snazzy static landing page for a local coffee shop using HTML and CSS. You can use tailwind using <script src='https://cdn.tailwindcss.com'></script>." }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, ], "endpoints": [ { "type": "inference-client", "baseURL": "https://api-inference.endpoints.huggingface.tech/models/Qwen/Qwen3-235B-A22B/v1" } ] }, { "name": "Qwen/Qwen2.5-72B-Instruct", "description": "The latest Qwen open model with improved role-playing, long text generation and structured data understanding.", "modelUrl": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", "websiteUrl": "https://qwenlm.github.io/blog/qwen2.5/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "preprompt": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.", "parameters": { "stop": ["<|endoftext|>", "<|im_end|>"], "temperature": 0.6, "truncate": 28672, "max_new_tokens": 3072 }, "tools": true, "endpoints": [{"type" : "inference-client"}], "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ] }, { "name": "CohereLabs/c4ai-command-r-plus-08-2024", "description": "Cohere's largest language model, optimized for conversational interaction and tool use. Now with the 2024 update!", "modelUrl": "https://huggingface.co/CohereLabs/c4ai-command-r-plus-08-2024", "websiteUrl": "https://docs.cohere.com/docs/command-r-plus", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png", "tools": true, "systemRoleSupported": false, "parameters": { "stop": ["<|END_OF_TURN_TOKEN|>", "<|im_end|>"], "truncate": 28672, "max_new_tokens": 2048, "temperature": 0.3 }, "endpoints": [{"type" : "inference-client"}], "promptExamples": [ { "title": "Generate image", "prompt": "Generate the portrait of a scientific mouse in its laboratory." }, { "title": "Review code", "prompt": "Review this pull request: https://github.com/huggingface/chat-ui/pull/1131/files" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." } ] }, { "name": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "modelUrl": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "websiteUrl": "https://deepseek.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/deepseek-logo.png", "description": "The first reasoning model from DeepSeek, distilled into a 32B dense model. Outperforms o1-mini on multiple benchmarks.", "reasoning": { "type": "tokens", "beginToken": "", "endToken": "</think>" }, "tools": true, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" 
}, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ], "endpoints": [ { "type": "inference-client", "baseURL": "https://b8xf586h164t4vk7.us-east-1.aws.endpoints.huggingface.cloud/v1" } ] }, { "name": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "modelUrl": "https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "websiteUrl": "https://www.nvidia.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nvidia-logo.png", "description": "Nvidia's latest Llama fine-tune, topping alignment benchmarks and optimized for instruction following.", "parameters": { "stop": ["<|eot_id|>", "<|im_end|>"], "temperature": 0.5, "truncate": 28672, "max_new_tokens": 2048 }, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ], "endpoints": [ { "type": "inference-client", "baseURL": "https://api-inference.endpoints.huggingface.tech/models/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF/v1" } ] }, { "name": "Qwen/QwQ-32B", "tools": true, "preprompt": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.", "modelUrl": "https://huggingface.co/Qwen/QwQ-32B", "websiteUrl": "https://qwenlm.github.io/blog/qwq-32b/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "description": "QwQ is the latest reasoning model released by the Qwen team, approaching the capabilities of R1 in benchmarks.", "reasoning": { "type": "tokens", "beginToken": "", "endToken": "</think>" }, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ], "endpoints": [ { "type": "inference-client", } ] }, { "name": "google/gemma-3-27b-it", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/google-logo.png", "multimodal": true, "tools": true, "systemRoleSupported" : false, "description": "Google's latest open model with great multilingual performance, supports image inputs natively.", "websiteUrl": "https://blog.google/technology/developers/gemma-3/", "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" 
} ], "endpoints": [ { "type": "inference-client", "baseURL": "https://wp0d3hn6s3k8jk22.us-east-1.aws.endpoints.huggingface.cloud/v1", "multimodal": { "image": { "maxSizeInMB": 10, "maxWidth": 560, "maxHeight": 560, "supportedMimeTypes": ["image/jpeg", "image/png", "image/webp"], "preferredMimeType": "image/webp" } } } ] }, { "name": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", "tools": true, "displayName": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", "description": "A small model with good capabilities in language understanding and commonsense reasoning.", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png", "websiteUrl": "https://mistral.ai/news/mistral-nemo/", "modelUrl": "https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503", "preprompt": "", "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ], "endpoints": [ { "type": "inference-client", "baseURL": "https://hkjfqcryevvq9cie.us-east-1.aws.endpoints.huggingface.cloud/v1" } ] }, { "name": "Qwen/Qwen2.5-VL-32B-Instruct", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "description": "The latest multimodal model from Qwen! Supports image inputs natively.", "websiteUrl": "https://qwenlm.github.io/blog/qwen2.5-vl/", "modelUrl": "https://huggingface.co/Qwen/Qwen2.5-VL-32B-Instruct", "multimodal": true, "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ], "endpoints": [ { "type": "inference-client", "multimodal": { "image": { "maxSizeInMB": 10, "maxWidth": 1024, "maxHeight": 1024, "supportedMimeTypes": ["image/png", "image/jpeg", "image/webp"], "preferredMimeType": "image/webp" } } } ] }, { "name": "microsoft/Phi-4", "tools": true, "systemRoleSupported": false, "description": "One of the best small models, super fast for simple tasks.", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/microsoft-logo.png", "modelUrl": "https://huggingface.co/microsoft/Phi-4", "websiteUrl": "https://techcommunity.microsoft.com/blog/aiplatformblog/introducing-phi-4-microsoft%E2%80%99s-newest-small-language-model-specializing-in-comple/4357090", "preprompt": "", "parameters": { "stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"], "temperature": 0.6, "truncate": 28672, "max_new_tokens": 3072 }, "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" 
} ], "endpoints": [ { "type": "inference-client", "baseURL": "https://up5ijetg6a2e9zlb.us-east-1.aws.endpoints.huggingface.cloud/v1" } ] }, { "name": "NousResearch/Hermes-3-Llama-3.1-8B", "description": "Nous Research's latest Hermes 3 release in 8B size. Follows instruction closely.", "tools": true, "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nous-logo.png", "websiteUrl": "https://nousresearch.com/", "modelUrl": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B", "promptExamples": [ { "title": "Write an email", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Recipe help", "prompt": "How do I make a delicious lemon cheesecake?" } ], "parameters": { "stop": ["<|im_end|>"], "temperature": 0.6, "truncate": 14336, "max_new_tokens": 1536 }, "endpoints": [{"type" : "inference-client"}] } ] NODE_ENV: "prod" NODE_LOG_STRUCTURED_DATA: true OLD_MODELS: > [ { "name": "bigcode/starcoder" }, { "name": "OpenAssistant/oasst-sft-6-llama-30b-xor" }, { "name": "HuggingFaceH4/zephyr-7b-alpha" }, { "name": "openchat/openchat_3.5" }, { "name": "openchat/openchat-3.5-1210" }, { "name": "tiiuae/falcon-180B-chat" }, { "name": "codellama/CodeLlama-34b-Instruct-hf" }, { "name": "google/gemma-7b-it" }, { "name": "meta-llama/Llama-2-70b-chat-hf" }, { "name": "codellama/CodeLlama-70b-Instruct-hf" }, { "name": "openchat/openchat-3.5-0106" }, { "name": "meta-llama/Meta-Llama-3-70B-Instruct" }, { "name": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8" }, { "name": "CohereForAI/c4ai-command-r-plus-08-2024", "transferTo": "CohereLabs/c4ai-command-r-plus-08-2024" }, { "name": "CohereForAI/c4ai-command-r-plus", "transferTo": "CohereLabs/c4ai-command-r-plus-08-2024" }, { "name": "01-ai/Yi-1.5-34B-Chat", "transferTo": "CohereLabs/c4ai-command-r-plus-08-2024" }, { "name": "mistralai/Mixtral-8x7B-Instruct-v0.1", "transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503" }, { "name": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "transferTo": "NousResearch/Hermes-3-Llama-3.1-8B" }, { "name": "mistralai/Mistral-7B-Instruct-v0.3", "transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503" }, { "name": "microsoft/Phi-3-mini-4k-instruct", "transferTo": "microsoft/Phi-4" }, { "name": "meta-llama/Meta-Llama-3.1-70B-Instruct", "transferTo": "meta-llama/Llama-3.3-70B-Instruct" }, { "name": "Qwen/QwQ-32B-Preview", "transferTo": "Qwen/QwQ-32B" }, { "name": "mistralai/Mistral-Nemo-Instruct-2407", "transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503" }, { "name": "microsoft/Phi-3.5-mini-instruct", "transferTo": "microsoft/Phi-4" }, { "name": "Qwen/Qwen2.5-Coder-32B-Instruct", "transferTo": "Qwen/QwQ-32B" }, { "name": "meta-llama/Llama-3.2-11B-Vision-Instruct", "transferTo" : "Qwen/Qwen2.5-VL-32B-Instruct" } ] PUBLIC_ORIGIN: "https://huggingface.co" PUBLIC_SHARE_PREFIX: "https://hf.co/chat" PUBLIC_ANNOUNCEMENT_BANNERS: > [ { "title": "Qwen 3 235B is available!", "linkTitle": "Try it out!", "linkHref": "https://huggingface.co/chat/models/Qwen/Qwen3-235B-A22B" } ] PUBLIC_APP_NAME: "HuggingChat" PUBLIC_APP_ASSETS: "huggingchat" PUBLIC_APP_COLOR: "yellow" PUBLIC_APP_DESCRIPTION: "Making the community's best AI chat models available to everyone." 
PUBLIC_APP_DISCLAIMER_MESSAGE: "Disclaimer: AI is an area of active research with known problems such as biased generation and misinformation. Do not use this application for high-stakes decisions or advice." PUBLIC_APP_GUEST_MESSAGE: "Sign in with a free Hugging Face account to continue using HuggingChat." PUBLIC_APP_DATA_SHARING: 0 PUBLIC_APP_DISCLAIMER: 1 PUBLIC_PLAUSIBLE_SCRIPT_URL: "/js/script.js" REQUIRE_FEATURED_ASSISTANTS: "true" TASK_MODEL: > { "name": "NousResearch/Hermes-3-Llama-3.1-8B", "unlisted": true, "endpoints": [{"type" : "inference-client"}], "parameters": { "temperature": 0.1, "max_new_tokens": 256 } } TEXT_EMBEDDING_MODELS: > [{ "name": "bge-base-en-v1-5-sxa", "displayName": "bge-base-en-v1-5-sxa", "chunkCharLength": 512, "endpoints": [{ "type": "tei", "url": "https://huggingchat-tei.hf.space/" }] }] WEBSEARCH_BLOCKLIST: '["youtube.com", "twitter.com"]' XFF_DEPTH: '2' TOOLS: > [ { "_id": "000000000000000000000001", "displayName": "Image Generation", "description": "Use this tool to generate images based on a prompt.", "color": "yellow", "icon": "camera", "baseUrl": "black-forest-labs/FLUX.1-schnell", "name": "image_generation", "endpoint": "/infer", "inputs": [ { "name": "prompt", "description": "A prompt to generate an image from", "paramType": "required", "type": "str" }, { "name": "seed", "paramType": "fixed", "value": "0", "type": "float" }, { "name": "randomize_seed", "paramType": "fixed", "value": "true", "type": "bool" }, { "name": "width", "description": "numeric value between 256 and 2048", "paramType": "optional", "default": 1024, "type": "float" }, { "name": "height", "description": "numeric value between 256 and 2048", "paramType": "optional", "default": 1024, "type": "float" }, { "name": "num_inference_steps", "paramType": "fixed", "value": "4", "type": "float" } ], "outputComponent": "image", "outputComponentIdx": 0, "showOutput": true }, { "_id": "000000000000000000000002", "displayName": "Document Parser", "description": "Use this tool to parse any document and get its content in markdown format.", "color": "yellow", "icon": "cloud", "baseUrl": "huggingchat/document-parser", "name": "document_parser", "endpoint": "/predict", "inputs": [ { "name": "document", "description": "Filename of the document to parse", "paramType": "required", "type": "file", "mimeTypes": 'application/*' }, { "name": "filename", "paramType": "fixed", "value": "document.pdf", "type": "str" } ], "outputComponent": "textbox", "outputComponentIdx": 0, "showOutput": false, "isHidden": true }, { "_id": "000000000000000000000003", "name": "edit_image", "baseUrl": "multimodalart/cosxl", "endpoint": "/run_edit", "inputs": [ { "name": "image", "description": "The image path to be edited", "paramType": "required", "type": "file", "mimeTypes": 'image/*' }, { "name": "prompt", "description": "The prompt with which to edit the image", "paramType": "required", "type": "str" }, { "name": "negative_prompt", "paramType": "fixed", "value": "", "type": "str" }, { "name": "guidance_scale", "paramType": "fixed", "value": 6.5, "type": "float" }, { "name": "steps", "paramType": "fixed", "value": 30, "type": "float" } ], "outputComponent": "image", "showOutput": true, "displayName": "Image Editor", "color": "green", "icon": "camera", "description": "This tool lets you edit images", "outputComponentIdx": 0 } ] HF_ORG_ADMIN: '644171cfbd0c97265298aa99' HF_ORG_EARLY_ACCESS: '5e67bd5b1009063689407478' HF_API_ROOT: 'https://api-inference.endpoints.huggingface.tech/models' infisical: enabled: true env: 
"prod-us-east-1" autoscaling: enabled: true minReplicas: 2 maxReplicas: 30 targetMemoryUtilizationPercentage: "50" targetCPUUtilizationPercentage: "50" resources: requests: cpu: 2 memory: 4Gi limits: cpu: 4 memory: 8Gi monitoring: enabled: true
chat-ui/chart/env/prod.yaml/0
{ "file_path": "chat-ui/chart/env/prod.yaml", "repo_id": "chat-ui", "token_count": 13215 }
68
# Metrics

The server can expose Prometheus metrics on port `5565`, but this is disabled by default.

You may enable the metrics server with `METRICS_ENABLED=true` and change the port with `METRICS_PORT=1234`.

<Tip>

In development with `npm run dev`, the metrics server does not shut down gracefully because SvelteKit does not provide restart hooks. It's recommended to disable the metrics server in this case.

</Tip>
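For example, to turn the metrics server on while keeping the documented default port, an env override along these lines should work (illustrative only):

```ini
METRICS_ENABLED=true
METRICS_PORT=5565
```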
chat-ui/docs/source/configuration/metrics.md/0
{ "file_path": "chat-ui/docs/source/configuration/metrics.md", "repo_id": "chat-ui", "token_count": 111 }
69
# Theming

You can use a few environment variables to customize the look and feel of Chat UI. These are by default:

```ini
PUBLIC_APP_NAME=ChatUI
PUBLIC_APP_ASSETS=chatui
PUBLIC_APP_COLOR=blue
PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone."
PUBLIC_APP_DATA_SHARING=
PUBLIC_APP_DISCLAIMER=
```

- `PUBLIC_APP_NAME` The name used as a title throughout the app.
- `PUBLIC_APP_ASSETS` Is used to find logos & favicons in `static/$PUBLIC_APP_ASSETS`, current options are `chatui` and `huggingchat`.
- `PUBLIC_APP_COLOR` Can be any of the [tailwind colors](https://tailwindcss.com/docs/customizing-colors#default-color-palette).
- `PUBLIC_APP_DATA_SHARING` Can be set to 1 to add a toggle in the user settings that lets your users opt in to data sharing with model creators.
- `PUBLIC_APP_DISCLAIMER` If set to 1, a disclaimer about generated outputs is shown on login.
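As an illustration only (the values below are arbitrary, not an official theme), a customized setup could look like:

```ini
PUBLIC_APP_NAME=MyChat
PUBLIC_APP_ASSETS=chatui
PUBLIC_APP_COLOR=emerald
PUBLIC_APP_DESCRIPTION="An internal chat assistant for my team."
PUBLIC_APP_DATA_SHARING=1
PUBLIC_APP_DISCLAIMER=1
```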
chat-ui/docs/source/configuration/theming.md/0
{ "file_path": "chat-ui/docs/source/configuration/theming.md", "repo_id": "chat-ui", "token_count": 286 }
70
<script lang="ts"> import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte"; import DOMPurify from "isomorphic-dompurify"; interface Props { code?: string; rawCode?: string; } let { code = "", rawCode = "" }: Props = $props(); </script> <div class="group relative my-4 rounded-lg"> <pre class="scrollbar-custom overflow-auto px-5 font-mono scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code ><!-- eslint-disable svelte/no-at-html-tags -->{@html DOMPurify.sanitize(code)}</code ></pre> <CopyToClipBoardBtn classNames="btn rounded-lg border border-gray-200 px-2 py-2 text-sm shadow-sm transition-all hover:border-gray-300 active:shadow-inner dark:border-gray-700 dark:hover:border-gray-500 absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100 dark:text-gray-700 text-gray-200" value={rawCode} /> </div>
chat-ui/src/lib/components/CodeBlock.svelte/0
{ "file_path": "chat-ui/src/lib/components/CodeBlock.svelte", "repo_id": "chat-ui", "token_count": 350 }
71
<script lang="ts"> import CarbonCaretLeft from "~icons/carbon/caret-left"; import CarbonCaretRight from "~icons/carbon/caret-right"; interface Props { href: string; direction: "next" | "previous"; isDisabled?: boolean; } let { href, direction, isDisabled = false }: Props = $props(); </script> <a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 {isDisabled ? 'pointer-events-none opacity-50' : ''}" {href} > {#if direction === "previous"} <CarbonCaretLeft classNames="mr-1.5" /> Previous {:else} Next <CarbonCaretRight classNames="ml-1.5" /> {/if} </a>
chat-ui/src/lib/components/PaginationArrow.svelte/0
{ "file_path": "chat-ui/src/lib/components/PaginationArrow.svelte", "repo_id": "chat-ui", "token_count": 254 }
72
<script lang="ts"> import type { Message } from "$lib/types/Message"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonChevronLeft from "~icons/carbon/chevron-left"; import CarbonChevronRight from "~icons/carbon/chevron-right"; import { createEventDispatcher } from "svelte"; import { page } from "$app/state"; import { error } from "$lib/stores/errors"; import { invalidate } from "$app/navigation"; import { UrlDependency } from "$lib/types/UrlDependency"; import { handleResponse, useAPIClient } from "$lib/APIClient"; interface Props { message: Message; alternatives?: Message["id"][]; loading?: boolean; } let { message, alternatives = [], loading = false }: Props = $props(); let currentIdx = $derived(alternatives.findIndex((id) => id === message.id)); const dispatch = createEventDispatcher<{ showAlternateMsg: { id: Message["id"] }; }>(); const client = useAPIClient(); </script> <div class="font-white group/navbranch z-0 -mt-1 ml-3.5 mr-auto flex h-6 w-fit select-none flex-row items-center justify-center gap-1 text-sm" > <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" onclick={() => dispatch("showAlternateMsg", { id: alternatives[Math.max(0, currentIdx - 1)] })} disabled={currentIdx === 0 || loading} > <CarbonChevronLeft class="text-sm" /> </button> <span class=" text-gray-400 dark:text-gray-500"> {currentIdx + 1} / {alternatives.length} </span> <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" onclick={() => dispatch("showAlternateMsg", { id: alternatives[Math.min(alternatives.length - 1, currentIdx + 1)], })} disabled={currentIdx === alternatives.length - 1 || loading} > <CarbonChevronRight class="text-sm" /> </button> {#if !loading && message.children} <button class="hidden group-hover/navbranch:block" onclick={() => { if (confirm("Are you sure you want to delete this branch?")) { client .conversations({ id: page.params.id }) .message({ messageId: message.id }) .delete() .then(handleResponse) .then(async () => { await invalidate(UrlDependency.Conversation); }) .catch((err) => { console.error(err); $error = String(err); }); } }} > <div class="flex items-center justify-center text-xs text-gray-400 hover:text-gray-800 dark:text-gray-500 dark:hover:text-gray-200" > <CarbonTrashCan /> </div> </button> {/if} </div>
chat-ui/src/lib/components/chat/Alternatives.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/Alternatives.svelte", "repo_id": "chat-ui", "token_count": 1033 }
73
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" > <path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)" /> <path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)" /><rect fill="none" width="32" height="32" /> </svg>
chat-ui/src/lib/components/icons/IconCopy.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconCopy.svelte", "repo_id": "chat-ui", "token_count": 324 }
74
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import type { Semaphores } from "$lib/types/Semaphore"; /** * Returns the lock id if the lock was acquired, false otherwise */ export async function acquireLock(key: Semaphores): Promise<ObjectId | false> { try { const id = new ObjectId(); const insert = await collections.semaphores.insertOne({ _id: id, key, createdAt: new Date(), updatedAt: new Date(), deleteAt: new Date(Date.now() + 1000 * 60 * 3), // 3 minutes }); return insert.acknowledged ? id : false; // true if the document was inserted } catch (e) { // unique index violation, so there must already be a lock return false; } } export async function releaseLock(key: Semaphores, lockId: ObjectId) { await collections.semaphores.deleteOne({ _id: lockId, key, }); } export async function isDBLocked(key: Semaphores): Promise<boolean> { const res = await collections.semaphores.countDocuments({ key, }); return res > 0; } export async function refreshLock(key: Semaphores, lockId: ObjectId): Promise<boolean> { const result = await collections.semaphores.updateOne( { _id: lockId, key, }, { $set: { updatedAt: new Date(), deleteAt: new Date(Date.now() + 1000 * 60 * 3), // 3 minutes }, } ); return result.matchedCount > 0; }
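// Usage sketch (illustrative only, not part of this module): a long-running job would
// typically acquire the lock, refresh it periodically while working, and release it when
// done. The `Semaphores.MIGRATIONS` key below is a hypothetical enum member used only
// for illustration.
//
//   const lockId = await acquireLock(Semaphores.MIGRATIONS);
//   if (lockId) {
//     const interval = setInterval(() => refreshLock(Semaphores.MIGRATIONS, lockId), 1000 * 30);
//     try {
//       // ... do the work guarded by the lock ...
//     } finally {
//       clearInterval(interval);
//       await releaseLock(Semaphores.MIGRATIONS, lockId);
//     }
//   }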
chat-ui/src/lib/migrations/lock.ts/0
{ "file_path": "chat-ui/src/lib/migrations/lock.ts", "repo_id": "chat-ui", "token_count": 475 }
75
import { config } from "$lib/server/config"; import type { Session } from "$lib/types/Session"; import { logger } from "./logger"; import { v4 } from "uuid"; class AdminTokenManager { private token = config.ADMIN_TOKEN || v4(); // contains all session ids that are currently admin sessions private adminSessions: Array<Session["sessionId"]> = []; public get enabled() { // if open id is configured, disable the feature return config.ADMIN_CLI_LOGIN === "true"; } public isAdmin(sessionId: Session["sessionId"]) { if (!this.enabled) return false; return this.adminSessions.includes(sessionId); } public checkToken(token: string, sessionId: Session["sessionId"]) { if (!this.enabled) return false; if (token === this.token) { logger.info(`[ADMIN] Token validated`); this.adminSessions.push(sessionId); this.token = config.ADMIN_TOKEN || v4(); return true; } return false; } public removeSession(sessionId: Session["sessionId"]) { this.adminSessions = this.adminSessions.filter((id) => id !== sessionId); } public displayToken() { // if admin token is set, don't display it if (!this.enabled || config.ADMIN_TOKEN) return; let port = process.argv.includes("--port") ? parseInt(process.argv[process.argv.indexOf("--port") + 1]) : undefined; if (!port) { const mode = process.argv.find((arg) => arg === "preview" || arg === "dev"); if (mode === "preview") { port = 4173; } else if (mode === "dev") { port = 5173; } else { port = 3000; } } const url = (config.PUBLIC_ORIGIN || `http://localhost:${port}`) + "?token="; logger.info(`[ADMIN] You can login with ${url + this.token}`); } } export const adminTokenManager = new AdminTokenManager();
chat-ui/src/lib/server/adminToken.ts/0
{ "file_path": "chat-ui/src/lib/server/adminToken.ts", "repo_id": "chat-ui", "token_count": 625 }
76
import { z } from "zod"; import type { EmbeddingEndpoint } from "../embeddingEndpoints"; import type { Tensor, FeatureExtractionPipeline } from "@huggingface/transformers"; import { pipeline } from "@huggingface/transformers"; export const embeddingEndpointTransformersJSParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("transformersjs"), }); // Use the Singleton pattern to enable lazy construction of the pipeline. class TransformersJSModelsSingleton { static instances: Array<[string, Promise<FeatureExtractionPipeline>]> = []; static async getInstance(modelName: string): Promise<FeatureExtractionPipeline> { const modelPipelineInstance = this.instances.find(([name]) => name === modelName); if (modelPipelineInstance) { const [, modelPipeline] = modelPipelineInstance; // dispose of the previous pipeline to clear memory await (await modelPipeline).dispose(); this.instances = this.instances.filter(([name]) => name !== modelName); } const newModelPipeline = pipeline("feature-extraction", modelName); this.instances.push([modelName, newModelPipeline]); return newModelPipeline; } } export async function calculateEmbedding(modelName: string, inputs: string[]) { const extractor = await TransformersJSModelsSingleton.getInstance(modelName); const output: Tensor = await extractor(inputs, { pooling: "mean", normalize: true }); return output.tolist(); } export function embeddingEndpointTransformersJS( input: z.input<typeof embeddingEndpointTransformersJSParametersSchema> ): EmbeddingEndpoint { const { model } = embeddingEndpointTransformersJSParametersSchema.parse(input); return async ({ inputs }) => { return calculateEmbedding(model.name, inputs); }; }
chat-ui/src/lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 542 }
77
import { config } from "$lib/server/config"; import { buildPrompt } from "$lib/buildPrompt"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; import { logger } from "$lib/server/logger"; export const endpointLlamacppParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("llamacpp"), url: z.string().url().default("http://127.0.0.1:8080"), // legacy, feel free to remove in breaking change update baseURL: z.string().url().optional(), accessToken: z.string().default(config.HF_TOKEN ?? config.HF_ACCESS_TOKEN), }); export function endpointLlamacpp( input: z.input<typeof endpointLlamacppParametersSchema> ): Endpoint { const { baseURL, url, model } = endpointLlamacppParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage, generateSettings }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const parameters = { ...model.parameters, ...generateSettings }; const r = await fetch(`${baseURL ?? url}/completion`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ prompt, stream: true, temperature: parameters.temperature, top_p: parameters.top_p, top_k: parameters.top_k, stop: parameters.stop, repeat_penalty: parameters.repetition_penalty, n_predict: parameters.max_new_tokens, cache_prompt: true, }), }); if (!r.ok) { throw new Error(`Failed to generate text: ${await r.text()}`); } const encoder = new TextDecoderStream(); const reader = r.body?.pipeThrough(encoder).getReader(); return (async function* () { let stop = false; let generatedText = ""; let tokenId = 0; let accumulatedData = ""; // Buffer to accumulate data chunks while (!stop) { // Read the stream and log the outputs to console const out = (await reader?.read()) ?? { done: false, value: undefined }; // If it's done, we cancel if (out.done) { reader?.cancel(); return; } if (!out.value) { return; } // Accumulate the data chunk accumulatedData += out.value; // Process each complete JSON object in the accumulated data while (accumulatedData.includes("\n")) { // Assuming each JSON object ends with a newline const endIndex = accumulatedData.indexOf("\n"); let jsonString = accumulatedData.substring(0, endIndex).trim(); // Remove the processed part from the buffer accumulatedData = accumulatedData.substring(endIndex + 1); if (jsonString.startsWith("data: ")) { jsonString = jsonString.slice(6); let data = null; try { data = JSON.parse(jsonString); } catch (e) { logger.error(e, "Failed to parse JSON"); logger.error(jsonString, "Problematic JSON string:"); continue; // Skip this iteration and try the next chunk } // Handle the parsed data if (data.content || data.stop) { generatedText += data.content; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: data.content ?? "", logprob: 0, special: false, }, generated_text: data.stop ? generatedText : null, details: null, }; if (data.stop) { stop = true; output.token.special = true; reader?.cancel(); } yield output; } } } } })(); }; } export default endpointLlamacpp;
chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts", "repo_id": "chat-ui", "token_count": 1446 }
78
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint"; import { taskModel } from "../models"; import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator"; import { getToolOutput } from "../tools/getToolOutput"; import type { Tool } from "$lib/types/Tool"; import { logger } from "../logger"; export async function generateSummaryOfReasoning(buffer: string): Promise<string> { let summary: string | undefined; const messages = [ { from: "user" as const, content: buffer.slice(-300), }, ]; const preprompt = `You are tasked with submitting a summary of the latest reasoning steps into a tool. Never describe results of the reasoning, only the process. Remain vague in your summary. The text might be incomplete, try your best to summarize it in one very short sentence, starting with a gerund and ending with three points. The sentence must be very short, ideally 5 words or less.`; if (taskModel.tools) { const summaryTool = { name: "summary", description: "Submit a summary for the submitted text", inputs: [ { name: "summary", type: "str", description: "The short summary of the reasoning steps. 5 words or less. Must start with a gerund.", paramType: "required", }, ], } as unknown as Tool; const endpoint = await taskModel.getEndpoint(); summary = await getToolOutput({ messages, preprompt, tool: summaryTool, endpoint, }).catch(() => { logger.warn("Error getting tool output"); return undefined; }); } if (!summary) { summary = await getReturnFromGenerator( generateFromDefaultEndpoint({ messages: [ { from: "user", content: buffer.slice(-300), }, ], preprompt: `You are tasked with summarizing the latest reasoning steps. Never describe results of the reasoning, only the process. Remain vague in your summary. The text might be incomplete, try your best to summarize it in one very short sentence, starting with a gerund and ending with three points. Example: "Thinking about life...", "Summarizing the results...", "Processing the input..."`, generateSettings: { max_new_tokens: 50, }, }) ); } if (!summary) { return "Reasoning..."; } const parts = summary.split("..."); return parts[0].slice(0, 100) + "..."; }
chat-ui/src/lib/server/textGeneration/reasoning.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/reasoning.ts", "repo_id": "chat-ui", "token_count": 781 }
79
import { collapseString, sanitizeString } from "./utils/nlp"; import { stringifyHTMLElements, stringifyHTMLElementsUnformatted } from "./utils/stringify"; import { MarkdownElementType, tagNameMap, type HeaderElement, type MarkdownElement } from "./types"; import type { SerializedHTMLElement } from "../scrape/types"; interface ConversionState { defaultType: | MarkdownElementType.Paragraph | MarkdownElementType.BlockQuote | MarkdownElementType.UnorderedListItem | MarkdownElementType.OrderedListItem; listDepth: number; blockQuoteDepth: number; } export function htmlElementToMarkdownElements( parent: HeaderElement, elem: SerializedHTMLElement | string, prevState: ConversionState = { defaultType: MarkdownElementType.Paragraph, listDepth: 0, blockQuoteDepth: 0, } ): MarkdownElement | MarkdownElement[] { // Found text so create an element based on the previous state if (typeof elem === "string") { if (elem.trim().length === 0) return []; if ( prevState.defaultType === MarkdownElementType.UnorderedListItem || prevState.defaultType === MarkdownElementType.OrderedListItem ) { return { parent, type: prevState.defaultType, content: elem, depth: prevState.listDepth, }; } if (prevState.defaultType === MarkdownElementType.BlockQuote) { return { parent, type: prevState.defaultType, content: elem, depth: prevState.blockQuoteDepth, }; } return { parent, type: prevState.defaultType, content: elem }; } const type = tagNameMap[elem.tagName] ?? MarkdownElementType.Paragraph; // Update the state based on the current element const state: ConversionState = { ...prevState }; if (type === MarkdownElementType.UnorderedList || type === MarkdownElementType.OrderedList) { state.listDepth += 1; state.defaultType = type === MarkdownElementType.UnorderedList ? MarkdownElementType.UnorderedListItem : MarkdownElementType.OrderedListItem; } if (type === MarkdownElementType.BlockQuote) { state.defaultType = MarkdownElementType.BlockQuote; state.blockQuoteDepth += 1; } // Headers if (type === MarkdownElementType.Header) { return { parent, type, level: Number(elem.tagName[1]), content: collapseString(stringifyHTMLElements(elem.content)), children: [], }; } // Code blocks if (type === MarkdownElementType.CodeBlock) { return { parent, type, content: sanitizeString(stringifyHTMLElementsUnformatted(elem.content)), }; } // Typical case, we want to flatten the DOM and only create elements when we see text return elem.content.flatMap((el) => htmlElementToMarkdownElements(parent, el, state)); } export function mergeAdjacentElements(elements: MarkdownElement[]): MarkdownElement[] { return elements.reduce<MarkdownElement[]>((acc, elem) => { const last = acc[acc.length - 1]; if (last && last.type === MarkdownElementType.Paragraph && last.type === elem.type) { last.content += elem.content; return acc; } return [...acc, elem]; }, []); }
chat-ui/src/lib/server/websearch/markdown/fromHtml.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/fromHtml.ts", "repo_id": "chat-ui", "token_count": 1033 }
80
import { config } from "$lib/server/config"; import { isURL } from "$lib/utils/isUrl"; import type { WebSearchSource } from "$lib/types/WebSearch"; type SerpStackResponse = { organic_results: { title: string; url: string; snippet?: string; }[]; error?: string; }; export default async function searchSerpStack(query: string): Promise<WebSearchSource[]> { const response = await fetch( `http://api.serpstack.com/search?access_key=${config.SERPSTACK_API_KEY}&query=${query}&hl=en&gl=us`, { headers: { "Content-type": "application/json; charset=UTF-8" } } ); const data = (await response.json()) as SerpStackResponse; if (!response.ok) { throw new Error( data.error ?? `SerpStack API returned error code ${response.status} - ${response.statusText}` ); } return data.organic_results .filter(({ url }) => isURL(url)) .map(({ title, url, snippet }) => ({ title, link: url, text: snippet ?? "", })); }
chat-ui/src/lib/server/websearch/search/endpoints/serpStack.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serpStack.ts", "repo_id": "chat-ui", "token_count": 343 }
81
// Ideally shouldn't be needed, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 import type { Conversation } from "./Conversation"; import type { Timestamps } from "./Timestamps"; export interface AbortedGeneration extends Timestamps { conversationId: Conversation["_id"]; }
chat-ui/src/lib/types/AbortedGeneration.ts/0
{ "file_path": "chat-ui/src/lib/types/AbortedGeneration.ts", "repo_id": "chat-ui", "token_count": 93 }
82
import { defaultModel } from "$lib/server/models"; import type { Assistant } from "./Assistant"; import type { Timestamps } from "./Timestamps"; import type { User } from "./User"; export interface Settings extends Timestamps { userId?: User["_id"]; sessionId?: string; /** * Note: Only conversations with this settings explicitly set to true should be shared. * * This setting is explicitly set to true when users accept the ethics modal. * */ shareConversationsWithModelAuthors: boolean; ethicsModalAcceptedAt: Date | null; activeModel: string; hideEmojiOnSidebar?: boolean; // model name and system prompts customPrompts?: Record<string, string>; assistants?: Assistant["_id"][]; tools?: string[]; disableStream: boolean; directPaste: boolean; } export type SettingsEditable = Omit<Settings, "ethicsModalAcceptedAt" | "createdAt" | "updatedAt">; // TODO: move this to a constant file along with other constants export const DEFAULT_SETTINGS = { shareConversationsWithModelAuthors: true, activeModel: defaultModel.id, hideEmojiOnSidebar: false, customPrompts: {}, assistants: [], tools: [], disableStream: false, directPaste: false, } satisfies SettingsEditable;
chat-ui/src/lib/types/Settings.ts/0
{ "file_path": "chat-ui/src/lib/types/Settings.ts", "repo_id": "chat-ui", "token_count": 369 }
83
export function formatUserCount(userCount: number): string { const userCountRanges: { min: number; max: number; label: string }[] = [ { min: 0, max: 1, label: "1" }, { min: 2, max: 9, label: "1-10" }, { min: 10, max: 49, label: "10+" }, { min: 50, max: 99, label: "50+" }, { min: 100, max: 299, label: "100+" }, { min: 300, max: 499, label: "300+" }, { min: 500, max: 999, label: "500+" }, { min: 1_000, max: 2_999, label: "1k+" }, { min: 3_000, max: 4_999, label: "3k+" }, { min: 5_000, max: 9_999, label: "5k+" }, { min: 10_000, max: 19_999, label: "10k+" }, { min: 20_000, max: 29_999, label: "20k+" }, { min: 30_000, max: 39_999, label: "30k+" }, { min: 40_000, max: 49_999, label: "40k+" }, { min: 50_000, max: 59_999, label: "50k+" }, { min: 60_000, max: 69_999, label: "60k+" }, { min: 70_000, max: 79_999, label: "70k+" }, { min: 80_000, max: 89_999, label: "80k+" }, { min: 90_000, max: 99_999, label: "90k+" }, { min: 100_000, max: 109_999, label: "100k+" }, { min: 110_000, max: 119_999, label: "110k+" }, { min: 120_000, max: 129_999, label: "120k+" }, { min: 130_000, max: 139_999, label: "130k+" }, { min: 140_000, max: 149_999, label: "140k+" }, { min: 150_000, max: 199_999, label: "150k+" }, { min: 200_000, max: 299_999, label: "200k+" }, { min: 300_000, max: 499_999, label: "300k+" }, { min: 500_000, max: 749_999, label: "500k+" }, { min: 750_000, max: 999_999, label: "750k+" }, { min: 1_000_000, max: Infinity, label: "1M+" }, ]; const range = userCountRanges.find(({ min, max }) => userCount >= min && userCount <= max); return range?.label ?? ""; }
chat-ui/src/lib/utils/formatUserCount.ts/0
{ "file_path": "chat-ui/src/lib/utils/formatUserCount.ts", "repo_id": "chat-ui", "token_count": 767 }
84
export async function sha256(input: string): Promise<string> { const utf8 = new TextEncoder().encode(input); const hashBuffer = await crypto.subtle.digest("SHA-256", utf8); const hashArray = Array.from(new Uint8Array(hashBuffer)); const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join(""); return hashHex; }
chat-ui/src/lib/utils/sha256.ts/0
{ "file_path": "chat-ui/src/lib/utils/sha256.ts", "repo_id": "chat-ui", "token_count": 119 }
85
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import { v4 } from "uuid"; export function convertLegacyConversation( conv: Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> ): Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> { if (conv.rootMessageId) return conv; // not a legacy conversation if (conv.messages.length === 0) return conv; // empty conversation const messages = [ { from: "system", content: conv.preprompt ?? "", createdAt: new Date(), updatedAt: new Date(), id: v4(), } satisfies Message, ...conv.messages, ]; const rootMessageId = messages[0].id; const newMessages = messages.map((message, index) => { return { ...message, ancestors: messages.slice(0, index).map((m) => m.id), children: index < messages.length - 1 ? [messages[index + 1].id] : [], }; }); return { ...conv, rootMessageId, messages: newMessages, }; }
chat-ui/src/lib/utils/tree/convertLegacyConversation.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.ts", "repo_id": "chat-ui", "token_count": 354 }
86
import { collections } from "$lib/server/database"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { base } from "$app/paths"; import { config } from "$lib/server/config"; import { ReviewStatus } from "$lib/types/Review"; import { sendSlack } from "$lib/server/sendSlack"; import { z } from "zod"; const schema = z.object({ status: z.nativeEnum(ReviewStatus), }); export async function PATCH({ params, request, locals, url }) { const assistantId = params.id; const { status } = schema.parse(await request.json()); if (!assistantId) { return error(400, "Assistant ID is required"); } const assistant = await collections.assistants.findOne({ _id: new ObjectId(assistantId), }); if (!assistant) { return error(404, "Assistant not found"); } if ( !locals.user || (!locals.isAdmin && assistant.createdById.toString() !== locals.user._id.toString()) ) { return error(403, "Permission denied"); } // only admins can set the status to APPROVED or DENIED // if the status is already APPROVED or DENIED, only admins can change it if ( (status === ReviewStatus.APPROVED || status === ReviewStatus.DENIED || assistant.review === ReviewStatus.APPROVED || assistant.review === ReviewStatus.DENIED) && !locals.isAdmin ) { return error(403, "Permission denied"); } const result = await collections.assistants.updateOne( { _id: assistant._id }, { $set: { review: status } } ); if (result.modifiedCount === 0) { return error(500, "Failed to update review status"); } if (status === ReviewStatus.PENDING) { const prefixUrl = config.PUBLIC_SHARE_PREFIX || `${config.PUBLIC_ORIGIN || url.origin}${base}`; const assistantUrl = `${prefixUrl}/assistant/${assistantId}`; const username = locals.user?.username; await sendSlack( `🟢 Assistant <${assistantUrl}|${assistant?.name}> requested to be featured by ${ username ? `<http://hf.co/${username}|${username}>` : "non-logged in user" }.` ); } return new Response("Review status updated", { status: 200 }); }
chat-ui/src/routes/api/assistant/[id]/review/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/assistant/[id]/review/+server.ts", "repo_id": "chat-ui", "token_count": 708 }
87
import { app } from "$api"; type RequestHandler = (v: { request: Request; locals: App.Locals }) => Response | Promise<Response>; export const GET: RequestHandler = ({ request }) => app.handle(request); export const POST: RequestHandler = ({ request }) => app.handle(request); export const PUT: RequestHandler = ({ request }) => app.handle(request); export const PATCH: RequestHandler = ({ request }) => app.handle(request); export const DELETE: RequestHandler = ({ request }) => app.handle(request);
chat-ui/src/routes/api/v2/[...slugs]/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/v2/[...slugs]/+server.ts", "repo_id": "chat-ui", "token_count": 133 }
88
export async function GET() { return new Response("OK", { status: 200 }); }
chat-ui/src/routes/healthcheck/+server.ts/0
{ "file_path": "chat-ui/src/routes/healthcheck/+server.ts", "repo_id": "chat-ui", "token_count": 22 }
89
import { collections } from "$lib/server/database"; import { z } from "zod"; import { authCondition } from "$lib/server/auth"; import { DEFAULT_SETTINGS, type SettingsEditable } from "$lib/types/Settings"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import { ObjectId } from "mongodb"; export async function POST({ request, locals }) { const body = await request.json(); const { ethicsModalAccepted, ...settings } = z .object({ shareConversationsWithModelAuthors: z .boolean() .default(DEFAULT_SETTINGS.shareConversationsWithModelAuthors), hideEmojiOnSidebar: z.boolean().default(DEFAULT_SETTINGS.hideEmojiOnSidebar), ethicsModalAccepted: z.boolean().optional(), activeModel: z.string().default(DEFAULT_SETTINGS.activeModel), customPrompts: z.record(z.string()).default({}), tools: z.array(z.string()).optional(), disableStream: z.boolean().default(false), directPaste: z.boolean().default(false), }) .parse(body) satisfies SettingsEditable; // make sure all tools exist // either in db or in config if (settings.tools) { const newTools = [ ...(await collections.tools .find({ _id: { $in: settings.tools.map((toolId) => new ObjectId(toolId)) } }) .project({ _id: 1 }) .toArray() .then((tools) => tools.map((tool) => tool._id.toString()))), ...toolFromConfigs .filter((el) => (settings?.tools ?? []).includes(el._id.toString())) .map((el) => el._id.toString()), ]; settings.tools = newTools; } await collections.settings.updateOne( authCondition(locals), { $set: { ...settings, ...(ethicsModalAccepted && { ethicsModalAcceptedAt: new Date() }), updatedAt: new Date(), }, $setOnInsert: { createdAt: new Date(), }, }, { upsert: true, } ); // return ok response return new Response(); }
chat-ui/src/routes/settings/(nav)/+server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+server.ts", "repo_id": "chat-ui", "token_count": 695 }
90
<script lang="ts"> interface Props { type: string; value: string | boolean | number; disabled?: boolean; } let { type, value = $bindable(), disabled = false }: Props = $props(); let innerValue: string | boolean | number = $state( (() => { if (type === "bool") { return Boolean(value) || false; } else if (type === "int" || type === "float") { return Number(value) || 0; } else { return value || ""; } })() ); let previousValue: string | boolean | number = $state(""); $effect(() => { previousValue = innerValue; }); $effect(() => { value = typeof innerValue === "string" ? innerValue : innerValue.toString(); }); </script> {#if type === "str" && typeof innerValue === "string"} <input type="text" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={innerValue} {disabled} /> {:else if type === "int" && typeof innerValue === "number"} <input type="number" step="1" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" {disabled} oninput={(e) => { const value = e.currentTarget.value; if (value === "" || isNaN(parseInt(value))) { innerValue = previousValue; e.currentTarget.value = previousValue.toString(); return; } else { innerValue = parseFloat(value); previousValue = innerValue; } }} value={innerValue} /> {:else if type === "float" && typeof innerValue === "number"} <input type="number" step="0.001" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" {disabled} oninput={(e) => { const value = e.currentTarget.value; if (value === "" || isNaN(parseFloat(value))) { innerValue = previousValue; e.currentTarget.value = previousValue.toString(); return; } else { innerValue = parseFloat(value); previousValue = innerValue; } }} value={innerValue} /> {:else if type === "bool" && typeof innerValue === "boolean"} <input type="checkbox" class="peer my-auto mr-4 size-6 rounded-lg border-2 border-gray-200 bg-gray-100 p-1" bind:checked={innerValue} /> <!-- Literal['bigvgan_24khz_100band', 'bigvgan_base_24khz_100band', 'bigvgan_22khz_80band', 'bigvgan_base_22khz_80band', 'bigvgan_v2_22khz_80band_256x', 'bigvgan_v2_22khz_80band_fmax8k_256x', 'bigvgan_v2_24khz_100band_256x', 'bigvgan_v2_44khz_128band_256x', 'bigvgan_v2_44khz_128band_512x'] --> {:else if type.startsWith("Literal[") && typeof innerValue === "string"} {@const options = type .slice(8, -1) .split(",") .map((option) => option.trim().replaceAll("'", ""))} <select class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={innerValue} {disabled} > {#each options as option} <option value={option}>{option}</option> {/each} </select> {:else} <span class="font-mono text-red-800">{innerValue}-{typeof innerValue}</span> {/if}
chat-ui/src/routes/tools/ToolInputComponent.svelte/0
{ "file_path": "chat-ui/src/routes/tools/ToolInputComponent.svelte", "repo_id": "chat-ui", "token_count": 1186 }
91
.PHONY: quality style test check_dirs := tests src benchmarks utils # Check that source code meets quality standards quality: ruff check $(check_dirs) setup.py # linter ruff format --check $(check_dirs) setup.py # formatter # Format source code automatically style: ruff check --fix $(check_dirs) setup.py # linter ruff format $(check_dirs) setup.py # formatter # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/
datasets/Makefile/0
{ "file_path": "datasets/Makefile", "repo_id": "datasets", "token_count": 148 }
92
# Create a dataset

Sometimes, you may need to create a dataset if you're working with your own data. Creating a dataset with 🤗 Datasets confers all the advantages of the library to your dataset: fast loading and processing, [streaming enormous datasets](stream), [memory-mapping](https://huggingface.co/course/chapter5/4?fw=pt#the-magic-of-memory-mapping), and more. You can easily and rapidly create a dataset with 🤗 Datasets low-code approaches, reducing the time it takes to start training a model. In many cases, it is as easy as [dragging and dropping](upload_dataset#upload-with-the-hub-ui) your data files into a dataset repository on the Hub.

In this tutorial, you'll learn how to use 🤗 Datasets low-code methods for creating all types of datasets:

- Folder-based builders for quickly creating an image or audio dataset
- `from_` methods for creating datasets from local files

## File-based builders

🤗 Datasets supports many common formats such as `csv`, `json/jsonl`, `parquet`, `txt`.

For example it can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list):

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
```

To get the list of supported formats and code examples, follow this guide [here](https://huggingface.co/docs/datasets/loading#local-and-remote-files).

## Folder-based builders

There are two folder-based builders, [`ImageFolder`] and [`AudioFolder`]. These are low-code methods for quickly creating an image or speech and audio dataset with several thousand examples. They are great for rapidly prototyping computer vision and speech models before scaling to a larger dataset. Folder-based builders take your data and automatically generate the dataset's features, splits, and labels. Under the hood:

- [`ImageFolder`] uses the [`~datasets.Image`] feature to decode an image file. Many image extension formats are supported, such as jpg and png, but other formats are also supported. You can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/imagefolder/imagefolder.py#L39) of supported image extensions.
- [`AudioFolder`] uses the [`~datasets.Audio`] feature to decode an audio file. Extensions such as wav, mp3, and even mp4 are supported, and you can check the complete [list](https://ffmpeg.org/ffmpeg-formats.html) of supported audio extensions. Decoding is done via ffmpeg.

The dataset splits are generated from the repository structure, and the label names are automatically inferred from the directory name.
For example, if your image dataset (it is the same for an audio dataset) is stored like this:

```
pokemon/train/grass/bulbasaur.png
pokemon/train/fire/charmander.png
pokemon/train/water/squirtle.png
pokemon/test/grass/ivysaur.png
pokemon/test/fire/charmeleon.png
pokemon/test/water/wartortle.png
```

Then this is how the folder-based builder generates an example:

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/folder-based-builder.png" />
</div>

Create the image dataset by specifying `imagefolder` in [`load_dataset`]:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/pokemon")
```

An audio dataset is created in the same way, except you specify `audiofolder` in [`load_dataset`] instead:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
```

Any additional information about your dataset, such as text captions or transcriptions, can be included with a `metadata.csv` file in the folder containing your dataset. The metadata file needs to have a `file_name` column that links the image or audio file to its corresponding metadata:

```
file_name, text
bulbasaur.png, There is a plant seed on its back right from the day this Pokémon is born.
charmander.png, It has a preference for hot things.
squirtle.png, When it retracts its long neck into its shell, it squirts out water with vigorous force.
```

To learn more about each of these folder-based builders, check out the <a href="https://huggingface.co/docs/datasets/image_dataset#imagefolder"><span class="underline decoration-yellow-400 decoration-2 font-semibold">ImageFolder</span></a> or <a href="https://huggingface.co/docs/datasets/audio_dataset#audiofolder"><span class="underline decoration-pink-400 decoration-2 font-semibold">AudioFolder</span></a> guides.

## From Python dictionaries

You can also create a dataset from data in Python dictionaries. There are two ways you can create a dataset using the `from_` methods:

* The [`~Dataset.from_generator`] method is the most memory-efficient way to create a dataset from a [generator](https://wiki.python.org/moin/Generators) due to a generator's iterative behavior. This is especially useful when you're working with a really large dataset that may not fit in memory, since the dataset is generated on disk progressively and then memory-mapped.

  ```py
  >>> from datasets import Dataset
  >>> def gen():
  ...     yield {"pokemon": "bulbasaur", "type": "grass"}
  ...     yield {"pokemon": "squirtle", "type": "water"}
  >>> ds = Dataset.from_generator(gen)
  >>> ds[0]
  {"pokemon": "bulbasaur", "type": "grass"}
  ```

  A generator-based [`IterableDataset`] needs to be iterated over with a `for` loop, for example:

  ```py
  >>> from datasets import IterableDataset
  >>> ds = IterableDataset.from_generator(gen)
  >>> for example in ds:
  ...     print(example)
  {"pokemon": "bulbasaur", "type": "grass"}
  {"pokemon": "squirtle", "type": "water"}
  ```

* The [`~Dataset.from_dict`] method is a straightforward way to create a dataset from a dictionary:

  ```py
  >>> from datasets import Dataset
  >>> ds = Dataset.from_dict({"pokemon": ["bulbasaur", "squirtle"], "type": ["grass", "water"]})
  >>> ds[0]
  {"pokemon": "bulbasaur", "type": "grass"}
  ```

  To create an image or audio dataset, chain the [`~Dataset.cast_column`] method with [`~Dataset.from_dict`] and specify the column and feature type.
For example, to create an audio dataset: ```py >>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", ..., "path/to/audio_n"]}).cast_column("audio", Audio()) ``` Now that you know how to create a dataset, consider sharing it on the Hub so the community can also benefit from your work! Go on to the next section to learn how to share your dataset.
datasets/docs/source/create_dataset.mdx/0
{ "file_path": "datasets/docs/source/create_dataset.mdx", "repo_id": "datasets", "token_count": 1994 }
93
# Load a dataset from the Hub Finding high-quality datasets that are reproducible and accessible can be difficult. One of 🤗 Datasets main goals is to provide a simple way to load a dataset of any format or type. The easiest way to get started is to discover an existing dataset on the [Hugging Face Hub](https://huggingface.co/datasets) - a community-driven collection of datasets for tasks in NLP, computer vision, and audio - and use 🤗 Datasets to download and generate the dataset. This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) and [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) datasets, but feel free to load any dataset you want and follow along. Head over to the Hub now and find a dataset for your task! ## Load a dataset Before you take the time to download a dataset, it's often helpful to quickly get some general information about a dataset. A dataset's information is stored inside [`DatasetInfo`] and can include information such as the dataset description, features, and dataset size. Use the [`load_dataset_builder`] function to load a dataset builder and inspect a dataset's attributes without committing to downloading it: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes") # Inspect dataset description >>> ds_builder.info.description Movie Review Dataset. This is a dataset of containing 5,331 positive and 5,331 negative processed sentences from Rotten Tomatoes movie reviews. This data was first used in Bo Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales.'', Proceedings of the ACL, 2005. # Inspect dataset features >>> ds_builder.info.features {'label': ClassLabel(names=['neg', 'pos']), 'text': Value('string')} ``` If you're happy with the dataset, then load it with [`load_dataset`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") ``` ## Splits A split is a specific subset of a dataset like `train` and `test`. List a dataset's split names with the [`get_dataset_split_names`] function: ```py >>> from datasets import get_dataset_split_names >>> get_dataset_split_names("cornell-movie-review-data/rotten_tomatoes") ['train', 'validation', 'test'] ``` Then you can load a specific split with the `split` parameter. Loading a dataset `split` returns a [`Dataset`] object: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") >>> dataset Dataset({ features: ['text', 'label'], num_rows: 8530 }) ``` If you don't specify a `split`, 🤗 Datasets returns a [`DatasetDict`] object instead: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes") DatasetDict({ train: Dataset({ features: ['text', 'label'], num_rows: 8530 }) validation: Dataset({ features: ['text', 'label'], num_rows: 1066 }) test: Dataset({ features: ['text', 'label'], num_rows: 1066 }) }) ``` ## Configurations Some datasets contain several sub-datasets. For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has several sub-datasets, each one containing audio data in a different language. These sub-datasets are known as *configurations* or *subsets*, and you must explicitly select one when loading the dataset. 
If you don't provide a configuration name, 🤗 Datasets will raise a `ValueError` and remind you to choose a configuration. Use the [`get_dataset_config_names`] function to retrieve a list of all the possible configurations available to your dataset: ```py >>> from datasets import get_dataset_config_names >>> configs = get_dataset_config_names("PolyAI/minds14") >>> print(configs) ['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN', 'all'] ``` Then load the configuration you want: ```py >>> from datasets import load_dataset >>> mindsFR = load_dataset("PolyAI/minds14", "fr-FR", split="train") ```
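The configuration listing above also includes an `all` subset; as a minimal sketch (assuming you want every language combined into a single split rather than one split per configuration), it loads the same way:

```py
>>> from datasets import load_dataset
>>> minds_all = load_dataset("PolyAI/minds14", "all", split="train")
```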
datasets/docs/source/load_hub.mdx/0
{ "file_path": "datasets/docs/source/load_hub.mdx", "repo_id": "datasets", "token_count": 1342 }
94
# Load tabular data

A tabular dataset is a generic dataset used to describe any data stored in rows and columns, where the rows represent an example and the columns represent a feature (can be continuous or categorical). These datasets are commonly stored in CSV files, Pandas DataFrames, and in database tables. This guide will show you how to load and create a tabular dataset from:

- CSV files
- Pandas DataFrames
- Databases

## CSV files

🤗 Datasets can read CSV files by specifying the generic `csv` dataset builder name in the [`~datasets.load_dataset`] method. To load more than one CSV file, pass them as a list to the `data_files` parameter:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")

# load multiple CSV files
>>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv", "my_file_3.csv"])
```

You can also map specific CSV files to the train and test splits:

```py
>>> dataset = load_dataset("csv", data_files={"train": ["my_train_file_1.csv", "my_train_file_2.csv"], "test": "my_test_file.csv"})
```

To load remote CSV files, pass the URLs instead:

```py
>>> base_url = "https://huggingface.co/datasets/lhoestq/demo1/resolve/main/data/"
>>> dataset = load_dataset('csv', data_files={"train": base_url + "train.csv", "test": base_url + "test.csv"})
```

To load zipped CSV files:

```py
>>> url = "https://domain.org/train_data.zip"
>>> data_files = {"train": url}
>>> dataset = load_dataset("csv", data_files=data_files)
```

## Pandas DataFrames

🤗 Datasets also supports loading datasets from [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) with the [`~datasets.Dataset.from_pandas`] method:

```py
>>> from datasets import Dataset
>>> import pandas as pd

# create a Pandas DataFrame
>>> df = pd.read_csv("https://huggingface.co/datasets/imodels/credit-card/raw/main/train.csv")
>>> df = pd.DataFrame(df)

# load Dataset from Pandas DataFrame
>>> dataset = Dataset.from_pandas(df)
```

Use the `split` parameter to specify the name of the dataset split:

```py
>>> train_ds = Dataset.from_pandas(train_df, split="train")
>>> test_ds = Dataset.from_pandas(test_df, split="test")
```

If the dataset doesn't look as expected, you should explicitly [specify your dataset features](loading#specify-features). A [pandas.Series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) may not always carry enough information for Arrow to automatically infer a data type. For example, if a DataFrame is of length `0` or if the Series only contains `None/NaN` objects, the type is set to `null`.

## Databases

Datasets stored in databases are typically accessed with SQL queries. With 🤗 Datasets, you can connect to a database, query for the data you need, and create a dataset out of it. Then you can use all the processing features of 🤗 Datasets to prepare your dataset for training.

### SQLite

SQLite is a small, lightweight database that is fast and easy to set up. You can use an existing database if you'd like, or follow along and start from scratch.
Start by creating a quick SQLite database with this [Covid-19 data](https://github.com/nytimes/covid-19-data/blob/master/us-states.csv) from the New York Times:

```py
>>> import sqlite3
>>> import pandas as pd

>>> conn = sqlite3.connect("us_covid_data.db")
>>> df = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv")
>>> df.to_sql("states", conn, if_exists="replace")
```

This creates a `states` table in the `us_covid_data.db` database which you can now load into a dataset.

To connect to the database, you'll need the [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) that identifies your database. Connecting to a database with a URI caches the returned dataset. The URI string differs for each database dialect, so be sure to check the [Database URLs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) for whichever database you're using.

For SQLite, it is:

```py
>>> uri = "sqlite:///us_covid_data.db"
```

Load the table by passing the table name and URI to [`~datasets.Dataset.from_sql`]:

```py
>>> from datasets import Dataset

>>> ds = Dataset.from_sql("states", uri)
>>> ds
Dataset({
    features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
    num_rows: 54382
})
```

Then you can use all of 🤗 Datasets processing features, like [`~datasets.Dataset.filter`] for example:

```py
>>> ds.filter(lambda x: x["state"] == "California")
```

You can also load a dataset from a SQL query instead of an entire table, which is useful for querying and joining multiple tables.

Load the dataset by passing your query and URI to [`~datasets.Dataset.from_sql`]:

```py
>>> from datasets import Dataset

>>> ds = Dataset.from_sql('SELECT * FROM states WHERE state="California";', uri)
>>> ds
Dataset({
    features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
    num_rows: 1019
})
```

Then you can use all of 🤗 Datasets processing features, like [`~datasets.Dataset.filter`] for example:

```py
>>> ds.filter(lambda x: x["cases"] > 10000)
```

### PostgreSQL

You can also connect and load a dataset from a PostgreSQL database, but we won't demonstrate it directly in the documentation because the example is meant to be run in a notebook. Instead, take a look at how to install and set up a PostgreSQL server in this [notebook](https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/sql_with_huggingface_datasets.ipynb#scrollTo=d83yGQMPHGFi)!

After you've set up your PostgreSQL database, you can use the [`~datasets.Dataset.from_sql`] method to load a dataset from a table or query.
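As a rough sketch only — the host, credentials, database name, and the installed driver (for example `sqlalchemy` with `psycopg2`) are all assumptions here — the call itself looks the same as for SQLite once you have a connection URI:

```py
>>> from datasets import Dataset

>>> uri = "postgresql://username:password@localhost:5432/us_covid_data"  # hypothetical connection details
>>> ds = Dataset.from_sql("states", uri)
```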
datasets/docs/source/tabular_load.mdx/0
{ "file_path": "datasets/docs/source/tabular_load.mdx", "repo_id": "datasets", "token_count": 1868 }
95
[tool.ruff] line-length = 119 [tool.ruff.lint] # Ignored rules: # "E501" -> line length violation # "F821" -> undefined named in type annotation (e.g. Literal["something"]) # "C901" -> `function_name` is too complex ignore = ["E501", "F821", "C901"] select = ["C", "E", "F", "I", "W"] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["datasets"] [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401", "F403", "F405"] [tool.pytest.ini_options] # Test fails if a FutureWarning is thrown by `huggingface_hub` filterwarnings = [ "error::FutureWarning:huggingface_hub*", ] markers = [ "unit: unit test", "integration: integration test", ]
datasets/pyproject.toml/0
{ "file_path": "datasets/pyproject.toml", "repo_id": "datasets", "token_count": 274 }
96
from typing import TypeVar from .arrow_dataset import Dataset, _split_by_node_map_style_dataset from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: """ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. To maximize data loading throughput, chunks are made of contiguous data on disk if possible. For iterable datasets: If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.num_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. Args: dataset ([`Dataset`] or [`IterableDataset`]): The dataset to split by node. rank (`int`): Rank of the current node. world_size (`int`): Total number of nodes. Returns: [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`. """ if isinstance(dataset, Dataset): return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size) else: return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
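# Minimal usage sketch (illustrative, not part of the library API): assumes a
# distributed launcher such as torchrun sets the RANK and WORLD_SIZE environment
# variables and that the streamed dataset is reachable from every node.
if __name__ == "__main__":
    import os

    from datasets import load_dataset

    rank = int(os.environ.get("RANK", 0))
    world_size = int(os.environ.get("WORLD_SIZE", 1))

    # Stream a dataset and keep only this node's share of the data.
    ds = load_dataset("allenai/c4", "en", split="train", streaming=True)
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    print(next(iter(ds)))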
datasets/src/datasets/distributed.py/0
{ "file_path": "datasets/src/datasets/distributed.py", "repo_id": "datasets", "token_count": 582 }
97
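A hedged usage sketch of `split_dataset_by_node` follows, in the style of a multi-process `torchrun` launch. The `RANK`/`WORLD_SIZE` environment variables are the conventional `torch.distributed` ones rather than something this module mandates, and the `data_files` glob is a placeholder.

```py
# Hedged sketch: sharding a streaming dataset across workers launched with torchrun.
import os

from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

rank = int(os.environ.get("RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))

# Placeholder data files; any streaming or map-style dataset works here.
ds = load_dataset("json", data_files="data/*.jsonl", split="train", streaming=True)
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)

for example in ds:
    ...  # each rank iterates over a disjoint subset of the examples
```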
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Optional, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.logging import get_logger if TYPE_CHECKING: from .arrow_dataset import Dataset logger = get_logger(__name__) # Fingerprinting allows having one deterministic fingerprint per dataset state. # A dataset fingerprint is updated after each transform. # Re-running the same transforms on a dataset in a different session results in the same fingerprint. # This is possible thanks to a custom hashing function that works with most python objects. # Fingerprinting is the main mechanism that enables caching. # The caching mechanism allows reloading an existing cache file if it's already been computed. ################# # Caching ################# _CACHING_ENABLED = True _TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None _DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None class _TempCacheDir: """ A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files before deleting the directory itself to avoid permission errors on Windows. """ def __init__(self): self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX) self._finalizer = weakref.finalize(self, self._cleanup) def _cleanup(self): for dset in get_datasets_with_cache_file_in_temp_dir(): dset.__del__() if os.path.exists(self.name): try: shutil.rmtree(self.name) except Exception as e: raise OSError( f"An error occurred while trying to delete temporary cache directory {self.name}. Please delete it manually." ) from e def cleanup(self): if self._finalizer.detach(): self._cleanup() def maybe_register_dataset_for_temp_dir_deletion(dataset): """ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order to properly delete them before deleting the temporary directory. The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled. """ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: return global _DATASETS_WITH_TABLE_IN_TEMP_DIR if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None: _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet() if any( Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents for cache_file in dataset.cache_files ): _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset) def get_datasets_with_cache_file_in_temp_dir(): return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else [] def enable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows reloading an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached dataset files when applying transforms to the datasets.
More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = True def disable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows reloading an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached dataset files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = False def is_caching_enabled() -> bool: """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows reloading an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached dataset files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`].
""" global _CACHING_ENABLED return bool(_CACHING_ENABLED) def get_temporary_cache_files_directory() -> str: """Return a directory that is deleted when session closes.""" global _TEMP_DIR_FOR_TEMP_CACHE_FILES if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir() return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name ################# # Hashing ################# class Hasher: """Hasher that accepts python objects as inputs.""" dispatch: dict = {} def __init__(self): self.m = xxhash.xxh64() @classmethod def hash_bytes(cls, value: Union[bytes, list[bytes]]) -> str: value = [value] if isinstance(value, bytes) else value m = xxhash.xxh64() for x in value: m.update(x) return m.hexdigest() @classmethod def hash(cls, value: Any) -> str: return cls.hash_bytes(dumps(value)) def update(self, value: Any) -> None: header_for_update = f"=={type(value)}==" value_for_update = self.hash(value) self.m.update(header_for_update.encode("utf8")) self.m.update(value_for_update.encode("utf-8")) def hexdigest(self) -> str: return self.m.hexdigest() ################# # Fingerprinting ################# fingerprint_rng = random.Random() # we show a warning only once when fingerprinting fails to avoid spam fingerprint_warnings: dict[str, bool] = {} def generate_fingerprint(dataset: "Dataset") -> str: state = dataset.__dict__ hasher = Hasher() for key in sorted(state): if key == "_fingerprint": continue hasher.update(key) hasher.update(state[key]) # hash data files last modification timestamps as well for cache_file in dataset.cache_files: hasher.update(os.path.getmtime(cache_file["filename"])) return hasher.hexdigest() def generate_random_fingerprint(nbits: int = 64) -> str: return f"{fingerprint_rng.getrandbits(nbits):0{nbits // 4}x}" def update_fingerprint(fingerprint, transform, transform_args): global fingerprint_warnings hasher = Hasher() hasher.update(fingerprint) try: hasher.update(transform) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. " "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only shown once. Subsequent hashing failures won't be shown." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.") else: logger.info( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() for key in sorted(transform_args): hasher.update(key) try: hasher.update(transform_args[key]) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work.
" "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only shown once. Subsequent hashing failures won't be shown." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead." ) else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() return hasher.hexdigest() def validate_fingerprint(fingerprint: str, max_length=64): """ Make sure the fingerprint is a non-empty string that is not longer than max_length=64 by default, so that the fingerprint can be used to name cache files without issues. """ if not isinstance(fingerprint, str) or not fingerprint: raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.") for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: if invalid_char in fingerprint: raise ValueError( f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. " f"They could create issues when creating cache files." ) if len(fingerprint) > max_length: raise ValueError( f"Invalid fingerprint. Maximum length is {max_length} but '{fingerprint}' has length {len(fingerprint)}. " "It could create issues when creating cache files." ) def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str: """ Format a transform to the format that will be used to update the fingerprint. """ transform = f"{func.__module__}.{func.__qualname__}" if version is not None: transform += f"@{version}" return transform def format_kwargs_for_fingerprint( func: Callable, args: tuple, kwargs: dict[str, Any], use_kwargs: Optional[list[str]] = None, ignore_kwargs: Optional[list[str]] = None, randomized_function: bool = False, ) -> dict[str, Any]: """ Format the kwargs of a transform to the format that will be used to update the fingerprint.
""" kwargs_for_fingerprint = kwargs.copy() if args: params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD] args = args[1:] # assume the first argument is the dataset params = params[1:] kwargs_for_fingerprint.update(zip(params, args)) else: del kwargs_for_fingerprint[ next(iter(inspect.signature(func).parameters)) ] # assume the first key is the dataset # keep the right kwargs to be hashed to generate the fingerprint if use_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs} if ignore_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs} if randomized_function: # randomized functions have `seed` and `generator` parameters if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] kwargs_for_fingerprint["generator"] = np.random.default_rng(seed) # remove kwargs that are the default values default_values = { p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty } for default_varname, default_value in default_values.items(): if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value: kwargs_for_fingerprint.pop(default_varname) return kwargs_for_fingerprint def fingerprint_transform( inplace: bool, use_kwargs: Optional[list[str]] = None, ignore_kwargs: Optional[list[str]] = None, fingerprint_names: Optional[list[str]] = None, randomized_function: bool = False, version: Optional[str] = None, ): """ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` Args: inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of setting the fingerprint of the returned Dataset. use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account to update the fingerprint. By default all the arguments are used. ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to ignore when updating the fingerprint. Note that ignore_kwargs prevails on use_kwargs. fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): If the dataset transform is not inplace and returns a DatasetDict, then it can require several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, one fingerprint named after each element of fingerprint_names is going to be passed. randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has optional parameters "seed" and "generator", then you can set randomized_function to True. This way, even if users set "seed" and "generator" to None, then the fingerprint is going to be randomly generated depending on numpy's current state. In this case, the generator is set to np.random.default_rng(np.random.get_state()[1][0]). version (:obj:`str`, optional): version of the transform. The version is taken into account when computing the fingerprint. If a dataset transform changes (or at least if the output data that are cached changes), then one should increase the version.
If the version stays the same, old cached data that is not compatible with the new transform could be reused. It should be in the format "MAJOR.MINOR.PATCH". """ if use_kwargs is not None and not isinstance(use_kwargs, list): raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}") if ignore_kwargs is not None and not isinstance(ignore_kwargs, list): raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(ignore_kwargs)}") if inplace and fingerprint_names: raise ValueError("fingerprint_names are only used when inplace is False") fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"] def _fingerprint(func): if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names): raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature") if randomized_function: # randomized functions have seed and generator parameters if "seed" not in func.__code__.co_varnames: raise ValueError(f"'seed' must be in {func}'s signature") if "generator" not in func.__code__.co_varnames: raise ValueError(f"'generator' must be in {func}'s signature") # this call has to be outside the wrapper since __qualname__ changes in multiprocessing transform = format_transform_for_fingerprint(func, version=version) @wraps(func) def wrapper(*args, **kwargs): kwargs_for_fingerprint = format_kwargs_for_fingerprint( func, args, kwargs, use_kwargs=use_kwargs, ignore_kwargs=ignore_kwargs, randomized_function=randomized_function, ) if args: dataset: Dataset = args[0] args = args[1:] else: dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters))) # compute new_fingerprint and add it to the args of non-inplace transforms if inplace: new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint) else: for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes if kwargs.get(fingerprint_name) is None: kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name kwargs[fingerprint_name] = update_fingerprint( dataset._fingerprint, transform, kwargs_for_fingerprint ) else: validate_fingerprint(kwargs[fingerprint_name]) # Call actual function out = func(dataset, *args, **kwargs) # Update fingerprint of in-place transforms + update in-place history of transforms if inplace: # update after calling func so that the fingerprint doesn't change if the function fails dataset._fingerprint = new_fingerprint return out wrapper._decorator_name_ = "fingerprint" return wrapper return _fingerprint
datasets/src/datasets/fingerprint.py/0
{ "file_path": "datasets/src/datasets/fingerprint.py", "repo_id": "datasets", "token_count": 7513 }
98
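A hedged sketch of how the public caching toggles and the `Hasher` defined in this module fit together follows. The map function is an arbitrary illustration, not something from the library, and inspecting `_fingerprint` relies on a private attribute.

```py
# Hedged sketch of the caching toggles and deterministic hashing behind dataset fingerprints.
from datasets import Dataset, disable_caching, enable_caching, is_caching_enabled
from datasets.fingerprint import Hasher


def add_length(example):
    return {"length": len(example["text"])}


# Hashing the same callable twice should give the same digest, which is what lets
# `map` reuse a cache file instead of recomputing the transform.
print(Hasher.hash(add_length) == Hasher.hash(add_length))  # True

disable_caching()            # transformed datasets now go to a temporary directory
print(is_caching_enabled())  # False
enable_caching()

ds = Dataset.from_dict({"text": ["ab", "abc"]})
ds = ds.map(add_length)      # cached under a fingerprint derived from the function and its kwargs
print(ds._fingerprint)       # private attribute, shown only for illustration
```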
import os from typing import BinaryIO, Optional, Union import fsspec import pyarrow.parquet as pq from .. import Dataset, Features, NamedSplit, config from ..arrow_writer import get_writer_batch_size from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class ParquetDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} hash = _PACKAGED_DATASETS_MODULES["parquet"][1] self.builder = Parquet( cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class ParquetDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ): self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size or get_writer_batch_size(dataset.features) self.storage_options = storage_options or {} self.parquet_writer_kwargs = parquet_writer_kwargs def write(self) -> int: batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer: written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) else: written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) return written def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 _ = parquet_writer_kwargs.pop("path_or_buf", None) schema = self.dataset.features.arrow_schema writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) for offset in hf_tqdm( range(0, len(self.dataset), batch_size), unit="ba", desc="Creating parquet from Arrow format", ): batch = query_table( table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices, ) writer.write_table(batch) written += batch.nbytes writer.close() return written
datasets/src/datasets/io/parquet.py/0
{ "file_path": "datasets/src/datasets/io/parquet.py", "repo_id": "datasets", "token_count": 2023 }
99
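These reader and writer classes are usually reached through the `Dataset.from_parquet` and `Dataset.to_parquet` convenience methods rather than instantiated directly. A minimal, hedged sketch with a placeholder file name:

```py
# Hedged sketch: Parquet round trip through the convenience methods that wrap the classes above.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})

ds.to_parquet("example.parquet")                    # ParquetDatasetWriter under the hood
reloaded = Dataset.from_parquet("example.parquet")  # ParquetDatasetReader under the hood
print(reloaded.features, reloaded.num_rows)
```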
import itertools from dataclasses import dataclass from typing import Any, Callable, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal logger = datasets.utils.logging.get_logger(__name__) _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"] @dataclass class CsvConfig(datasets.BuilderConfig): """BuilderConfig for CSV.""" sep: str = "," delimiter: Optional[str] = None header: Optional[Union[int, list[int], str]] = "infer" names: Optional[list[str]] = None column_names: Optional[list[str]] = None index_col: Optional[Union[int, str, list[int], list[str]]] = None usecols: Optional[Union[list[int], list[str]]] = None prefix: Optional[str] = None mangle_dupe_cols: bool = True engine: Optional[Literal["c", "python", "pyarrow"]] = None converters: dict[Union[int, str], Callable[[Any], Any]] = None true_values: Optional[list] = None false_values: Optional[list] = None skipinitialspace: bool = False skiprows: Optional[Union[int, list[int]]] = None nrows: Optional[int] = None na_values: Optional[Union[str, list[str]]] = None keep_default_na: bool = True na_filter: bool = True verbose: bool = False skip_blank_lines: bool = True thousands: Optional[str] = None decimal: str = "." lineterminator: Optional[str] = None quotechar: str = '"' quoting: int = 0 escapechar: Optional[str] = None comment: Optional[str] = None encoding: Optional[str] = None dialect: Optional[str] = None error_bad_lines: bool = True warn_bad_lines: bool = True skipfooter: int = 0 doublequote: bool = True memory_map: bool = False float_precision: Optional[str] = None chunksize: int = 10_000 features: Optional[datasets.Features] = None encoding_errors: Optional[str] = "strict" on_bad_lines: Literal["error", "warn", "skip"] = "error" date_format: Optional[str] = None def __post_init__(self): super().__post_init__() if self.delimiter is not None: self.sep = self.delimiter if self.column_names is not None: self.names = self.column_names @property def pd_read_csv_kwargs(self): pd_read_csv_kwargs = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": 
self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.2 deprecated arguments if datasets.config.PANDAS_VERSION.release >= (2, 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class Csv(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = CsvConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self, files): schema = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str dtype = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values()) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) try: for batch_idx, df in enumerate(csv_file_reader): pa_table = pa.Table.from_pandas(df) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
datasets/src/datasets/packaged_modules/csv/csv.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/csv/csv.py", "repo_id": "datasets", "token_count": 3883 }
100
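The `CsvConfig` fields above surface as keyword arguments of `load_dataset("csv", ...)`. A hedged sketch follows; the file name and column names are placeholders.

```py
# Hedged sketch: loading a tab-separated file through the CSV builder defined above.
from datasets import load_dataset

ds = load_dataset(
    "csv",
    data_files="data.tsv",
    sep="\t",                        # forwarded to pandas.read_csv via CsvConfig
    column_names=["text", "label"],  # aliased to `names` in CsvConfig.__post_init__
    skiprows=1,
    split="train",
)
print(ds[0])
```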
import datasets from ..folder_based_builder import folder_based_builder logger = datasets.utils.logging.get_logger(__name__) class PdfFolderConfig(folder_based_builder.FolderBasedBuilderConfig): """BuilderConfig for PdfFolder.""" drop_labels: bool = None drop_metadata: bool = None def __post_init__(self): super().__post_init__() class PdfFolder(folder_based_builder.FolderBasedBuilder): BASE_FEATURE = datasets.Pdf BASE_COLUMN_NAME = "pdf" BUILDER_CONFIG_CLASS = PdfFolderConfig EXTENSIONS: list[str] = [".pdf"]
datasets/src/datasets/packaged_modules/pdffolder/pdffolder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/pdffolder/pdffolder.py", "repo_id": "datasets", "token_count": 201 }
101
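A hedged sketch of loading a local folder of PDFs with the builder defined above. The directory layout is hypothetical, and decoding assumes the optional PDF dependencies of `datasets` are installed.

```py
# Hedged sketch: loading a folder of PDFs with the pdffolder builder.
from datasets import load_dataset

# Assumed layout (placeholder paths):
#   my_pdfs/train/report_001.pdf
#   my_pdfs/train/report_002.pdf
ds = load_dataset("pdffolder", data_dir="my_pdfs", split="train")
print(ds.features)  # expected to include a "pdf" column backed by the Pdf feature
```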
import importlib.util import os import tempfile from pathlib import PurePath from typing import TYPE_CHECKING, NamedTuple, Optional, Union import fsspec import numpy as np from .features import List from .utils import logging from .utils import tqdm as hf_tqdm if TYPE_CHECKING: from .arrow_dataset import Dataset # noqa: F401 try: from elasticsearch import Elasticsearch # noqa: F401 except ImportError: pass try: import faiss # noqa: F401 except ImportError: pass _has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None _has_faiss = importlib.util.find_spec("faiss") is not None logger = logging.get_logger(__name__) class MissingIndex(Exception): pass class SearchResults(NamedTuple): scores: list[float] indices: list[int] class BatchedSearchResults(NamedTuple): total_scores: list[list[float]] total_indices: list[list[int]] class NearestExamplesResults(NamedTuple): scores: list[float] examples: dict class BatchedNearestExamplesResults(NamedTuple): total_scores: list[list[float]] total_examples: list[dict] class BaseIndex: """Base class for indexing""" def search(self, query, k: int = 10, **kwargs) -> SearchResults: """ To implement. This method has to return the scores and the indices of the retrieved examples given a certain query. """ raise NotImplementedError def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults: """Find the nearest examples indices to the query. Args: queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index. k (`int`): The number of examples to retrieve per query. Output: total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query. total_indices (`List[List[int]]`): The indices of the retrieved examples per query. """ total_scores, total_indices = [], [] for query in queries: scores, indices = self.search(query, k) total_scores.append(scores) total_indices.append(indices) return BatchedSearchResults(total_scores, total_indices) def save(self, file: Union[str, PurePath]): """Serialize the index on disk""" raise NotImplementedError @classmethod def load(cls, file: Union[str, PurePath]) -> "BaseIndex": """Deserialize the index from disk""" raise NotImplementedError class ElasticSearchIndex(BaseIndex): """ Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity. An Elasticsearch server needs to be accessible, and a python client is declared with ``` es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}]) ``` for example. """ def __init__( self, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): if not _has_elasticsearch: raise ImportError( "You must install ElasticSearch to use ElasticSearchIndex. 
To do so you can run `pip install elasticsearch==7.7.1 for example`" ) if es_client is not None and (host is not None or port is not None): raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.") host = host or "localhost" port = port or 9200 import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features from elasticsearch import Elasticsearch # noqa: F811 self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}]) self.es_index_name = ( es_index_name if es_index_name is not None else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name) ) self.es_index_config = ( es_index_config if es_index_config is not None else { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}}, } ) def add_documents(self, documents: Union[list[str], "Dataset"], column: Optional[str] = None): """ Add documents to the index. If the documents are inside a certain column, you can specify it using the `column` argument. """ index_name = self.es_index_name index_config = self.es_index_config self.es_client.indices.create(index=index_name, body=index_config) number_of_docs = len(documents) progress = hf_tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): if column is not None: for i, example in enumerate(documents): yield {"text": example[column], "_id": i} else: for i, example in enumerate(documents): yield {"text": example, "_id": i} # create the ES index import elasticsearch as es for ok, action in es.helpers.streaming_bulk( client=self.es_client, index=index_name, actions=passage_generator(), ): progress.update(1) successes += ok if successes != len(documents): logger.warning( f"Some documents failed to be added to ElasticSearch. Failures: {len(documents) - successes}/{len(documents)}" ) logger.info(f"Indexed {successes:d} documents") def search(self, query: str, k=10, **kwargs) -> SearchResults: """Find the nearest examples indices to the query. Args: query (`str`): The query as a string. k (`int`): The number of examples to retrieve. Output: scores (`List[List[float]`): The retrieval scores of the retrieved examples. indices (`List[List[int]]`): The indices of the retrieved examples. """ response = self.es_client.search( index=self.es_index_name, body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k}, **kwargs, ) hits = response["hits"]["hits"] return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits]) def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults: import concurrent.futures total_scores, total_indices = [None] * len(queries), [None] * len(queries) with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)} for future in concurrent.futures.as_completed(future_to_index): index = future_to_index[future] results: SearchResults = future.result() total_scores[index] = results.scores total_indices[index] = results.indices return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores) class FaissIndex(BaseIndex): """ Dense index using Faiss. It is used to index vectors. 
Faiss is a library for efficient similarity search and clustering of dense vectors. It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM. You can find more information about Faiss here: - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU """ def __init__( self, device: Optional[Union[int, list[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, ): """ Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory """ if string_factory is not None and custom_index is not None: raise ValueError("Please specify either `string_factory` or `custom_index` but not both.") if device is not None and custom_index is not None: raise ValueError( "Cannot pass both 'custom_index' and 'device'. " "Pass 'custom_index' already transferred to the target device instead." ) self.device = device self.string_factory = string_factory self.metric_type = metric_type self.faiss_index = custom_index if not _has_faiss: raise ImportError( "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. " "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. " "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available." ) def add_vectors( self, vectors: Union[np.array, "Dataset"], column: Optional[str] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: Optional[bool] = None, ): """ Add vectors to the index. If the arrays are inside a certain column, you can specify it using the `column` argument. """ import faiss # noqa: F811 if column and not isinstance(vectors.features[column], List): raise ValueError( f"Wrong feature type for column '{column}'. 
Expected 1d array, got {vectors.features[column]}" ) # Create index if self.faiss_index is None: size = len(vectors[0]) if column is None else len(vectors[0][column]) if self.string_factory is not None: if self.metric_type is None: index = faiss.index_factory(size, self.string_factory) else: index = faiss.index_factory(size, self.string_factory, self.metric_type) else: if self.metric_type is None: index = faiss.IndexFlat(size) else: index = faiss.IndexFlat(size, self.metric_type) self.faiss_index = self._faiss_index_to_device(index, self.device) logger.info(f"Created faiss index of type {type(self.faiss_index)}") # Set verbosity level if faiss_verbose is not None: self.faiss_index.verbose = faiss_verbose if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None: self.faiss_index.index.verbose = faiss_verbose if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None: self.faiss_index.quantizer.verbose = faiss_verbose if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None: self.faiss_index.clustering_index.verbose = faiss_verbose # Train if train_size is not None: train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column] logger.info(f"Training the index with the first {len(train_vecs)} vectors") self.faiss_index.train(train_vecs) else: logger.info("Ignored the training step of the faiss index as `train_size` is None.") # Add vectors logger.info(f"Adding {len(vectors)} vectors to the faiss index") for i in hf_tqdm(range(0, len(vectors), batch_size)): vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column] self.faiss_index.add(vecs) @staticmethod def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, list[int]]] = None) -> "faiss.Index": """ Sends a faiss index to a device. A device can either be a positive integer (GPU id), a negative integer (all GPUs), or a list of positive integers (select GPUs to use), or `None` for CPU. """ # If device is not specified, then it runs on CPU. if device is None: return index import faiss # noqa: F811 # If the device id is given as an integer if isinstance(device, int): # Positive integers are directly mapped to GPU ids if device > -1: faiss_res = faiss.StandardGpuResources() index = faiss.index_cpu_to_gpu(faiss_res, device, index) # And negative integers mean using all GPUs else: index = faiss.index_cpu_to_all_gpus(index) # Device ids given as a list mean mapping to those devices specified. elif isinstance(device, (list, tuple)): index = faiss.index_cpu_to_gpus_list(index, gpus=list(device)) else: raise TypeError( f"The argument type: {type(device)} is not expected. " + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints." ) return index def search(self, query: np.array, k=10, **kwargs) -> SearchResults: """Find the nearest examples indices to the query. Args: query (`np.array`): The query as a numpy array. k (`int`): The number of examples to retrieve. Output: scores (`List[List[float]`): The retrieval scores of the retrieved examples. indices (`List[List[int]]`): The indices of the retrieved examples. 
""" if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1): raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)") queries = query.reshape(1, -1) if not queries.flags.c_contiguous: queries = np.asarray(queries, order="C") scores, indices = self.faiss_index.search(queries, k, **kwargs) return SearchResults(scores[0], indices[0].astype(int)) def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults: """Find the nearest examples indices to the queries. Args: queries (`np.array`): The queries as a numpy array. k (`int`): The number of examples to retrieve. Output: total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query. total_indices (`List[List[int]]`): The indices of the retrieved examples per query. """ if len(queries.shape) != 2: raise ValueError("Shape of query must be 2D") if not queries.flags.c_contiguous: queries = np.asarray(queries, order="C") scores, indices = self.faiss_index.search(queries, k, **kwargs) return BatchedSearchResults(scores, indices.astype(int)) def save(self, file: Union[str, PurePath], storage_options: Optional[dict] = None): """Serialize the FaissIndex on disk""" import faiss # noqa: F811 if self.device is not None and isinstance(self.device, (int, list, tuple)): index = faiss.index_gpu_to_cpu(self.faiss_index) else: index = self.faiss_index with fsspec.open(str(file), "wb", **(storage_options or {})) as f: faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write))) @classmethod def load( cls, file: Union[str, PurePath], device: Optional[Union[int, list[int]]] = None, storage_options: Optional[dict] = None, ) -> "FaissIndex": """Deserialize the FaissIndex from disk""" import faiss # noqa: F811 # Instances of FaissIndex is essentially just a wrapper for faiss indices. faiss_index = cls(device=device) with fsspec.open(str(file), "rb", **(storage_options or {})) as f: index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read))) faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device) return faiss_index class IndexableMixin: """Add indexing features to `datasets.Dataset`""" def __init__(self): self._indexes: dict[str, BaseIndex] = {} def __len__(self): raise NotImplementedError def __getitem__(self, key): raise NotImplementedError def is_index_initialized(self, index_name: str) -> bool: return index_name in self._indexes def _check_index_is_initialized(self, index_name: str): if not self.is_index_initialized(index_name): raise MissingIndex( f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first." ) def list_indexes(self) -> list[str]: """List the `colindex_nameumns`/identifiers of all the attached indexes.""" return list(self._indexes) def get_index(self, index_name: str) -> BaseIndex: """List the `index_name`/identifiers of all the attached indexes. Args: index_name (`str`): Index name. Returns: [`BaseIndex`] """ self._check_index_is_initialized(index_name) return self._indexes[index_name] def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[Union[int, list[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, ): """Add a dense index using Faiss for fast retrieval. 
The index is created using the vectors of the specified column. You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory Args: column (`str`): The column of the vectors to add to the index. index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. By default it corresponds to `column`. device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. """ index_name = index_name if index_name is not None else column faiss_index = FaissIndex( device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index ) faiss_index.add_vectors( self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose ) self._indexes[index_name] = faiss_index def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[Union[int, list[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory Args: external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. 
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. """ faiss_index = FaissIndex( device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index ) faiss_index.add_vectors( external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose ) self._indexes[index_name] = faiss_index def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[dict] = None): """Save a FaissIndex on disk. Args: index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.11.0"/> """ index = self.get_index(index_name) if not isinstance(index, FaissIndex): raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'") index.save(file, storage_options=storage_options) logger.info(f"Saved FaissIndex {index_name} at {file}") def load_faiss_index( self, index_name: str, file: Union[str, PurePath], device: Optional[Union[int, list[int]]] = None, storage_options: Optional[dict] = None, ): """Load a FaissIndex from disk. If you want to do additional configurations, you can have access to the faiss index object by doing `.get_index(index_name).faiss_index` to make it fit your needs. Args: index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.11.0"/> """ index = FaissIndex.load(file, device=device, storage_options=storage_options) if index.faiss_index.ntotal != len(self): raise ValueError( f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples." ) self._indexes[index_name] = index logger.info(f"Loaded FaissIndex {index_name} from {file}") def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. Args: column (`str`): The column of the documents to add to the index. index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`. By default it corresponds to `column`. 
host (Optional `str`, defaults to localhost): host of where ElasticSearch is running port (Optional `str`, defaults to 9200): port of where ElasticSearch is running es_client (Optional `elasticsearch.Elasticsearch`): The elasticsearch client used to create the index if host and port are None. es_index_name (Optional `str`): The elasticsearch index name used to create the index. es_index_config (Optional `dict`): The configuration of the elasticsearch index. Default config is: Config:: { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } """ index_name = index_name if index_name is not None else column es_index = ElasticSearchIndex( host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config ) es_index.add_documents(self, column=column) self._indexes[index_name] = es_index def load_elasticsearch_index( self, index_name: str, es_index_name: str, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_config: Optional[dict] = None, ): """Load an existing text index using ElasticSearch for fast retrieval. Args: index_name (`str`): The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`. es_index_name (`str`): The name of elasticsearch index to load. host (`str`, *optional*, defaults to `localhost`): Host of where ElasticSearch is running. port (`str`, *optional*, defaults to `9200`): Port of where ElasticSearch is running. es_client (`elasticsearch.Elasticsearch`, *optional*): The elasticsearch client used to create the index if host and port are `None`. es_index_config (`dict`, *optional*): The configuration of the elasticsearch index. Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } ``` """ self._indexes[index_name] = ElasticSearchIndex( host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config ) def drop_index(self, index_name: str): """Drop the index with the specified column. Args: index_name (`str`): The `index_name`/identifier of the index. """ del self._indexes[index_name] def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults: """Find the nearest examples indices in the dataset to the query. Args: index_name (`str`): The name/identifier of the index. query (`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve. Returns: `(scores, indices)`: A tuple of `(scores, indices)` where: - **scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples - **indices** (`List[List[int]]`): the indices of the retrieved examples """ self._check_index_is_initialized(index_name) return self._indexes[index_name].search(query, k, **kwargs) def search_batch( self, index_name: str, queries: Union[list[str], np.array], k: int = 10, **kwargs ) -> BatchedSearchResults: """Find the nearest examples indices in the dataset to the query. 
Args: index_name (`str`): The `index_name`/identifier of the index. queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve per query. Returns: `(total_scores, total_indices)`: A tuple of `(total_scores, total_indices)` where: - **total_scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query """ self._check_index_is_initialized(index_name) return self._indexes[index_name].search_batch(queries, k, **kwargs) def get_nearest_examples( self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs ) -> NearestExamplesResults: """Find the nearest examples in the dataset to the query. Args: index_name (`str`): The index_name/identifier of the index. query (`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve. Returns: `(scores, examples)`: A tuple of `(scores, examples)` where: - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples - **examples** (`dict`): the retrieved examples """ self._check_index_is_initialized(index_name) scores, indices = self.search(index_name, query, k, **kwargs) top_indices = [i for i in indices if i >= 0] return NearestExamplesResults(scores[: len(top_indices)], self[top_indices]) def get_nearest_examples_batch( self, index_name: str, queries: Union[list[str], np.array], k: int = 10, **kwargs ) -> BatchedNearestExamplesResults: """Find the nearest examples in the dataset to the query. Args: index_name (`str`): The `index_name`/identifier of the index. queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve per query. Returns: `(total_scores, total_examples)`: A tuple of `(total_scores, total_examples)` where: - **total_scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query - **total_examples** (`List[dict]`): the retrieved examples per query """ self._check_index_is_initialized(index_name) total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs) total_scores = [ scores_i[: len([i for i in indices_i if i >= 0])] for scores_i, indices_i in zip(total_scores, total_indices) ] total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices] return BatchedNearestExamplesResults(total_scores, total_samples)
datasets/src/datasets/search.py/0
{ "file_path": "datasets/src/datasets/search.py", "repo_id": "datasets", "token_count": 15323 }
102
# Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logging utilities.""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from .tqdm import ( # noqa: F401 # imported for backward compatibility disable_progress_bar, enable_progress_bar, is_progress_bar_enabled, tqdm, ) log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING def _get_default_logging_level(): """ If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to ``_default_log_level`` """ env_level_str = os.getenv("DATASETS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DATASETS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(logging.StreamHandler()) library_root_logger.setLevel(_get_default_logging_level()) def _reset_library_root_logger() -> None: library_root_logger = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET) def get_logger(name: Optional[str] = None) -> logging.Logger: """Return a logger with the specified name. This function can be used in dataset builders. """ if name is None: name = _get_library_name() return logging.getLogger(name) def get_verbosity() -> int: """Return the current level for the HuggingFace datasets library's root logger. Returns: Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. <Tip> HuggingFace datasets library has following logging levels: - `datasets.logging.CRITICAL`, `datasets.logging.FATAL` - `datasets.logging.ERROR` - `datasets.logging.WARNING`, `datasets.logging.WARN` - `datasets.logging.INFO` - `datasets.logging.DEBUG` </Tip> """ return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """Set the level for the Hugging Face Datasets library's root logger. Args: verbosity: Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. """ _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the level for the Hugging Face datasets library's root logger to `INFO`. This will display most of the logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`. 
""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the level for the Hugging Face datasets library's root logger to `WARNING`. This will display only the warning and errors logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`. """ return set_verbosity(WARNING) def set_verbosity_debug(): """Set the level for the Hugging Face datasets library's root logger to `DEBUG`. This will display all the logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`. """ return set_verbosity(DEBUG) def set_verbosity_error(): """Set the level for the Hugging Face datasets library's root logger to `ERROR`. This will display only the errors logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`. """ return set_verbosity(ERROR) def disable_propagation() -> None: """Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _get_library_root_logger().propagate = False def enable_propagation() -> None: """Enable propagation of the library log outputs. Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has been configured. """ _get_library_root_logger().propagate = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger()
datasets/src/datasets/utils/logging.py/0
{ "file_path": "datasets/src/datasets/utils/logging.py", "repo_id": "datasets", "token_count": 1914 }
103
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Version utils.""" import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$") @total_ordering @dataclass class Version: """Dataset version `MAJOR.MINOR.PATCH`. Args: version_str (`str`): The dataset version. description (`str`): A description of what is new in this version. major (`str`): minor (`str`): patch (`str`): Example: ```py >>> VERSION = datasets.Version("1.0.0") ``` """ version_str: str description: Optional[str] = None major: Optional[Union[str, int]] = None minor: Optional[Union[str, int]] = None patch: Optional[Union[str, int]] = None def __post_init__(self): self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str) def __repr__(self): return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def tuple(self): return self.major, self.minor, self.patch def _validate_operand(self, other): if isinstance(other, str): return Version(other) elif isinstance(other, Version): return other raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.") def __eq__(self, other): try: other = self._validate_operand(other) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__(self, other): other = self._validate_operand(other) return self.tuple < other.tuple def __hash__(self): return hash(_version_tuple_to_str(self.tuple)) @classmethod def from_dict(cls, dic): field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _to_yaml_string(self) -> str: return self.version_str def _str_to_version_tuple(version_str): """Return the tuple (major, minor, patch) version extracted from the str.""" res = _VERSION_REG.match(version_str) if not res: raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.") return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")]) def _version_tuple_to_str(version_tuple): """Return the str version from the version tuple (major, minor, patch).""" return ".".join(str(v) for v in version_tuple)
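# --- Illustrative usage sketch (not part of the original module) ----------------------------
if __name__ == "__main__":
    v = Version("1.2.0")
    assert v.tuple == (1, 2, 0)
    assert v == "1.2.0"  # strings are coerced through _validate_operand
    assert v < Version("1.10.0")  # comparison is numeric, not lexicographic
    print(repr(v))  # 1.2.0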
datasets/src/datasets/utils/version.py/0
{ "file_path": "datasets/src/datasets/utils/version.py", "repo_id": "datasets", "token_count": 1291 }
104
import pytest from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.pandas.pandas import PandasConfig def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = PandasConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = PandasConfig(name="name", data_files=data_files)
datasets/tests/packaged_modules/test_pandas.py/0
{ "file_path": "datasets/tests/packaged_modules/test_pandas.py", "repo_id": "datasets", "token_count": 229 }
105
import unittest import warnings from datasets.utils import experimental @experimental def dummy_function(): return "success" class TestExperimentalFlag(unittest.TestCase): def test_experimental_warning(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") self.assertEqual(dummy_function(), "success") self.assertEqual(len(w), 1)
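# --- Illustrative sketch (an assumption, not the actual `datasets.utils.experimental`) ------
# The decorator under test is expected to emit one warning per call and otherwise pass the
# wrapped function's return value through, roughly like the sketch below.
import functools


def _experimental_sketch(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(
            f"{fn.__name__} is experimental and may change in a future release.", UserWarning
        )
        return fn(*args, **kwargs)

    return wrapper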
datasets/tests/test_experimental.py/0
{ "file_path": "datasets/tests/test_experimental.py", "repo_id": "datasets", "token_count": 152 }
106
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def test_patch_submodule(): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join mock = "__test_patch_submodule_mock__" with patch_submodule(_test_patching, "os.path.join", mock): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os, _PatchedModuleObj) assert isinstance(_test_patching.os.path, _PatchedModuleObj) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path, _PatchedModuleObj) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os, _PatchedModuleObj) assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path, _PatchedModuleObj) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def test_patch_submodule_builtin(): assert _test_patching.open is open mock = "__test_patch_submodule_builtin_mock__" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching, "open", mock): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def test_patch_submodule_missing(): # pandas.read_csv is not present in _test_patching mock = "__test_patch_submodule_missing_mock__" with patch_submodule(_test_patching, "pandas.read_csv", mock): pass def test_patch_submodule_missing_builtin(): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point mock = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching, "len", None) is None with patch_submodule(_test_patching, "len", mock): assert _test_patching.len is mock assert _test_patching.len is len def test_patch_submodule_start_and_stop(): mock = "__test_patch_submodule_start_and_stop_mock__" patch = patch_submodule(_test_patching, "open", mock) 
assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def test_patch_submodule_successive(): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join mock_join = "__test_patch_submodule_successive_join__" mock_dirname = "__test_patch_submodule_successive_dirname__" mock_rename = "__test_patch_submodule_successive_rename__" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching, "os.path.join", mock_join): with patch_submodule(_test_patching, "os.rename", mock_rename): with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching, "os.rename", mock_rename): with patch_submodule(_test_patching, "os.path.join", mock_join): with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def test_patch_submodule_doesnt_exist(): mock = "__test_patch_submodule_doesnt_exist_mock__" with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock): pass with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock): pass
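# --- Illustrative reconstruction (an assumption, not the real `_test_patching` module) ------
# Based on how the assertions above use it, the helper module looks roughly like:
#
#     import os
#     import os as renamed_os
#     from os import path
#     from os import path as renamed_path
#     from os.path import join
#     from os.path import join as renamed_join
#
#     open = open  # keeps the builtin in the module globals so it can be patched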
datasets/tests/test_patching.py/0
{ "file_path": "datasets/tests/test_patching.py", "repo_id": "datasets", "token_count": 2274 }
107
# Components and configs ## ComponentSpec [[autodoc]] diffusers.modular_pipelines.modular_pipeline.ComponentSpec ## ConfigSpec [[autodoc]] diffusers.modular_pipelines.modular_pipeline.ConfigSpec ## ComponentsManager [[autodoc]] diffusers.modular_pipelines.components_manager.ComponentsManager ## InsertableDict [[autodoc]] diffusers.modular_pipelines.modular_pipeline_utils.InsertableDict
diffusers/docs/source/en/api/modular_diffusers/pipeline_components.md/0
{ "file_path": "diffusers/docs/source/en/api/modular_diffusers/pipeline_components.md", "repo_id": "diffusers", "token_count": 134 }
108
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DDIM [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon. The abstract from the paper is: *Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.* The original codebase can be found at [ermongroup/ddim](https://github.com/ermongroup/ddim). ## DDIMPipeline [[autodoc]] DDIMPipeline - all - __call__ ## ImagePipelineOutput [[autodoc]] pipelines.ImagePipelineOutput
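For quick orientation, a minimal unconditional sampling sketch is shown below. The checkpoint name is an illustrative choice, not the only compatible one, and the `eta`/`num_inference_steps` values are just examples.

```py
import torch
from diffusers import DDIMPipeline

# any unconditional DDPM/DDIM-compatible checkpoint works here
pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32").to("cuda")
image = pipeline(num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")
```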
diffusers/docs/source/en/api/pipelines/ddim.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/ddim.md", "repo_id": "diffusers", "token_count": 477 }
109
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> </div> [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros. The abstract from the paper is: *We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. To obtain training data for this problem, we combine the knowledge of two large pretrained models -- a language model (GPT-3) and a text-to-image model (Stable Diffusion) -- to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.* You can find additional information about InstructPix2Pix on the [project page](https://www.timothybrooks.com/instruct-pix2pix), [original codebase](https://github.com/timothybrooks/instruct-pix2pix), and try it out in a [demo](https://huggingface.co/spaces/timbrooks/instruct-pix2pix). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## StableDiffusionInstructPix2PixPipeline [[autodoc]] StableDiffusionInstructPix2PixPipeline - __call__ - all - load_textual_inversion - load_lora_weights - save_lora_weights ## StableDiffusionXLInstructPix2PixPipeline [[autodoc]] StableDiffusionXLInstructPix2PixPipeline - __call__ - all
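A minimal editing sketch is shown below, assuming the public [timbrooks/instruct-pix2pix](https://huggingface.co/timbrooks/instruct-pix2pix) checkpoint; the input image URL and parameter values are illustrative.

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

# replace with any RGB image you want to edit
image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png")
edited = pipeline(
    "make the mountains snowy",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,
).images[0]
```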
diffusers/docs/source/en/api/pipelines/pix2pix.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/pix2pix.md", "repo_id": "diffusers", "token_count": 759 }
110
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> > [!WARNING] > This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. # Text2Video-Zero <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> </div> [Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com). Text2Video-Zero enables zero-shot video generation using either: 1. A textual prompt 2. A prompt combined with guidance from poses or edges 3. Video Instruct-Pix2Pix (instruction-guided video editing) Results are temporally consistent and closely follow the guidance and textual prompts. ![teaser-img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/t2v_zero_teaser.png) The abstract from the paper is: *Recent text-to-video generation approaches rely on computationally heavy training and require large-scale video datasets. In this paper, we introduce a new task of zero-shot text-to-video generation and propose a low-cost approach (without any training or optimization) by leveraging the power of existing text-to-image synthesis methods (e.g., Stable Diffusion), making them suitable for the video domain. Our key modifications include (i) enriching the latent codes of the generated frames with motion dynamics to keep the global scene and the background time consistent; and (ii) reprogramming frame-level self-attention using a new cross-frame attention of each frame on the first frame, to preserve the context, appearance, and identity of the foreground object. Experiments show that this leads to low overhead, yet high-quality and remarkably consistent video generation. Moreover, our approach is not limited to text-to-video synthesis but is also applicable to other tasks such as conditional and content-specialized video generation, and Video Instruct-Pix2Pix, i.e., instruction-guided video editing. As experiments show, our method performs comparably or sometimes better than recent approaches, despite not being trained on additional video data.* You can find additional information about Text2Video-Zero on the [project page](https://text2video-zero.github.io/), [paper](https://huggingface.co/papers/2303.13439), and [original codebase](https://github.com/Picsart-AI-Research/Text2Video-Zero). 
## Usage example ### Text-To-Video To generate a video from prompt, run the following Python code: ```python import torch from diffusers import TextToVideoZeroPipeline import imageio model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") prompt = "A panda is playing guitar on times square" result = pipe(prompt=prompt).images result = [(r * 255).astype("uint8") for r in result] imageio.mimsave("video.mp4", result, fps=4) ``` You can change these parameters in the pipeline call: * Motion field strength (see the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1): * `motion_field_strength_x` and `motion_field_strength_y`. Default: `motion_field_strength_x=12`, `motion_field_strength_y=12` * `T` and `T'` (see the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1) * `t0` and `t1` in the range `{0, ..., num_inference_steps}`. Default: `t0=45`, `t1=48` * Video length: * `video_length`, the number of frames video_length to be generated. Default: `video_length=8` We can also generate longer videos by doing the processing in a chunk-by-chunk manner: ```python import torch from diffusers import TextToVideoZeroPipeline import numpy as np model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") seed = 0 video_length = 24 #24 ÷ 4fps = 6 seconds chunk_size = 8 prompt = "A panda is playing guitar on times square" # Generate the video chunk-by-chunk result = [] chunk_ids = np.arange(0, video_length, chunk_size - 1) generator = torch.Generator(device="cuda") for i in range(len(chunk_ids)): print(f"Processing chunk {i + 1} / {len(chunk_ids)}") ch_start = chunk_ids[i] ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1] # Attach the first frame for Cross Frame Attention frame_ids = [0] + list(range(ch_start, ch_end)) # Fix the seed for the temporal consistency generator.manual_seed(seed) output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids) result.append(output.images[1:]) # Concatenate chunks and save result = np.concatenate(result) result = [(r * 255).astype("uint8") for r in result] imageio.mimsave("video.mp4", result, fps=4) ``` - #### SDXL Support In order to use the SDXL model when generating a video from prompt, use the `TextToVideoZeroSDXLPipeline` pipeline: ```python import torch from diffusers import TextToVideoZeroSDXLPipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipe = TextToVideoZeroSDXLPipeline.from_pretrained( model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") ``` ### Text-To-Video with Pose Control To generate a video from prompt with additional pose control 1. Download a demo video ```python from huggingface_hub import hf_hub_download filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4" repo_id = "PAIR/Text2Video-Zero" video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) ``` 2. Read video containing extracted pose images ```python from PIL import Image import imageio reader = imageio.get_reader(video_path, "ffmpeg") frame_count = 8 pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] ``` To extract pose from actual video, read [ControlNet documentation](controlnet). 3. 
Run `StableDiffusionControlNetPipeline` with our custom attention processor ```python import torch from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda") # Set the attention processor pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) # fix latents for all frames latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1) prompt = "Darth Vader dancing in a desert" result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images imageio.mimsave("video.mp4", result, fps=4) ``` - #### SDXL Support Since our attention processor also works with SDXL, it can be utilized to generate a video from prompt using ControlNet models powered by SDXL: ```python import torch from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor controlnet_model_id = 'thibaud/controlnet-openpose-sdxl-1.0' model_id = 'stabilityai/stable-diffusion-xl-base-1.0' controlnet = ControlNetModel.from_pretrained(controlnet_model_id, torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16 ).to('cuda') # Set the attention processor pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) # fix latents for all frames latents = torch.randn((1, 4, 128, 128), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1) prompt = "Darth Vader dancing in a desert" result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images imageio.mimsave("video.mp4", result, fps=4) ``` ### Text-To-Video with Edge Control To generate a video from prompt with additional Canny edge control, follow the same steps described above for pose-guided generation using [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny). ### Video Instruct-Pix2Pix To perform text-guided video editing (with [InstructPix2Pix](pix2pix)): 1. Download a demo video ```python from huggingface_hub import hf_hub_download filename = "__assets__/pix2pix video/camel.mp4" repo_id = "PAIR/Text2Video-Zero" video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) ``` 2. Read video from path ```python from PIL import Image import imageio reader = imageio.get_reader(video_path, "ffmpeg") frame_count = 8 video = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] ``` 3. 
Run `StableDiffusionInstructPix2PixPipeline` with our custom attention processor ```python import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=3)) prompt = "make it Van Gogh Starry Night style" result = pipe(prompt=[prompt] * len(video), image=video).images imageio.mimsave("edited_video.mp4", result, fps=4) ``` ### DreamBooth specialization Methods **Text-To-Video**, **Text-To-Video with Pose Control** and **Text-To-Video with Edge Control** can run with custom [DreamBooth](../../training/dreambooth) models, as shown below for [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny) and [Avatar style DreamBooth](https://huggingface.co/PAIR/text2video-zero-controlnet-canny-avatar) model: 1. Download a demo video ```python from huggingface_hub import hf_hub_download filename = "__assets__/canny_videos_mp4/girl_turning.mp4" repo_id = "PAIR/Text2Video-Zero" video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) ``` 2. Read video from path ```python from PIL import Image import imageio reader = imageio.get_reader(video_path, "ffmpeg") frame_count = 8 canny_edges = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] ``` 3. Run `StableDiffusionControlNetPipeline` with custom trained DreamBooth model ```python import torch from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor # set model id to custom model model_id = "PAIR/text2video-zero-controlnet-canny-avatar" controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda") # Set the attention processor pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) # fix latents for all frames latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(canny_edges), 1, 1, 1) prompt = "oil painting of a beautiful girl avatar style" result = pipe(prompt=[prompt] * len(canny_edges), image=canny_edges, latents=latents).images imageio.mimsave("video.mp4", result, fps=4) ``` You can filter out some available DreamBooth-trained models with [this link](https://huggingface.co/models?search=dreambooth). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## TextToVideoZeroPipeline [[autodoc]] TextToVideoZeroPipeline - all - __call__ ## TextToVideoZeroSDXLPipeline [[autodoc]] TextToVideoZeroSDXLPipeline - all - __call__ ## TextToVideoPipelineOutput [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput
diffusers/docs/source/en/api/pipelines/text_to_video_zero.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/text_to_video_zero.md", "repo_id": "diffusers", "token_count": 4608 }
111
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ModularPipelineBlocks

[`~modular_pipelines.ModularPipelineBlocks`] is the basic block for building a [`ModularPipeline`]. It defines the components, inputs/outputs, and computation a block performs for a specific step in a pipeline. A [`~modular_pipelines.ModularPipelineBlocks`] connects with other blocks, using [state](./modular_diffusers_states), to enable the modular construction of workflows.

A [`~modular_pipelines.ModularPipelineBlocks`] on its own can't be executed. It is a blueprint for what a step should do in a pipeline. To actually run and execute a pipeline, the [`~modular_pipelines.ModularPipelineBlocks`] needs to be converted into a [`ModularPipeline`].

This guide will show you how to create a [`~modular_pipelines.ModularPipelineBlocks`].

## Inputs and outputs

> [!TIP]
> Refer to the [States](./modular_diffusers_states) guide if you aren't familiar with how state works in Modular Diffusers.

A [`~modular_pipelines.ModularPipelineBlocks`] defines `inputs`, `intermediate_inputs`, and `intermediate_outputs`.

- `inputs` are values provided by a user and retrieved from the [`~modular_pipelines.PipelineState`]. This is useful because some workflows resize an image, but the original image is still required. The [`~modular_pipelines.PipelineState`] maintains the original image.

Use `InputParam` to define `inputs`.

```py
from diffusers.modular_pipelines import InputParam

user_inputs = [
    InputParam(name="image", type_hint="PIL.Image", description="raw input image to process")
]
```

- `intermediate_inputs` are values typically created by a previous block, but they can also be provided directly if no preceding block generates them. Unlike `inputs`, `intermediate_inputs` can be modified.

Use `InputParam` to define `intermediate_inputs`.

```py
user_intermediate_inputs = [
    InputParam(name="processed_image", type_hint="torch.Tensor", description="image that has been preprocessed and normalized"),
]
```

- `intermediate_outputs` are new values created by a block and added to the [`~modular_pipelines.PipelineState`]. The `intermediate_outputs` are available as `intermediate_inputs` for subsequent blocks or available as the final output from running the pipeline.

Use `OutputParam` to define `intermediate_outputs`.

```py
from diffusers.modular_pipelines import OutputParam

user_intermediate_outputs = [
    OutputParam(name="image_latents", description="latents representing the image")
]
```

The intermediate inputs and outputs share data to connect blocks. They are accessible at any point, allowing you to track the workflow's progress.

## Computation logic

The computation a block performs is defined in the `__call__` method and it follows a specific structure.

1. Retrieve the [`~modular_pipelines.BlockState`] to get a local view of the `inputs` and `intermediate_inputs`.
2. Implement the computation logic on the `inputs` and `intermediate_inputs`.
3.
Update [`~modular_pipelines.PipelineState`] to push changes from the local [`~modular_pipelines.BlockState`] back to the global [`~modular_pipelines.PipelineState`]. 4. Return the components and state which becomes available to the next block. ```py def __call__(self, components, state): # Get a local view of the state variables this block needs block_state = self.get_block_state(state) # Your computation logic here # block_state contains all your inputs and intermediate_inputs # Access them like: block_state.image, block_state.processed_image # Update the pipeline state with your updated block_states self.set_block_state(state, block_state) return components, state ``` ### Components and configs The components and pipeline-level configs a block needs are specified in [`ComponentSpec`] and [`~modular_pipelines.ConfigSpec`]. - [`ComponentSpec`] contains the expected components used by a block. You need the `name` of the component and ideally a `type_hint` that specifies exactly what the component is. - [`~modular_pipelines.ConfigSpec`] contains pipeline-level settings that control behavior across all blocks. ```py from diffusers import ComponentSpec, ConfigSpec expected_components = [ ComponentSpec(name="unet", type_hint=UNet2DConditionModel), ComponentSpec(name="scheduler", type_hint=EulerDiscreteScheduler) ] expected_config = [ ConfigSpec("force_zeros_for_empty_prompt", True) ] ``` When the blocks are converted into a pipeline, the components become available to the block as the first argument in `__call__`. ```py def __call__(self, components, state): # Access components using dot notation unet = components.unet vae = components.vae scheduler = components.scheduler ```
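Putting the pieces above together, a block definition might look like the following sketch. The class name `ImageEncodeBlock`, the use of properties for the spec lists, and the VAE preprocessing details are assumptions for illustration, not the exact Diffusers implementation.

```py
import numpy as np
import torch
from diffusers import AutoencoderKL, ComponentSpec
from diffusers.modular_pipelines import InputParam, ModularPipelineBlocks, OutputParam


class ImageEncodeBlock(ModularPipelineBlocks):
    """Encode a raw image into VAE latents (illustrative sketch)."""

    @property
    def expected_components(self):
        return [ComponentSpec(name="vae", type_hint=AutoencoderKL)]

    @property
    def inputs(self):
        return [InputParam(name="image", type_hint="PIL.Image", description="raw input image to encode")]

    @property
    def intermediate_outputs(self):
        return [OutputParam(name="image_latents", description="latents representing the image")]

    def __call__(self, components, state):
        block_state = self.get_block_state(state)

        # preprocess: PIL image -> normalized NCHW tensor in [-1, 1]
        pixels = torch.from_numpy(np.array(block_state.image)).float() / 127.5 - 1.0
        pixels = pixels.permute(2, 0, 1).unsqueeze(0)

        with torch.no_grad():
            posterior = components.vae.encode(pixels).latent_dist
            block_state.image_latents = posterior.sample() * components.vae.config.scaling_factor

        self.set_block_state(state, block_state)
        return components, state
```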
diffusers/docs/source/en/modular_diffusers/pipeline_block.md/0
{ "file_path": "diffusers/docs/source/en/modular_diffusers/pipeline_block.md", "repo_id": "diffusers", "token_count": 1578 }
112
# T-GATE [T-GATE](https://github.com/HaozheLiu-ST/T-GATE/tree/main) accelerates inference for [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [PixArt](../api/pipelines/pixart), and [Latency Consistency Model](../api/pipelines/latent_consistency_models.md) pipelines by skipping the cross-attention calculation once it converges. This method doesn't require any additional training and it can speed up inference from 10-50%. T-GATE is also compatible with other optimization methods like [DeepCache](./deepcache). Before you begin, make sure you install T-GATE. ```bash pip install tgate pip install -U torch diffusers transformers accelerate DeepCache ``` To use T-GATE with a pipeline, you need to use its corresponding loader. | Pipeline | T-GATE Loader | |---|---| | PixArt | TgatePixArtLoader | | Stable Diffusion XL | TgateSDXLLoader | | Stable Diffusion XL + DeepCache | TgateSDXLDeepCacheLoader | | Stable Diffusion | TgateSDLoader | | Stable Diffusion + DeepCache | TgateSDDeepCacheLoader | Next, create a `TgateLoader` with a pipeline, the gate step (the time step to stop calculating the cross attention), and the number of inference steps. Then call the `tgate` method on the pipeline with a prompt, gate step, and the number of inference steps. Let's see how to enable this for several different pipelines. <hfoptions id="pipelines"> <hfoption id="PixArt"> Accelerate `PixArtAlphaPipeline` with T-GATE: ```py import torch from diffusers import PixArtAlphaPipeline from tgate import TgatePixArtLoader pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16) gate_step = 8 inference_step = 25 pipe = TgatePixArtLoader( pipe, gate_step=gate_step, num_inference_steps=inference_step, ).to("cuda") image = pipe.tgate( "An alpaca made of colorful building blocks, cyberpunk.", gate_step=gate_step, num_inference_steps=inference_step, ).images[0] ``` </hfoption> <hfoption id="Stable Diffusion XL"> Accelerate `StableDiffusionXLPipeline` with T-GATE: ```py import torch from diffusers import StableDiffusionXLPipeline from diffusers import DPMSolverMultistepScheduler from tgate import TgateSDXLLoader pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) gate_step = 10 inference_step = 25 pipe = TgateSDXLLoader( pipe, gate_step=gate_step, num_inference_steps=inference_step, ).to("cuda") image = pipe.tgate( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", gate_step=gate_step, num_inference_steps=inference_step ).images[0] ``` </hfoption> <hfoption id="StableDiffusionXL with DeepCache"> Accelerate `StableDiffusionXLPipeline` with [DeepCache](https://github.com/horseee/DeepCache) and T-GATE: ```py import torch from diffusers import StableDiffusionXLPipeline from diffusers import DPMSolverMultistepScheduler from tgate import TgateSDXLDeepCacheLoader pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) gate_step = 10 inference_step = 25 pipe = TgateSDXLDeepCacheLoader( pipe, cache_interval=3, cache_branch_id=0, ).to("cuda") image = pipe.tgate( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", gate_step=gate_step, 
num_inference_steps=inference_step ).images[0] ``` </hfoption> <hfoption id="Latent Consistency Model"> Accelerate `latent-consistency/lcm-sdxl` with T-GATE: ```py import torch from diffusers import StableDiffusionXLPipeline from diffusers import UNet2DConditionModel, LCMScheduler from diffusers import DPMSolverMultistepScheduler from tgate import TgateSDXLLoader unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16", ) pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16", ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) gate_step = 1 inference_step = 4 pipe = TgateSDXLLoader( pipe, gate_step=gate_step, num_inference_steps=inference_step, lcm=True ).to("cuda") image = pipe.tgate( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", gate_step=gate_step, num_inference_steps=inference_step ).images[0] ``` </hfoption> </hfoptions> T-GATE also supports [`StableDiffusionPipeline`] and [PixArt-alpha/PixArt-LCM-XL-2-1024-MS](https://hf.co/PixArt-alpha/PixArt-LCM-XL-2-1024-MS). ## Benchmarks | Model | MACs | Param | Latency | Zero-shot 10K-FID on MS-COCO | |-----------------------|----------|-----------|---------|---------------------------| | SD-1.5 | 16.938T | 859.520M | 7.032s | 23.927 | | SD-1.5 w/ T-GATE | 9.875T | 815.557M | 4.313s | 20.789 | | SD-2.1 | 38.041T | 865.785M | 16.121s | 22.609 | | SD-2.1 w/ T-GATE | 22.208T | 815.433 M | 9.878s | 19.940 | | SD-XL | 149.438T | 2.570B | 53.187s | 24.628 | | SD-XL w/ T-GATE | 84.438T | 2.024B | 27.932s | 22.738 | | Pixart-Alpha | 107.031T | 611.350M | 61.502s | 38.669 | | Pixart-Alpha w/ T-GATE | 65.318T | 462.585M | 37.867s | 35.825 | | DeepCache (SD-XL) | 57.888T | - | 19.931s | 23.755 | | DeepCache w/ T-GATE | 43.868T | - | 14.666s | 23.999 | | LCM (SD-XL) | 11.955T | 2.570B | 3.805s | 25.044 | | LCM w/ T-GATE | 11.171T | 2.024B | 3.533s | 25.028 | | LCM (Pixart-Alpha) | 8.563T | 611.350M | 4.733s | 36.086 | | LCM w/ T-GATE | 7.623T | 462.585M | 4.543s | 37.048 | The latency is tested on an NVIDIA 1080TI, MACs and Params are calculated with [calflops](https://github.com/MrYxJ/calculate-flops.pytorch), and the FID is calculated with [PytorchFID](https://github.com/mseitzer/pytorch-fid).
diffusers/docs/source/en/optimization/tgate.md/0
{ "file_path": "diffusers/docs/source/en/optimization/tgate.md", "repo_id": "diffusers", "token_count": 2963 }
113
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Reinforcement learning training with DDPO You can fine-tune Stable Diffusion on a reward function via reinforcement learning with the 🤗 TRL library and 🤗 Diffusers. This is done with the Denoising Diffusion Policy Optimization (DDPO) algorithm introduced by Black et al. in [Training Diffusion Models with Reinforcement Learning](https://huggingface.co/papers/2305.13301), which is implemented in 🤗 TRL with the [`~trl.DDPOTrainer`]. For more information, check out the [`~trl.DDPOTrainer`] API reference and the [Finetune Stable Diffusion Models with DDPO via TRL](https://huggingface.co/blog/trl-ddpo) blog post.
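A heavily simplified sketch of wiring up the trainer is shown below. The reward and prompt functions, the config values, and the checkpoint are assumptions for illustration; refer to the TRL documentation for the exact, current API.

```py
import torch
from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline


def prompt_fn():
    # returns a prompt plus arbitrary metadata passed on to the reward function
    return "a photo of a cute corgi", {}


def reward_fn(images, prompts, metadata):
    # toy reward: prefer brighter images (replace with a real scorer, e.g. an aesthetic model)
    rewards = images.float().mean(dim=(1, 2, 3))
    return rewards, {}


config = DDPOConfig(num_epochs=1)
pipeline = DefaultDDPOStableDiffusionPipeline("stable-diffusion-v1-5/stable-diffusion-v1-5")
trainer = DDPOTrainer(config, reward_fn, prompt_fn, pipeline)
trainer.train()
```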
diffusers/docs/source/en/training/ddpo.md/0
{ "file_path": "diffusers/docs/source/en/training/ddpo.md", "repo_id": "diffusers", "token_count": 322 }
114
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LoRA [LoRA (Low-Rank Adaptation)](https://huggingface.co/papers/2106.09685) is a method for quickly training a model for a new task. It works by freezing the original model weights and adding a small number of *new* trainable parameters. This means it is significantly faster and cheaper to adapt an existing model to new tasks, such as generating images in a new style. LoRA checkpoints are typically only a couple hundred MBs in size, so they're very lightweight and easy to store. Load these smaller set of weights into an existing base model with [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] and specify the file name. <hfoptions id="usage"> <hfoption id="text-to-image"> ```py import torch from diffusers import AutoPipelineForText2Image pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors", adapter_name="cereal" ) pipeline("bears, pizza bites").images[0] ``` </hfoption> <hfoption id="text-to-video"> ```py import torch from diffusers import LTXConditionPipeline from diffusers.utils import export_to_video, load_image pipeline = LTXConditionPipeline.from_pretrained( "Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16 ) pipeline.load_lora_weights( "Lightricks/LTX-Video-Cakeify-LoRA", weight_name="ltxv_095_cakeify_lora.safetensors", adapter_name="cakeify" ) pipeline.set_adapters("cakeify") # use "CAKEIFY" to trigger the LoRA prompt = "CAKEIFY a person using a knife to cut a cake shaped like a Pikachu plushie" image = load_image("https://huggingface.co/Lightricks/LTX-Video-Cakeify-LoRA/resolve/main/assets/images/pikachu.png") video = pipeline( prompt=prompt, image=image, width=576, height=576, num_frames=161, decode_timestep=0.03, decode_noise_scale=0.025, num_inference_steps=50, ).frames[0] export_to_video(video, "output.mp4", fps=26) ``` </hfoption> </hfoptions> The [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] method is the preferred way to load LoRA weights into the UNet and text encoder because it can handle cases where: - the LoRA weights don't have separate UNet and text encoder identifiers - the LoRA weights have separate UNet and text encoder identifiers The [`~loaders.PeftAdapterMixin.load_lora_adapter`] method is used to directly load a LoRA adapter at the *model-level*, as long as the model is a Diffusers model that is a subclass of [`PeftAdapterMixin`]. It builds and prepares the necessary model configuration for the adapter. This method also loads the LoRA adapter into the UNet. For example, if you're only loading a LoRA into the UNet, [`~loaders.PeftAdapterMixin.load_lora_adapter`] ignores the text encoder keys. Use the `prefix` parameter to filter and load the appropriate state dicts, `"unet"` to load. 
```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.unet.load_lora_adapter(
    "jbilcke-hf/sdxl-cinematic-1",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="cinematic",
    prefix="unet"
)
# use cnmt in the prompt to trigger the LoRA
pipeline("A cute cnmt eating a slice of pizza, stunning color scheme, masterpiece, illustration").images[0]
```

## torch.compile

[torch.compile](../optimization/fp16#torchcompile) speeds up inference by compiling the PyTorch model to use optimized kernels. Before compiling, the LoRA weights need to be fused into the base model and unloaded first.

```py
import torch
from diffusers import DiffusionPipeline

# load base model and LoRA
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)

# activate LoRA and set adapter weight
pipeline.set_adapters("ikea", adapter_weights=0.7)

# fuse LoRAs and unload weights
pipeline.fuse_lora(adapter_names=["ikea"], lora_scale=1.0)
pipeline.unload_lora_weights()
```

Typically, the UNet is compiled because it's the most compute-intensive component of the pipeline.

```py
pipeline.unet.to(memory_format=torch.channels_last)
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

pipeline("A bowl of ramen shaped like a cute kawaii bear").images[0]
```

Refer to the [hotswapping](#hotswapping) section to learn how to avoid recompilation when working with compiled models and multiple LoRAs.

## Weight scale

The `scale` parameter is used to control how much of a LoRA to apply. A value of `0` is equivalent to only using the base model weights and a value of `1` is equivalent to fully using the LoRA.

<hfoptions id="weight-scale">
<hfoption id="simple use case">

For simple use cases, you can pass `cross_attention_kwargs={"scale": 1.0}` to the pipeline.

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/super-cereal-sdxl-lora",
    weight_name="cereal_box_sdxl_v1.safetensors",
    adapter_name="cereal"
)
pipeline("bears, pizza bites", cross_attention_kwargs={"scale": 1.0}).images[0]
```

</hfoption>
<hfoption id="finer control">

> [!WARNING]
> The [`~loaders.PeftAdapterMixin.set_adapters`] method only scales attention weights. If a LoRA has ResNets or down and upsamplers, these components keep a scale value of `1.0`.

For finer control over each individual component of the UNet or text encoder, pass a dictionary instead. In the example below, the `"down"` block in the UNet is scaled by 0.9 and you can further specify in the `"up"` block the scales of the transformers in `"block_0"` and `"block_1"`. If a block like `"mid"` isn't specified, the default value `1.0` is used.
```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/super-cereal-sdxl-lora",
    weight_name="cereal_box_sdxl_v1.safetensors",
    adapter_name="cereal"
)
scales = {
    "text_encoder": 0.5,
    "text_encoder_2": 0.5,
    "unet": {
        "down": 0.9,
        "up": {
            "block_0": 0.6,
            "block_1": [0.4, 0.8, 1.0],
        }
    }
}
pipeline.set_adapters("cereal", scales)
pipeline("bears, pizza bites").images[0]
```

</hfoption>
</hfoptions>

### Scale scheduling

Dynamically adjusting the LoRA scale during sampling gives you better control over the overall composition and layout because certain steps may benefit more from an increased or reduced scale.

The [character LoRA](https://huggingface.co/alvarobartt/ghibli-characters-flux-lora) in the example below starts with a higher scale that gradually decays over the first 20 steps to establish the character generation. In the later steps, only a scale of 0.2 is applied to avoid adding too much of the LoRA features to other parts of the image the LoRA wasn't trained on.

```py
import torch
from diffusers import FluxPipeline

pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
pipeline.load_lora_weights("alvarobartt/ghibli-characters-flux-lora", "lora")

num_inference_steps = 30
lora_steps = 20
lora_scales = torch.linspace(1.5, 0.7, lora_steps).tolist()
lora_scales += [0.2] * (num_inference_steps - lora_steps + 1)

pipeline.set_adapters("lora", lora_scales[0])

def callback(pipeline: FluxPipeline, step: int, timestep: torch.LongTensor, callback_kwargs: dict):
    pipeline.set_adapters("lora", lora_scales[step + 1])
    return callback_kwargs

prompt = """
Ghibli style The Grinch, a mischievous green creature with a sly grin, peeking out from behind a snow-covered tree while plotting his antics,
in a quaint snowy village decorated for the holidays, warm light glowing from cozy homes, with playful snowflakes dancing in the air
"""
pipeline(
    prompt=prompt,
    guidance_scale=3.0,
    num_inference_steps=num_inference_steps,
    generator=torch.Generator().manual_seed(42),
    callback_on_step_end=callback,
).images[0]
```
This method should be called *before* loading the first LoRA and `torch.compile` should be called *after* loading the first LoRA.

> [!TIP]
> The [`~loaders.lora_base.LoraBaseMixin.enable_lora_hotswap`] method isn't always necessary if the second LoRA targets the identical LoRA ranks and scales as the first LoRA.

Within [`~loaders.lora_base.LoraBaseMixin.enable_lora_hotswap`], the `target_rank` parameter is important for setting the rank for all LoRA adapters. Setting it to `max_rank` sets it to the highest value. For LoRAs with different ranks, you set it to a higher rank value. The default rank value is 128.

```py
import torch
from diffusers import DiffusionPipeline

# load base model and LoRAs
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# 1. enable_lora_hotswap
pipeline.enable_lora_hotswap(target_rank=max_rank)

pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)

# 2. torch.compile
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

# 3. hotswap
pipeline.load_lora_weights(
    "lordjia/by-feng-zikai",
    hotswap=True,
    adapter_name="ikea"
)
```

> [!TIP]
> Move your code inside the `with torch._dynamo.config.patch(error_on_recompile=True)` context manager to detect if a model was recompiled. If a model is recompiled despite following all the steps above, please open an [issue](https://github.com/huggingface/diffusers/issues) with a reproducible example.

If you expect varied resolutions during inference with this feature, make sure to set `dynamic=True` during compilation. Refer to [this document](../optimization/fp16#dynamic-shape-compilation) for more details.

There are still scenarios where recompilation is unavoidable, such as when the hotswapped LoRA targets more layers than the initial adapter. Try to load the LoRA that targets the most layers *first*. For more details about this limitation, refer to the PEFT [hotswapping](https://huggingface.co/docs/peft/main/en/package_reference/hotswap#peft.utils.hotswap.hotswap_adapter) docs.

<details>
<summary>Technical details of hotswapping</summary>

The [`~loaders.lora_base.LoraBaseMixin.enable_lora_hotswap`] method converts the LoRA scaling factor from floats to torch.tensors and pads the shape of the weights to the largest required shape to avoid reassigning the whole attribute when the data in the weights are replaced.

This is why the `max_rank` argument is important. The results are unchanged even when the values are padded with zeros. Computation may be slower though depending on the padding size.

Since no new LoRA attributes are added, each subsequent LoRA is only allowed to target the same layers, or subset of layers, the first LoRA targets. Choosing the LoRA loading order is important because if the LoRAs target disjoint layers, you may end up creating a dummy LoRA that targets the union of all target layers.

For more implementation details, take a look at the [`hotswap.py`](https://github.com/huggingface/peft/blob/92d65cafa51c829484ad3d95cf71d09de57ff066/src/peft/utils/hotswap.py) file.

</details>

## Merge

The weights from each LoRA can be merged together to produce a blend of multiple existing styles. There are several methods for merging LoRAs, and they differ in *how* the weights are merged (which may affect generation quality).
### set_adapters

The [`~loaders.PeftAdapterMixin.set_adapters`] method merges LoRAs by concatenating their weighted matrices. Pass the LoRA names to [`~loaders.PeftAdapterMixin.set_adapters`] and use the `adapter_weights` parameter to control the scaling of each LoRA. For example, if `adapter_weights=[0.5, 0.5]`, the output is an average of both LoRAs.

> [!TIP]
> The `"scale"` parameter determines how much of the merged LoRA to apply. See the [Weight scale](#weight-scale) section for more details.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)
pipeline.load_lora_weights(
    "lordjia/by-feng-zikai",
    weight_name="fengzikai_v1.0_XL.safetensors",
    adapter_name="feng"
)
pipeline.set_adapters(["ikea", "feng"], adapter_weights=[0.7, 0.8])
# use by Feng Zikai to activate the lordjia/by-feng-zikai LoRA
pipeline("A bowl of ramen shaped like a cute kawaii bear, by Feng Zikai", cross_attention_kwargs={"scale": 1.0}).images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lora_merge_set_adapters.png"/>
</div>

### add_weighted_adapter

> [!TIP]
> This is an experimental method and you can refer to PEFT's [Model merging](https://huggingface.co/docs/peft/developer_guides/model_merging) for more details. Take a look at this [issue](https://github.com/huggingface/diffusers/issues/6892) if you're interested in the motivation and design behind this integration.

The [`~peft.LoraModel.add_weighted_adapter`] method enables more efficient merging methods like [TIES](https://huggingface.co/papers/2306.01708) or [DARE](https://huggingface.co/papers/2311.03099). These merging methods remove redundant and potentially interfering parameters from merged models. Keep in mind the LoRAs need to have identical ranks to be merged.

Make sure the latest stable versions of Diffusers and PEFT are installed.

```bash
pip install -U -q diffusers peft
```

Load a UNet that corresponds to the LoRA UNet.

```py
import copy
import torch
from diffusers import AutoModel, DiffusionPipeline
from peft import get_peft_model, LoraConfig, PeftModel

unet = AutoModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
    subfolder="unet",
).to("cuda")
```

Load a pipeline, pass the UNet to it, and load a LoRA.

```py
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    torch_dtype=torch.float16,
    unet=unet
).to("cuda")
pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)
```

Create a [`~peft.PeftModel`] from the LoRA checkpoint by combining the first UNet you loaded and the LoRA UNet from the pipeline.

```py
sdxl_unet = copy.deepcopy(unet)
ikea_peft_model = get_peft_model(
    sdxl_unet,
    pipeline.unet.peft_config["ikea"],
    adapter_name="ikea"
)

original_state_dict = {f"base_model.model.{k}": v for k, v in pipeline.unet.state_dict().items()}
ikea_peft_model.load_state_dict(original_state_dict, strict=True)
```

> [!TIP]
> You can save and reuse the `ikea_peft_model` by pushing it to the Hub as shown below.
> ```py
> ikea_peft_model.push_to_hub("ikea_peft_model", token=TOKEN)
> ```

Repeat this process and create a [`~peft.PeftModel`] for the second LoRA.

```py
pipeline.delete_adapters("ikea")
sdxl_unet.delete_adapters("ikea")

pipeline.load_lora_weights(
    "lordjia/by-feng-zikai",
    weight_name="fengzikai_v1.0_XL.safetensors",
    adapter_name="feng"
)
pipeline.set_adapters(adapter_names="feng")

feng_peft_model = get_peft_model(
    sdxl_unet,
    pipeline.unet.peft_config["feng"],
    adapter_name="feng"
)

original_state_dict = {f"base_model.model.{k}": v for k, v in pipeline.unet.state_dict().items()}
feng_peft_model.load_state_dict(original_state_dict, strict=True)
```

Load a base UNet model and load the adapters.

```py
base_unet = AutoModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
    subfolder="unet",
).to("cuda")

model = PeftModel.from_pretrained(
    base_unet,
    "stevhliu/ikea_peft_model",
    use_safetensors=True,
    subfolder="ikea",
    adapter_name="ikea"
)
model.load_adapter(
    "stevhliu/feng_peft_model",
    use_safetensors=True,
    subfolder="feng",
    adapter_name="feng"
)
```

Merge the LoRAs with [`~peft.LoraModel.add_weighted_adapter`] and specify how you want to merge them with `combination_type`. The example below uses the `"dare_linear"` method (refer to this [blog post](https://huggingface.co/blog/peft_merging) to learn more about these merging methods), which randomly prunes some weights and then performs a weighted sum of the tensors based on the set weightage of each LoRA in `weights`.

Activate the merged LoRAs with [`~loaders.PeftAdapterMixin.set_adapters`].

```py
model.add_weighted_adapter(
    adapters=["ikea", "feng"],
    combination_type="dare_linear",
    weights=[1.0, 1.0],
    adapter_name="ikea-feng"
)
model.set_adapters("ikea-feng")

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    unet=model,
    variant="fp16",
    torch_dtype=torch.float16,
).to("cuda")
pipeline("A bowl of ramen shaped like a cute kawaii bear, by Feng Zikai").images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ikea-feng-dare-linear.png"/>
</div>

### fuse_lora

The [`~loaders.lora_base.LoraBaseMixin.fuse_lora`] method fuses the LoRA weights directly with the original UNet and text encoder weights of the underlying model. This reduces the overhead of loading the underlying model for each LoRA because it only loads the model once, which lowers memory usage and increases inference speed.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)
pipeline.load_lora_weights(
    "lordjia/by-feng-zikai",
    weight_name="fengzikai_v1.0_XL.safetensors",
    adapter_name="feng"
)
pipeline.set_adapters(["ikea", "feng"], adapter_weights=[0.7, 0.8])
```

Call [`~loaders.lora_base.LoraBaseMixin.fuse_lora`] to fuse them. The `lora_scale` parameter controls how much to scale the output by with the LoRA weights. It is important to make this adjustment now because passing `scale` to `cross_attention_kwargs` won't work in the pipeline.

```py
pipeline.fuse_lora(adapter_names=["ikea", "feng"], lora_scale=1.0)
```

Unload the LoRA weights since they're already fused with the underlying model.
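For example, the [`~loaders.lora_base.LoraBaseMixin.unload_lora_weights`] method (covered in more detail in the [unload_lora_weights](#unload_lora_weights) section below) removes the now-redundant LoRA weights:

```py
pipeline.unload_lora_weights()
```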
Save the fused pipeline with either [`~DiffusionPipeline.save_pretrained`] to save it locally or [`~PushToHubMixin.push_to_hub`] to save it to the Hub.

<hfoptions id="save">
<hfoption id="save locally">

```py
pipeline.unload_lora_weights()
pipeline.save_pretrained("path/to/fused-pipeline")
```

</hfoption>
<hfoption id="save to Hub">

```py
pipeline.unload_lora_weights()
pipeline.push_to_hub("fused-ikea-feng")
```

</hfoption>
</hfoptions>

The fused pipeline can now be quickly loaded for inference without requiring each LoRA to be separately loaded.

```py
pipeline = DiffusionPipeline.from_pretrained(
    "username/fused-ikea-feng", torch_dtype=torch.float16,
).to("cuda")
pipeline("A bowl of ramen shaped like a cute kawaii bear, by Feng Zikai").images[0]
```

Use [`~loaders.LoraLoaderMixin.unfuse_lora`] to restore the underlying model's weights, for example, if you want to use a different `lora_scale` value. You can only unfuse if there is a single LoRA fused. For example, it won't work with the pipeline from above because there are multiple fused LoRAs. In these cases, you'll need to reload the entire model.

```py
pipeline.unfuse_lora()
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/fuse_lora.png"/>
</div>

## Manage

Diffusers provides several methods to help you manage working with LoRAs. These methods can be especially useful if you're working with multiple LoRAs.

### set_adapters

[`~loaders.PeftAdapterMixin.set_adapters`] also activates the current LoRA to use if there are multiple active LoRAs. This allows you to switch between different LoRAs by specifying their name.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "ostris/ikea-instructions-lora-sdxl",
    weight_name="ikea_instructions_xl_v1_5.safetensors",
    adapter_name="ikea"
)
pipeline.load_lora_weights(
    "lordjia/by-feng-zikai",
    weight_name="fengzikai_v1.0_XL.safetensors",
    adapter_name="feng"
)
# activates the feng LoRA instead of the ikea LoRA
pipeline.set_adapters("feng")
```

### save_lora_adapter

Save an adapter with [`~loaders.PeftAdapterMixin.save_lora_adapter`].

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.unet.load_lora_adapter(
    "jbilcke-hf/sdxl-cinematic-1",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="cinematic",
    prefix="unet"
)

pipeline.save_lora_adapter("path/to/save", adapter_name="cinematic")
```

### unload_lora_weights

The [`~loaders.lora_base.LoraBaseMixin.unload_lora_weights`] method unloads any LoRA weights in the pipeline to restore the underlying model weights.

```py
pipeline.unload_lora_weights()
```

### disable_lora

The [`~loaders.PeftAdapterMixin.disable_lora`] method disables all LoRAs (but they're still kept on the pipeline) and restores the pipeline to the underlying model weights.

```py
pipeline.disable_lora()
```

### get_active_adapters

The [`~loaders.lora_base.LoraBaseMixin.get_active_adapters`] method returns a list of active LoRAs attached to a pipeline.

```py
pipeline.get_active_adapters()
["cereal", "ikea"]
```

### get_list_adapters

The [`~loaders.lora_base.LoraBaseMixin.get_list_adapters`] method returns the active LoRAs for each component in the pipeline.
```py pipeline.get_list_adapters() {"unet": ["cereal", "ikea"], "text_encoder_2": ["cereal"]} ``` ### delete_adapters The [`~loaders.PeftAdapterMixin.delete_adapters`] method completely removes a LoRA and its layers from a model. ```py pipeline.delete_adapters("ikea") ``` ## Resources Browse the [LoRA Studio](https://lorastudio.co/models) for different LoRAs to use or you can upload your favorite LoRAs from Civitai to the Hub with the Space below. <iframe src="https://multimodalart-civitai-to-hf.hf.space" frameborder="0" width="850" height="450" ></iframe> You can find additional LoRAs in the [FLUX LoRA the Explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer) and [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) Spaces. Check out the [Fast LoRA inference for Flux with Diffusers and PEFT](https://huggingface.co/blog/lora-fast) blog post to learn how to optimize LoRA inference with methods like FlashAttention-3 and fp8 quantization.
diffusers/docs/source/en/tutorials/using_peft_for_inference.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/using_peft_for_inference.md", "repo_id": "diffusers", "token_count": 9243 }
115
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Inpainting [[open-in-colab]] Inpainting replaces or edits specific areas of an image. This makes it a useful tool for image restoration like removing defects and artifacts, or even replacing an image area with something entirely new. Inpainting relies on a mask to determine which regions of an image to fill in; the area to inpaint is represented by white pixels and the area to keep is represented by black pixels. The white pixels are filled in by the prompt. With 🤗 Diffusers, here is how you can do inpainting: 1. Load an inpainting checkpoint with the [`AutoPipelineForInpainting`] class. This'll automatically detect the appropriate pipeline class to load based on the checkpoint: ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` <Tip> You'll notice throughout the guide, we use [`~DiffusionPipeline.enable_model_cpu_offload`] and [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`], to save memory and increase inference speed. If you're using PyTorch 2.0, it's not necessary to call [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`] on your pipeline because it'll already be using PyTorch 2.0's native [scaled-dot product attention](../optimization/fp16#scaled-dot-product-attention). </Tip> 2. Load the base and mask images: ```py init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") ``` 3. 
Create a prompt to inpaint the image with and pass it to the pipeline with the base and mask images: ```py prompt = "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k" negative_prompt = "bad anatomy, deformed, ugly, disfigured" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">base image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-cat.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ## Create a mask image Throughout this guide, the mask image is provided in all of the code examples for convenience. You can inpaint on your own images, but you'll need to create a mask image for it. Use the Space below to easily create a mask image. Upload a base image to inpaint on and use the sketch tool to draw a mask. Once you're done, click **Run** to generate and download the mask image. <iframe src="https://stevhliu-inpaint-mask-maker.hf.space" frameborder="0" width="850" height="450" ></iframe> ### Mask blur The [`~VaeImageProcessor.blur`] method provides an option for how to blend the original image and inpaint area. The amount of blur is determined by the `blur_factor` parameter. Increasing the `blur_factor` increases the amount of blur applied to the mask edges, softening the transition between the original image and inpaint area. A low or zero `blur_factor` preserves the sharper edges of the mask. To use this, create a blurred mask with the image processor. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image from PIL import Image pipeline = AutoPipelineForInpainting.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to('cuda') mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png") blurred_mask = pipeline.mask_processor.blur(mask, blur_factor=33) blurred_mask ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask with no blur</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/mask_blurred.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask with blur applied</figcaption> </div> </div> ## Popular models [Stable Diffusion Inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting), [Stable Diffusion XL (SDXL) Inpainting](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1), and [Kandinsky 2.2 Inpainting](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder-inpaint) are among the most popular models for inpainting. 
SDXL typically produces higher resolution images than Stable Diffusion v1.5, and Kandinsky 2.2 is also capable of generating high-quality images.

### Stable Diffusion Inpainting

Stable Diffusion Inpainting is a latent diffusion model finetuned for inpainting on 512x512 images. It is a good starting point because it is relatively fast and generates good quality images. To use this model for inpainting, you'll need to pass a prompt, base and mask image to the pipeline:

```py
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image, make_image_grid

pipeline = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")

generator = torch.Generator("cuda").manual_seed(92)
prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

### Stable Diffusion XL (SDXL) Inpainting

SDXL is a larger and more powerful version of Stable Diffusion v1.5. It can follow a two-stage process (though each model can also be used alone); the base model generates an image, and a refiner model takes that image and further enhances its details and quality. Take a look at the [SDXL](sdxl) guide for a more comprehensive guide on how to use SDXL and configure its parameters.

```py
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image, make_image_grid

pipeline = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")

generator = torch.Generator("cuda").manual_seed(92)
prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

### Kandinsky 2.2 Inpainting

The Kandinsky model family is similar to SDXL because it uses two models as well; the image prior model creates image embeddings, and the diffusion model generates images from them. You can load the image prior and diffusion model separately, but the easiest way to use Kandinsky 2.2 is to load it into the [`AutoPipelineForInpainting`] class which uses the [`KandinskyV22InpaintCombinedPipeline`] under the hood.
```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">base image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-sdv1.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion Inpainting</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-sdxl.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion XL Inpainting</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-kandinsky.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Kandinsky 2.2 Inpainting</figcaption> </div> </div> ## Non-inpaint specific checkpoints So far, this guide has used inpaint specific checkpoints such as [stable-diffusion-v1-5/stable-diffusion-inpainting](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting). But you can also use regular checkpoints like [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5). Let's compare the results of the two checkpoints. The image on the left is generated from a regular checkpoint, and the image on the right is from an inpaint checkpoint. You'll immediately notice the image on the left is not as clean, and you can still see the outline of the area the model is supposed to inpaint. The image on the right is much cleaner and the inpainted area appears more natural. 
<hfoptions id="regular-specific"> <hfoption id="stable-diffusion-v1-5/stable-diffusion-v1-5"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> <hfoption id="runwayml/stable-diffusion-inpainting"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-inpaint-specific.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">stable-diffusion-v1-5/stable-diffusion-v1-5</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-specific.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-inpainting</figcaption> </div> </div> However, for more basic tasks like erasing an object from an image (like the rocks in the road for example), a regular checkpoint yields pretty good results. There isn't as noticeable of difference between the regular and inpaint checkpoint. 
<hfoptions id="inpaint"> <hfoption id="stable-diffusion-v1-5/stable-diffusion-v1-5"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/road-mask.png") image = pipeline(prompt="road", image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> <hfoption id="runwayml/stable-diffusion-inpaint"> ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/road-mask.png") image = pipeline(prompt="road", image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/regular-inpaint-basic.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">stable-diffusion-v1-5/stable-diffusion-v1-5</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/specific-inpaint-basic.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">runwayml/stable-diffusion-inpainting</figcaption> </div> </div> The trade-off of using a non-inpaint specific checkpoint is the overall image quality may be lower, but it generally tends to preserve the mask area (that is why you can see the mask outline). The inpaint specific checkpoints are intentionally trained to generate higher quality inpainted images, and that includes creating a more natural transition between the masked and unmasked areas. As a result, these checkpoints are more likely to change your unmasked area. If preserving the unmasked area is important for your task, you can use the [`VaeImageProcessor.apply_overlay`] method to force the unmasked area of an image to remain the same at the expense of some more unnatural transitions between the masked and unmasked areas. 
```py import PIL import numpy as np import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid device = "cuda" pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline = pipeline.to(device) img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).resize((512, 512)) mask_image = load_image(mask_url).resize((512, 512)) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" repainted_image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] repainted_image.save("repainted_image.png") unmasked_unchanged_image = pipeline.image_processor.apply_overlay(mask_image, init_image, repainted_image) unmasked_unchanged_image.save("force_unmasked_unchanged.png") make_image_grid([init_image, mask_image, repainted_image, unmasked_unchanged_image], rows=2, cols=2) ``` ## Configure pipeline parameters Image features - like quality and "creativity" - are dependent on pipeline parameters. Knowing what these parameters do is important for getting the results you want. Let's take a look at the most important parameters and see how changing them affects the output. ### Strength `strength` is a measure of how much noise is added to the base image, which influences how similar the output is to the base image. * 📈 a high `strength` value means more noise is added to an image and the denoising process takes longer, but you'll get higher quality images that are more different from the base image * 📉 a low `strength` value means less noise is added to an image and the denoising process is faster, but the image quality may not be as great and the generated image resembles the base image more ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.6).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-0.6.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.6</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-0.8.png"/> <figcaption class="mt-2 text-center text-sm 
text-gray-500">strength = 0.8</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-strength-1.0.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 1.0</figcaption>
  </div>
</div>

### Guidance scale

`guidance_scale` affects how aligned the text prompt and generated image are.

* 📈 a high `guidance_scale` value means the prompt and generated image are closely aligned, so the output is a stricter interpretation of the prompt
* 📉 a low `guidance_scale` value means the prompt and generated image are more loosely aligned, so the output may be more varied from the prompt

You can use `strength` and `guidance_scale` together for more control over how expressive the model is. For example, a combination of high `strength` and `guidance_scale` values gives the model the most creative freedom.

```py
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image, make_image_grid

pipeline = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()

# load base and mask image
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png")
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png")

prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=2.5).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-2.5.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 2.5</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-7.5.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 7.5</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-guidance-12.5.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 12.5</figcaption>
  </div>
</div>

### Negative prompt

A negative prompt assumes the opposite role of a prompt; it guides the model away from generating certain things in an image. This is useful for quickly improving image quality and preventing the model from generating things you don't want.
```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" negative_prompt = "bad architecture, unstable, poor details, blurry" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex justify-center"> <figure> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-negative.png" /> <figcaption class="text-center">negative_prompt = "bad architecture, unstable, poor details, blurry"</figcaption> </figure> </div> ### Padding mask crop A method for increasing the inpainting image quality is to use the [`padding_mask_crop`](https://huggingface.co/docs/diffusers/v0.25.0/en/api/pipelines/stable_diffusion/inpaint#diffusers.StableDiffusionInpaintPipeline.__call__.padding_mask_crop) parameter. When enabled, this option crops the masked area with some user-specified padding and it'll also crop the same area from the original image. Both the image and mask are upscaled to a higher resolution for inpainting, and then overlaid on the original image. This is a quick and easy way to improve image quality without using a separate pipeline like [`StableDiffusionUpscalePipeline`]. Add the `padding_mask_crop` parameter to the pipeline call and set it to the desired padding value. ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image from PIL import Image generator = torch.Generator(device='cuda').manual_seed(0) pipeline = AutoPipelineForInpainting.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to('cuda') base = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/seashore_mask.png") image = pipeline("boat", image=base, mask_image=mask, strength=0.75, generator=generator, padding_mask_crop=32).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/baseline_inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">default inpaint image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/padding_mask_crop_inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint image with `padding_mask_crop` enabled</figcaption> </div> </div> ## Chained inpainting pipelines [`AutoPipelineForInpainting`] can be chained with other 🤗 Diffusers pipelines to edit their outputs. 
This is often useful for improving the output quality from your other diffusion pipelines, and if you're using multiple pipelines, it can be more memory-efficient to chain them together to keep the outputs in latent space and reuse the same pipeline components. ### Text-to-image-to-inpaint Chaining a text-to-image and inpainting pipeline allows you to inpaint the generated image, and you don't have to provide a base image to begin with. This makes it convenient to edit your favorite text-to-image outputs without having to generate an entirely new image. Start with the text-to-image pipeline to create a castle: ```py import torch from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() text2image = pipeline("concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k").images[0] ``` Load the mask image of the output from above: ```py mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_text-chain-mask.png") ``` And let's inpaint the masked area with a waterfall: ```py pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "digital painting of a fantasy waterfall, cloudy" image = pipeline(prompt=prompt, image=text2image, mask_image=mask_image).images[0] make_image_grid([text2image, mask_image, image], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-text-chain.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">text-to-image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-text-chain-out.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint</figcaption> </div> </div> ### Inpaint-to-image-to-image You can also chain an inpainting pipeline before another pipeline like image-to-image or an upscaler to improve the quality. 
Begin by inpainting an image: ```py import torch from diffusers import AutoPipelineForInpainting, AutoPipelineForImage2Image from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image_inpainting = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] # resize image to 1024x1024 for SDXL image_inpainting = image_inpainting.resize((1024, 1024)) ``` Now let's pass the image to another inpainting pipeline with SDXL's refiner model to enhance the image details and quality: ```py pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt=prompt, image=image_inpainting, mask_image=mask_image, output_type="latent").images[0] ``` <Tip> It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in latent space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE. For example, in the [Text-to-image-to-inpaint](#text-to-image-to-inpaint) section, Kandinsky 2.2 uses a different VAE class than the Stable Diffusion model so it won't work. But if you use Stable Diffusion v1.5 for both pipelines, then you can keep everything in latent space because they both use [`AutoencoderKL`]. </Tip> Finally, you can pass this image to an image-to-image pipeline to put the finishing touches on it. It is more efficient to use the [`~AutoPipelineForImage2Image.from_pipe`] method to reuse the existing pipeline components, and avoid unnecessarily loading all the pipeline components into memory again. 
```py pipeline = AutoPipelineForImage2Image.from_pipe(pipeline) # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt=prompt, image=image).images[0] make_image_grid([init_image, mask_image, image_inpainting, image], rows=2, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-to-image-chain.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">inpaint</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-to-image-final.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">image-to-image</figcaption> </div> </div> Image-to-image and inpainting are actually very similar tasks. Image-to-image generates a new image that resembles the existing provided image. Inpainting does the same thing, but it only transforms the image area defined by the mask and the rest of the image is unchanged. You can think of inpainting as a more precise tool for making specific changes and image-to-image has a broader scope for making more sweeping changes. ## Control image generation Getting an image to look exactly the way you want is challenging because the denoising process is random. While you can control certain aspects of generation by configuring parameters like `negative_prompt`, there are better and more efficient methods for controlling image generation. ### Prompt weighting Prompt weighting provides a quantifiable way to scale the representation of concepts in a prompt. You can use it to increase or decrease the magnitude of the text embedding vector for each concept in the prompt, which subsequently determines how much of each concept is generated. The [Compel](https://github.com/damian0815/compel) library offers an intuitive syntax for scaling the prompt weights and generating the embeddings. Learn how to create the embeddings in the [Prompt weighting](../using-diffusers/weighted_prompts) guide. Once you've generated the embeddings, pass them to the `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter in the [`AutoPipelineForInpainting`]. 
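As a rough, minimal sketch (the exact weighting syntax and setup are covered in the Prompt weighting guide; the `++` emphasis and single text encoder usage below assume a Stable Diffusion v1.5-style pipeline), the embeddings could be created with Compel like this:

```py
from compel import Compel

# build a Compel processor from the pipeline's tokenizer and text encoder
compel_proc = Compel(tokenizer=pipeline.tokenizer, text_encoder=pipeline.text_encoder)

# "++" increases the weight of the preceding word; adjust the weighting syntax to taste
prompt_embeds = compel_proc("concept art digital painting of an elven castle++, highly detailed, 8k")
negative_prompt_embeds = compel_proc("bad architecture, deformed, disfigured, poor details")
```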
The embeddings replace the `prompt` parameter: ```py import torch from diffusers import AutoPipelineForInpainting from diffusers.utils import make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel negative_prompt_embeds=negative_prompt_embeds, # generated from Compel image=init_image, mask_image=mask_image ).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### ControlNet ControlNet models are used with other diffusion models like Stable Diffusion, and they provide an even more flexible and accurate way to control how an image is generated. A ControlNet accepts an additional conditioning image input that guides the diffusion model to preserve the features in it. For example, let's condition an image with a ControlNet pretrained on inpaint images: ```py import torch import numpy as np from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline from diffusers.utils import load_image, make_image_grid # load ControlNet controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, variant="fp16") # pass ControlNet to the pipeline pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16" ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") # prepare control image def make_inpaint_condition(init_image, mask_image): init_image = np.array(init_image.convert("RGB")).astype(np.float32) / 255.0 mask_image = np.array(mask_image.convert("L")).astype(np.float32) / 255.0 assert init_image.shape[0:1] == mask_image.shape[0:1], "image and image_mask must have the same image size" init_image[mask_image > 0.5] = -1.0 # set as masked pixel init_image = np.expand_dims(init_image, 0).transpose(0, 3, 1, 2) init_image = torch.from_numpy(init_image) return init_image control_image = make_inpaint_condition(init_image, mask_image) ``` Now generate an image from the base, mask and control images. You'll notice features of the base image are strongly preserved in the generated image. 
```py prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, control_image=control_image).images[0] make_image_grid([init_image, mask_image, PIL.Image.fromarray(np.uint8(control_image[0][0])).convert('RGB'), image], rows=2, cols=2) ``` You can take this a step further and chain it with an image-to-image pipeline to apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion): ```py from diffusers import AutoPipelineForImage2Image pipeline = AutoPipelineForImage2Image.from_pretrained( "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "elden ring style castle" # include the token "elden ring style" in the prompt negative_prompt = "bad architecture, deformed, disfigured, poor details" image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image).images[0] make_image_grid([init_image, mask_image, image, image_elden_ring], rows=2, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-controlnet.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet inpaint</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint-img2img.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">image-to-image</figcaption> </div> </div> ## Optimize It can be difficult and slow to run diffusion models if you're resource constrained, but it doesn't have to be with a few optimization tricks. One of the biggest (and easiest) optimizations you can enable is switching to memory-efficient attention. If you're using PyTorch 2.0, [scaled-dot product attention](../optimization/fp16#scaled-dot-product-attention) is automatically enabled and you don't need to do anything else. For non-PyTorch 2.0 users, you can install and use [xFormers](../optimization/xformers)'s implementation of memory-efficient attention. Both options reduce memory usage and accelerate inference. You can also offload the model to the CPU to save even more memory: ```diff + pipeline.enable_xformers_memory_efficient_attention() + pipeline.enable_model_cpu_offload() ``` To speed-up your inference code even more, use [`torch_compile`](../optimization/fp16#torchcompile). You should wrap `torch.compile` around the most intensive component in the pipeline which is typically the UNet: ```py pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` Learn more in the [Reduce memory usage](../optimization/memory) and [Accelerate inference](../optimization/fp16) guides.
diffusers/docs/source/en/using-diffusers/inpaint.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/inpaint.md", "repo_id": "diffusers", "token_count": 14185 }
116
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Stable Video Diffusion

[[open-in-colab]]

[Stable Video Diffusion (SVD)](https://huggingface.co/papers/2311.15127) is a powerful image-to-video generation model that can generate 2-4 second high resolution (576x1024) videos conditioned on an input image.

This guide will show you how to use SVD to generate short videos from images.

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
!pip install -q -U diffusers transformers accelerate
```

There are two variants of this model, [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) and [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt). The SVD checkpoint is trained to generate 14 frames and the SVD-XT checkpoint is further finetuned to generate 25 frames.

You'll use the SVD-XT checkpoint for this guide.

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]

export_to_video(frames, "generated.mp4", fps=7)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"source image of a rocket"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"generated video from source image"</figcaption>
  </div>
</div>

## torch.compile

You can gain a 20-25% speedup at the expense of slightly increased memory by [compiling](../optimization/fp16#torchcompile) the UNet.

```diff
- pipe.enable_model_cpu_offload()
+ pipe.to("cuda")
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

## Reduce memory usage

Video generation is very memory intensive because you're essentially generating `num_frames` all at once, similar to text-to-image generation with a high batch size. To reduce the memory requirement, there are multiple options that trade-off inference speed for lower memory requirement:

- enable model offloading: each component of the pipeline is offloaded to the CPU once it's not needed anymore.
- enable feed-forward chunking: the feed-forward layer runs in a loop instead of running a single feed-forward with a huge batch size.
- reduce `decode_chunk_size`: the VAE decodes frames in chunks instead of decoding them all together. Setting `decode_chunk_size=1` decodes one frame at a time and uses the least amount of memory (we recommend adjusting this value based on your GPU memory) but the video might have some flickering.

```diff
- pipe.enable_model_cpu_offload()
- frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
+ pipe.enable_model_cpu_offload()
+ pipe.unet.enable_forward_chunking()
+ frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
```

Using all these tricks together should lower the memory requirement to less than 8GB VRAM.

## Micro-conditioning

Stable Video Diffusion also accepts micro-conditioning, in addition to the conditioning image, which allows more control over the generated video:

- `fps`: the frames per second of the generated video.
- `motion_bucket_id`: the motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
- `noise_aug_strength`: the amount of noise added to the conditioning image. The higher the values the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.

For example, to generate a video with more motion, use the `motion_bucket_id` and `noise_aug_strength` micro-conditioning parameters:

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator, motion_bucket_id=180, noise_aug_strength=0.1).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket_with_conditions.gif)
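The example above only varies `motion_bucket_id` and `noise_aug_strength`. If you also want to condition on frame rate, a minimal sketch (assuming the same `pipe`, `image`, and `generator` as above) passes the `fps` argument to the pipeline call as well. Note that this conditioning `fps` is separate from the `fps` you pass to `export_to_video`:

```python
# Sketch: additionally condition on frame rate with the `fps` argument.
# Assumes `pipe`, `image`, and `generator` are defined as in the example above.
frames = pipe(
    image,
    decode_chunk_size=8,
    generator=generator,
    fps=10,                  # conditioning signal seen by the model
    motion_bucket_id=180,
    noise_aug_strength=0.1,
).frames[0]
export_to_video(frames, "generated_fps10.mp4", fps=10)
```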
diffusers/docs/source/en/using-diffusers/svd.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/svd.md", "repo_id": "diffusers", "token_count": 1829 }
117
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Diffusers에 기여하는 방법 🧨 [[how-to-contribute-to-diffusers-]] 오픈 소스 커뮤니티에서의 기여를 환영합니다! 누구나 참여할 수 있으며, 코드뿐만 아니라 질문에 답변하거나 문서를 개선하는 등 모든 유형의 참여가 가치 있고 감사히 여겨집니다. 질문에 답변하고 다른 사람들을 도와주며 소통하고 문서를 개선하는 것은 모두 커뮤니티에게 큰 도움이 됩니다. 따라서 관심이 있다면 두려워하지 말고 참여해보세요! 누구나 우리의 공개 Discord 채널에서 👋 인사하며 시작할 수 있도록 장려합니다. 우리는 diffusion 모델의 최신 동향을 논의하고 질문을 하며 개인 프로젝트를 자랑하고 기여에 대해 서로 도와주거나 그냥 어울리기 위해 모이는 곳입니다☕. <a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a> 어떤 방식으로든 기여하려는 경우, 우리는 개방적이고 환영하며 친근한 커뮤니티의 일부가 되기 위해 노력하고 있습니다. 우리의 [행동 강령](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md)을 읽고 상호 작용 중에 이를 존중하도록 주의해주시기 바랍니다. 또한 프로젝트를 안내하는 [윤리 지침](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines)에 익숙해지고 동일한 투명성과 책임성의 원칙을 준수해주시기를 부탁드립니다. 우리는 커뮤니티로부터의 피드백을 매우 중요하게 생각하므로, 라이브러리를 개선하는 데 도움이 될 가치 있는 피드백이 있다고 생각되면 망설이지 말고 의견을 제시해주세요 - 모든 메시지, 댓글, 이슈, Pull Request(PR)는 읽히고 고려됩니다. ## 개요 [[overview]] 이슈에 있는 질문에 답변하는 것에서부터 코어 라이브러리에 새로운 diffusion 모델을 추가하는 것까지 다양한 방법으로 기여를 할 수 있습니다. 이어지는 부분에서 우리는 다양한 방법의 기여에 대한 개요를 난이도에 따라 오름차순으로 정리하였습니다. 모든 기여는 커뮤니티에게 가치가 있습니다. 1. [Diffusers 토론 포럼](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers)이나 [Discord](https://discord.gg/G7tWnz98XR)에서 질문에 대답하거나 질문을 할 수 있습니다. 2. [GitHub Issues 탭](https://github.com/huggingface/diffusers/issues/new/choose)에서 새로운 이슈를 열 수 있습니다. 3. [GitHub Issues 탭](https://github.com/huggingface/diffusers/issues)에서 이슈에 대답할 수 있습니다. 4. "Good first issue" 라벨이 지정된 간단한 이슈를 수정할 수 있습니다. [여기](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)를 참조하세요. 5. [문서](https://github.com/huggingface/diffusers/tree/main/docs/source)에 기여할 수 있습니다. 6. [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples)에 기여할 수 있습니다. 7. [예제](https://github.com/huggingface/diffusers/tree/main/examples)에 기여할 수 있습니다. 8. "Good second issue" 라벨이 지정된 어려운 이슈를 수정할 수 있습니다. [여기](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22)를 참조하세요. 9. 새로운 파이프라인, 모델 또는 스케줄러를 추가할 수 있습니다. ["새로운 파이프라인/모델"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) 및 ["새로운 스케줄러"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) 이슈를 참조하세요. 이 기여에 대해서는 [디자인 철학](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md)을 확인해주세요. 앞서 말한 대로, **모든 기여는 커뮤니티에게 가치가 있습니다**. 이어지는 부분에서 각 기여에 대해 조금 더 자세히 설명하겠습니다. 4부터 9까지의 모든 기여에는 Pull Request을 열어야 합니다. [Pull Request 열기](#how-to-open-a-pr)에서 자세히 설명되어 있습니다. ### 1. 
Diffusers 토론 포럼이나 Diffusers Discord에서 질문하고 답변하기 [[1-asking-and-answering-questions-on-the-diffusers-discussion-forum-or-on-the-diffusers-discord]] Diffusers 라이브러리와 관련된 모든 질문이나 의견은 [토론 포럼](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)이나 [Discord](https://discord.gg/G7tWnz98XR)에서 할 수 있습니다. 이러한 질문과 의견에는 다음과 같은 내용이 포함됩니다(하지만 이에 국한되지는 않습니다): - 지식을 공유하기 위해서 훈련 또는 추론 실험에 대한 결과 보고 - 개인 프로젝트 소개 - 비공식 훈련 예제에 대한 질문 - 프로젝트 제안 - 일반적인 피드백 - 논문 요약 - Diffusers 라이브러리를 기반으로 하는 개인 프로젝트에 대한 도움 요청 - 일반적인 질문 - Diffusion 모델에 대한 윤리적 질문 - ... 포럼이나 Discord에서 질문을 하면 커뮤니티가 지식을 공개적으로 공유하도록 장려되며, 향후 동일한 질문을 가진 초보자에게도 도움이 될 수 있습니다. 따라서 궁금한 질문은 언제든지 하시기 바랍니다. 또한, 이러한 질문에 답변하는 것은 커뮤니티에게 매우 큰 도움이 됩니다. 왜냐하면 이렇게 하면 모두가 학습할 수 있는 공개적인 지식을 문서화하기 때문입니다. **주의**하십시오. 질문이나 답변에 투자하는 노력이 많을수록 공개적으로 문서화된 지식의 품질이 높아집니다. 마찬가지로, 잘 정의되고 잘 답변된 질문은 모두에게 접근 가능한 고품질 지식 데이터베이스를 만들어줍니다. 반면에 잘못된 질문이나 답변은 공개 지식 데이터베이스의 전반적인 품질을 낮출 수 있습니다. 간단히 말해서, 고품질의 질문이나 답변은 *명확하고 간결하며 관련성이 있으며 이해하기 쉽고 접근 가능하며 잘 형식화되어 있어야* 합니다. 자세한 내용은 [좋은 이슈 작성 방법](#how-to-write-a-good-issue) 섹션을 참조하십시오. **채널에 대한 참고사항**: [*포럼*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)은 구글과 같은 검색 엔진에서 더 잘 색인화됩니다. 게시물은 인기에 따라 순위가 매겨지며, 시간순으로 정렬되지 않습니다. 따라서 이전에 게시한 질문과 답변을 쉽게 찾을 수 있습니다. 또한, 포럼에 게시된 질문과 답변은 쉽게 링크할 수 있습니다. 반면 *Discord*는 채팅 형식으로 되어 있어 빠른 대화를 유도합니다. 질문에 대한 답변을 빠르게 받을 수는 있겠지만, 시간이 지나면 질문이 더 이상 보이지 않습니다. 또한, Discord에서 이전에 게시된 정보를 찾는 것은 훨씬 어렵습니다. 따라서 포럼을 사용하여 고품질의 질문과 답변을 하여 커뮤니티를 위한 오래 지속되는 지식을 만들기를 권장합니다. Discord에서의 토론이 매우 흥미로운 답변과 결론을 이끌어내는 경우, 해당 정보를 포럼에 게시하여 향후 독자들에게 더 쉽게 액세스할 수 있도록 권장합니다. ### 2. GitHub 이슈 탭에서 새로운 이슈 열기 [[2-opening-new-issues-on-the-github-issues-tab]] 🧨 Diffusers 라이브러리는 사용자들이 마주치는 문제를 알려주는 덕분에 견고하고 신뢰할 수 있습니다. 따라서 이슈를 보고해주셔서 감사합니다. 기억해주세요, GitHub 이슈는 Diffusers 라이브러리와 직접적으로 관련된 기술적인 질문, 버그 리포트, 기능 요청 또는 라이브러리 디자인에 대한 피드백에 사용됩니다. 간단히 말해서, Diffusers 라이브러리의 **코드와 관련되지 않은** 모든 것(문서 포함)은 GitHub가 아닌 [포럼](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)이나 [Discord](https://discord.gg/G7tWnz98XR)에서 질문해야 합니다. **새로운 이슈를 열 때 다음 가이드라인을 고려해주세요**: - 이미 같은 이슈가 있는지 검색했는지 확인해주세요(GitHub의 이슈 탭에서 검색 기능을 사용하세요). - 다른(관련된) 이슈에 새로운 이슈를 보고하지 말아주세요. 다른 이슈와 관련이 높다면, 새로운 이슈를 열고 관련 이슈에 링크를 걸어주세요. - 이슈를 영어로 작성해주세요. 영어에 익숙하지 않다면, [DeepL](https://www.deepl.com/translator)과 같은 뛰어난 무료 온라인 번역 서비스를 사용하여 모국어에서 영어로 번역해주세요. - 이슈가 최신 Diffusers 버전으로 업데이트하면 해결될 수 있는지 확인해주세요. 이슈를 게시하기 전에 `python -c "import diffusers; print(diffusers.__version__)"` 명령을 실행하여 현재 사용 중인 Diffusers 버전이 최신 버전과 일치하거나 더 높은지 확인해주세요. - 새로운 이슈를 열 때 투자하는 노력이 많을수록 답변의 품질이 높아지고 Diffusers 이슈 전체의 품질도 향상됩니다. #### 2.1 재현 가능한 최소한의 버그 리포트 [[21-reproducible-minimal-bug-reports]] 버그 리포트는 항상 재현 가능한 코드 조각을 포함하고 가능한 한 최소한이어야 하며 간결해야 합니다. 자세히 말하면: - 버그를 가능한 한 좁혀야 합니다. **전체 코드 파일을 그냥 던지지 마세요**. - 코드의 서식을 지정해야 합니다. - Diffusers가 의존하는 외부 라이브러리를 제외한 다른 외부 라이브러리는 포함하지 마십시오. - **항상** 사용자 환경에 대한 모든 필요한 정보를 제공하세요. 이를 위해 쉘에서 `diffusers-cli env`를 실행하고 표시된 정보를 이슈에 복사하여 붙여넣을 수 있습니다. - 이슈를 설명해야 합니다. 독자가 문제가 무엇인지, 왜 문제가 되는지 모른다면 이슈를 해결할 수 없습니다. - **항상** 독자가 가능한 한 적은 노력으로 문제를 재현할 수 있어야 합니다. 코드 조각이 라이브러리가 없거나 정의되지 않은 변수 때문에 실행되지 않는 경우 독자가 도움을 줄 수 없습니다. 재현 가능한 코드 조각이 가능한 한 최소화되고 간단한 Python 셸에 복사하여 붙여넣을 수 있도록 해야 합니다. - 문제를 재현하기 위해 모델과/또는 데이터셋이 필요한 경우 독자가 해당 모델이나 데이터셋에 접근할 수 있도록 해야 합니다. 모델이나 데이터셋을 [Hub](https://huggingface.co)에 업로드하여 쉽게 다운로드할 수 있도록 할 수 있습니다. 문제 재현을 가능한 한 쉽게하기 위해 모델과 데이터셋을 가능한 한 작게 유지하려고 노력하세요. 자세한 내용은 [좋은 이슈 작성 방법](#how-to-write-a-good-issue) 섹션을 참조하세요. 
버그 리포트를 열려면 [여기](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&projects=&template=bug-report.yml)를 클릭하세요. #### 2.2. 기능 요청 [[22-feature-requests]] 세계적인 기능 요청은 다음 사항을 다룹니다: 1. 먼저 동기부여: * 라이브러리와 관련된 문제/불만이 있나요? 그렇다면 왜 그런지 설명해주세요. 문제를 보여주는 코드 조각을 제공하는 것이 가장 좋습니다. * 프로젝트에 필요한 기능인가요? 우리는 그에 대해 듣고 싶습니다! * 커뮤니티에 도움이 될 수 있는 것을 작업했고 그것에 대해 생각하고 있는가요? 멋지네요! 어떤 문제를 해결했는지 알려주세요. 2. 기능을 *상세히 설명하는* 문단을 작성해주세요; 3. 향후 사용을 보여주는 **코드 조각**을 제공해주세요; 4. 논문과 관련된 내용인 경우 링크를 첨부해주세요; 5. 도움이 될 수 있다고 생각되는 추가 정보(그림, 스크린샷 등)를 첨부해주세요. 기능 요청은 [여기](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=)에서 열 수 있습니다. #### 2.3 피드백 [[23-feedback]] 라이브러리 디자인과 그것이 왜 좋은지 또는 나쁜지에 대한 이유에 대한 피드백은 핵심 메인테이너가 사용자 친화적인 라이브러리를 만드는 데 엄청난 도움이 됩니다. 현재 디자인 철학을 이해하려면 [여기](https://huggingface.co/docs/diffusers/conceptual/philosophy)를 참조해 주세요. 특정 디자인 선택이 현재 디자인 철학과 맞지 않는다고 생각되면, 그 이유와 어떻게 변경되어야 하는지 설명해 주세요. 반대로 특정 디자인 선택이 디자인 철학을 너무 따르기 때문에 사용 사례를 제한한다고 생각되면, 그 이유와 어떻게 변경되어야 하는지 설명해 주세요. 특정 디자인 선택이 매우 유용하다고 생각되면, 향후 디자인 결정에 큰 도움이 되므로 이에 대한 의견을 남겨 주세요. 피드백에 관한 이슈는 [여기](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=)에서 열 수 있습니다. #### 2.4 기술적인 질문 [[24-technical-questions]] 기술적인 질문은 주로 라이브러리의 특정 코드가 왜 특정 방식으로 작성되었는지 또는 코드의 특정 부분이 무엇을 하는지에 대한 질문입니다. 질문하신 코드 부분에 대한 링크를 제공하고 해당 코드 부분이 이해하기 어려운 이유에 대한 자세한 설명을 해주시기 바랍니다. 기술적인 질문에 관한 이슈를 [여기](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml)에서 열 수 있습니다. #### 2.5 새로운 모델, 스케줄러 또는 파이프라인 추가 제안 [[25-proposal-to-add-a-new-model-scheduler-or-pipeline]] 만약 diffusion 모델 커뮤니티에서 Diffusers 라이브러리에 추가하고 싶은 새로운 모델, 파이프라인 또는 스케줄러가 있다면, 다음 정보를 제공해주세요: * Diffusion 파이프라인, 모델 또는 스케줄러에 대한 간단한 설명과 논문 또는 공개된 버전의 링크 * 해당 모델의 오픈 소스 구현에 대한 링크 * 모델 가중치가 있는 경우, 가중치의 링크 직접 모델에 기여하고 싶다면, 가장 잘 안내해드릴 수 있습니다. 또한, 가능하다면 구성 요소(모델, 스케줄러, 파이프라인 등)의 원저자를 GitHub 핸들로 태그하는 것을 잊지 마세요. 모델/파이프라인/스케줄러에 대한 요청을 [여기](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml)에서 열 수 있습니다. ### 3. GitHub 이슈 탭에서 문제에 대한 답변하기 [[3-answering-issues-on-the-github-issues-tab]] GitHub에서 이슈에 대한 답변을 하기 위해서는 Diffusers에 대한 기술적인 지식이 필요할 수 있지만, 정확한 답변이 아니더라도 모두가 시도해기를 권장합니다. 이슈에 대한 고품질 답변을 제공하기 위한 몇 가지 팁: - 가능한 한 간결하고 최소한으로 유지합니다. - 주제에 집중합니다. 이슈에 대한 답변은 해당 이슈에 관련된 내용에만 집중해야 합니다. - 자신의 주장을 증명하거나 장려하는 코드, 논문 또는 기타 출처는 링크를 제공하세요. - 코드로 답변합니다. 간단한 코드 조각이 이슈에 대한 답변이거나 이슈를 해결하는 방법을 보여준다면, 완전히 재현 가능한 코드 조각을 제공해주세요. 또한, 많은 이슈들은 단순히 주제와 무관하거나 다른 이슈의 중복이거나 관련이 없는 경우가 많습니다. 이러한 이슈들에 대한 답변을 제공하고, 이슈 작성자에게 더 정확한 정보를 제공하거나, 중복된 이슈에 대한 링크를 제공하거나, [포럼](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) 이나 [Discord](https://discord.gg/G7tWnz98XR)로 리디렉션하는 것은 메인테이너에게 큰 도움이 됩니다. 이슈가 올바른 버그 보고서이고 소스 코드에서 수정이 필요하다고 확인한 경우, 다음 섹션을 살펴보세요. 다음 모든 기여에 대해서는 PR을 열여야 합니다. [Pull Request 열기](#how-to-open-a-pr) 섹션에서 자세히 설명되어 있습니다. ### 4. "Good first issue" 고치기 [[4-fixing-a-good-first-issue]] *Good first issues*는 [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) 라벨로 표시됩니다. 일반적으로, 이슈는 이미 잠재적인 해결책이 어떻게 보이는지 설명하고 있어서 수정하기 쉽습니다. 만약 이슈가 아직 닫히지 않았고 이 문제를 해결해보고 싶다면, "이 이슈를 해결해보고 싶습니다."라는 메시지를 남기면 됩니다. 일반적으로 세 가지 시나리오가 있습니다: - a.) 이슈 설명에 이미 수정 사항을 제안하는 경우, 해결책이 이해되고 합리적으로 보인다면, PR 또는 드래프트 PR을 열어서 수정할 수 있습니다. - b.) 이슈 설명에 수정 사항이 제안되어 있지 않은 경우, 제안한 수정 사항이 가능할지 물어볼 수 있고, Diffusers 팀의 누군가가 곧 답변해줄 것입니다. 만약 어떻게 수정할지 좋은 아이디어가 있다면, 직접 PR을 열어도 됩니다. 
- c.) 이미 이 문제를 해결하기 위해 열린 PR이 있지만, 이슈가 아직 닫히지 않았습니다. PR이 더 이상 진행되지 않았다면, 새로운 PR을 열고 이전 PR에 링크를 걸면 됩니다. PR은 종종 원래 기여자가 갑자기 시간을 내지 못해 더 이상 진행하지 못하는 경우에 더 이상 진행되지 않게 됩니다. 이는 오픈 소스에서 자주 발생하는 일이며 매우 정상적인 상황입니다. 이 경우, 커뮤니티는 새로 시도하고 기존 PR의 지식을 활용해주면 매우 기쁠 것입니다. 이미 PR이 있고 활성화되어 있다면, 제안을 해주거나 PR을 검토하거나 PR에 기여할 수 있는지 물어보는 등 작성자를 도와줄 수 있습니다. ### 5. 문서에 기여하기 [[5-contribute-to-the-documentation]] 좋은 라이브러리는 항상 좋은 문서를 갖고 있습니다! 공식 문서는 라이브러리를 처음 사용하는 사용자들에게 첫 번째 접점 중 하나이며, 따라서 문서에 기여하는 것은 매우 가치 있는 기여입니다. 라이브러리에 기여하는 방법은 다양합니다: - 맞춤법이나 문법 오류를 수정합니다. - 공식 문서가 이상하게 표시되거나 링크가 깨진 경우, 올바르게 수정하는 데 시간을 내주시면 매우 기쁠 것입니다. - 문서의 입력 또는 출력 텐서의 모양이나 차원을 수정합니다. - 이해하기 어렵거나 잘못된 문서를 명확하게 합니다. - 오래된 코드 예제를 업데이트합니다. - 문서를 다른 언어로 번역합니다. [공식 Diffusers 문서 페이지](https://huggingface.co/docs/diffusers/index)에 표시된 모든 내용은 공식 문서의 일부이며, 해당 [문서 소스](https://github.com/huggingface/diffusers/tree/main/docs/source)에서 수정할 수 있습니다. 문서에 대한 변경 사항을 로컬에서 확인하는 방법은 [이 페이지](https://github.com/huggingface/diffusers/tree/main/docs)를 참조해주세요. ### 6. 커뮤니티 파이프라인에 기여하기 [[6-contribute-a-community-pipeline]] > [!TIP] > 커뮤니티 파이프라인에 대해 자세히 알아보려면 [커뮤니티 파이프라인](../using-diffusers/custom_pipeline_overview#community-pipelines) 가이드를 읽어보세요. 커뮤니티 파이프라인이 왜 필요한지 궁금하다면 GitHub 이슈 [#841](https://github.com/huggingface/diffusers/issues/841)를 확인해보세요 (기본적으로, 우리는 diffusion 모델이 추론에 사용될 수 있는 모든 방법을 유지할 수 없지만 커뮤니티가 이를 구축하는 것을 방해하고 싶지 않습니다). 커뮤니티 파이프라인에 기여하는 것은 창의성과 작업을 커뮤니티와 공유하는 좋은 방법입니다. [`DiffusionPipeline`]을 기반으로 빌드하여 `custom_pipeline` 매개변수를 설정함으로써 누구나 로드하고 사용할 수 있도록 할 수 있습니다. 이 섹션에서는 UNet이 단일 순방향 패스만 수행하고 스케줄러를 한 번 호출하는 간단한 파이프라인 (단계별 파이프라인)을 만드는 방법을 안내합니다. 1. 커뮤니티 파이프라인을 위한 one_step_unet.py 파일을 생성하세요. 이 파일은 사용자에 의해 설치되는 패키지를 포함할 수 있지만, [`DiffusionPipeline`]에서 모델 가중치와 스케줄러 구성을 로드하기 위해 하나의 파이프라인 클래스만 있어야 합니다. `__init__` 함수에 UNet과 스케줄러를 추가하세요. 또한 [`~DiffusionPipeline.save_pretrained`]를 사용하여 파이프라인과 그 구성 요소를 저장할 수 있도록 `register_modules` 함수를 추가해야 합니다. ```py from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) ``` 1. forward 패스에서 (`__call__`로 정의하는 것을 추천합니다), 원하는 어떤 기능이든 추가할 수 있습니다. "one-step" 파이프라인의 경우, 무작위 이미지를 생성하고 `timestep=1`로 설정하여 UNet과 스케줄러를 한 번 호출합니다. ```py from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) def __call__(self): image = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), ) timestep = 1 model_output = self.unet(image, timestep).sample scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample return scheduler_output ``` 이제 UNet과 스케줄러를 전달하여 파이프라인을 실행하거나, 파이프라인 구조가 동일한 경우 사전 학습된 가중치를 로드할 수 있습니다. ```py from diffusers import DDPMScheduler, UNet2DModel scheduler = DDPMScheduler() unet = UNet2DModel() pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler) output = pipeline() # load pretrained weights pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) output = pipeline() ``` 파이프라인을 GitHub 커뮤니티 파이프라인 또는 Hub 커뮤니티 파이프라인으로 공유할 수 있습니다. 
<hfoptions id="pipeline type"> <hfoption id="GitHub pipeline"> GitHub 파이프라인을 공유하려면 Diffusers [저장소](https://github.com/huggingface/diffusers)에서 Pull Request를 열고 one_step_unet.py 파일을 [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) 하위 폴더에 추가하세요. </hfoption> <hfoption id="Hub pipeline"> Hub 파이프라인을 공유하려면, 허브에 모델 저장소를 생성하고 one_step_unet.py 파일을 업로드하세요. </hfoption> </hfoptions> ### 7. 훈련 예제에 기여하기 [[7-contribute-to-training-examples]] Diffusers 예제는 [examples](https://github.com/huggingface/diffusers/tree/main/examples) 폴더에 있는 훈련 스크립트의 모음입니다. 두 가지 유형의 훈련 예제를 지원합니다: - 공식 훈련 예제 - 연구용 훈련 예제 연구용 훈련 예제는 [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects)에 위치하며, 공식 훈련 예제는 `research_projects` 및 `community` 폴더를 제외한 [examples](https://github.com/huggingface/diffusers/tree/main/examples)의 모든 폴더를 포함합니다. 공식 훈련 예제는 Diffusers의 핵심 메인테이너가 유지 관리하며, 연구용 훈련 예제는 커뮤니티가 유지 관리합니다. 이는 공식 파이프라인 vs 커뮤니티 파이프라인에 대한 [6. 커뮤니티 파이프라인 기여하기](#6-contribute-a-community-pipeline)에서 제시한 이유와 동일합니다: 핵심 메인테이너가 diffusion 모델의 모든 가능한 훈련 방법을 유지 관리하는 것은 현실적으로 불가능합니다. Diffusers 핵심 메인테이너와 커뮤니티가 특정 훈련 패러다임을 너무 실험적이거나 충분히 대중적이지 않다고 판단한다면, 해당 훈련 코드는 `research_projects` 폴더에 넣고 작성자에 의해 관리되어야 합니다. 공식 훈련 및 연구 예제는 하나 이상의 훈련 스크립트, requirements.txt 파일 및 README.md 파일을 포함하는 디렉토리로 구성됩니다. 사용자가 훈련 예제를 사용하려면 리포지토리를 복제해야 합니다: ```bash git clone https://github.com/huggingface/diffusers ``` 그리고 훈련에 필요한 모든 추가적인 의존성도 설치해야 합니다: ```bash pip install -r /examples/<your-example-folder>/requirements.txt ``` 따라서 예제를 추가할 때, `requirements.txt` 파일은 훈련 예제에 필요한 모든 pip 종속성을 정의해야 합니다. 이렇게 설치된 모든 종속성을 사용하여 사용자가 예제의 훈련 스크립트를 실행할 수 있어야 합니다. 예를 들어, [DreamBooth `requirements.txt` 파일](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt)을 참조하세요. Diffusers 라이브러리의 훈련 예제는 다음 철학을 따라야 합니다: - 예제를 실행하는 데 필요한 모든 코드는 하나의 Python 파일에 있어야 합니다. - 사용자는 명령 줄에서 `python <your-example>.py --args`와 같이 예제를 실행할 수 있어야 합니다. - 예제는 간단하게 유지되어야 하며, Diffusers를 사용한 훈련 방법을 보여주는 **예시**로 사용되어야 합니다. 예제 스크립트의 목적은 최첨단 diffusion 모델을 만드는 것이 아니라, 너무 많은 사용자 정의 로직을 추가하지 않고 이미 알려진 훈련 방법을 재현하는 것입니다. 이 점의 부산물로서, 예제는 좋은 교육 자료로써의 역할을 하기 위해 노력합니다. 예제에 기여하기 위해서는, 이미 존재하는 예제인 [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)와 같은 예제를 참고하여 어떻게 보여야 하는지에 대한 아이디어를 얻는 것이 매우 권장됩니다. Diffusers와 긴밀하게 통합되어 있기 때문에, 기여자들이 [Accelerate 라이브러리](https://github.com/huggingface/accelerate)를 사용하는 것을 강력히 권장합니다. 예제 스크립트가 작동하는 경우, 반드시 예제를 정확하게 사용하는 방법을 설명하는 포괄적인 `README.md`를 추가해야 합니다. 이 README에는 다음이 포함되어야 합니다: - [여기](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch)에 표시된 예제 스크립트를 실행하는 방법에 대한 예제 명령어. - [여기](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5)에 표시된 훈련 결과 (로그, 모델 등)에 대한 링크로 사용자가 기대할 수 있는 내용을 보여줍니다. - 비공식/연구용 훈련 예제를 추가하는 경우, **반드시** git 핸들을 포함하여 이 훈련 예제를 유지 관리할 것임을 명시하는 문장을 추가해야 합니다. [여기](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations)에 표시된 것과 같습니다. 만약 공식 훈련 예제에 기여하는 경우, [examples/test_examples.py](https://github.com/huggingface/diffusers/blob/main/examples/test_examples.py)에 테스트를 추가하는 것도 확인해주세요. 비공식 훈련 예제에는 이 작업이 필요하지 않습니다. ### 8. "Good second issue" 고치기 [[8-fixing-a-good-second-issue]] "Good second issue"는 [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) 라벨로 표시됩니다. 
Good second issue는 [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)보다 해결하기가 더 복잡합니다. 이슈 설명은 일반적으로 이슈를 해결하는 방법에 대해 덜 구체적이며, 관심 있는 기여자는 라이브러리에 대한 꽤 깊은 이해가 필요합니다. Good second issue를 해결하고자 하는 경우, 해당 이슈를 해결하기 위해 PR을 열고 PR을 이슈에 링크하세요. 이미 해당 이슈에 대한 PR이 열려있지만 병합되지 않은 경우, 왜 병합되지 않았는지 이해하기 위해 살펴보고 개선된 PR을 열어보세요. Good second issue는 일반적으로 Good first issue 이슈보다 병합하기가 더 어려우므로, 핵심 메인테이너에게 도움을 요청하는 것이 좋습니다. PR이 거의 완료된 경우, 핵심 메인테이너는 PR에 참여하여 커밋하고 병합을 진행할 수 있습니다. ### 9. 파이프라인, 모델, 스케줄러 추가하기 [[9-adding-pipelines-models-schedulers]] 파이프라인, 모델, 스케줄러는 Diffusers 라이브러리에서 가장 중요한 부분입니다. 이들은 최첨단 diffusion 기술에 쉽게 접근하도록 하며, 따라서 커뮤니티가 강력한 생성형 AI 애플리케이션을 만들 수 있도록 합니다. 새로운 모델, 파이프라인 또는 스케줄러를 추가함으로써, 사용자 인터페이스에 새로운 강력한 사용 사례를 활성화할 수 있으며, 이는 전체 생성형 AI 생태계에 매우 중요한 가치를 제공할 수 있습니다. Diffusers에는 세 가지 구성 요소에 대한 여러 개발 요청이 있습니다. 특정 구성 요소를 아직 정확히 어떤 것을 추가하고 싶은지 모르는 경우, 다음 링크를 참조하세요: - [모델 또는 파이프라인](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) - [스케줄러](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) 세 가지 구성 요소를 추가하기 전에, [철학 가이드](philosophy)를 읽어보는 것을 강력히 권장합니다. 세 가지 구성 요소 중 어느 것을 추가하든, 디자인 철학과 관련된 API 일관성을 유지하기 위해 우리의 디자인 철학과 크게 다른 구성 요소는 병합할 수 없습니다. 디자인 선택에 근본적으로 동의하지 않는 경우, [피드백 이슈](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=)를 열어 해당 디자인 패턴/선택이 라이브러리 전체에서 변경되어야 하는지, 디자인 철학을 업데이트해야 하는지에 대해 논의할 수 있습니다. 라이브러리 전체의 일관성은 우리에게 매우 중요합니다. PR에 원본 코드베이스/논문 링크를 추가하고, 가능하면 PR에서 원래 작성자에게 직접 알림을 보내어 진행 상황을 따라갈 수 있도록 해주세요. PR에서 막힌 경우나 도움이 필요한 경우, 첫 번째 리뷰나 도움을 요청하는 메시지를 남기는 것을 주저하지 마세요. #### Copied from mechanism [[copied-from-mechanism]] `# Copied from mechanism` 은 파이프라인, 모델 또는 스케줄러 코드를 추가할 때 이해해야 할 독특하고 중요한 기능입니다. 이것은 Diffusers 코드베이스 전반에서 볼 수 있으며, 이를 사용하는 이유는 코드베이스를 이해하고 유지 관리하기 쉽게 만들기 위해서입니다. `# Copied from mechanism` 으로 표시된 코드는 복사한 코드와 정확히 동일하도록 강제됩니다. 이렇게 하면 `make fix-copies`를 실행할 때마다 여러 파일에 걸쳐 변경 사항을 쉽게 업데이트하고 전파할 수 있습니다. 예를 들어, 아래 코드 예제에서 [`~diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput`]은 원래 코드이며, `AltDiffusionPipelineOutput`은 `# Copied from mechanism`을 사용하여 복사합니다. 유일한 차이점은 클래스 접두사를 `Stable`에서 `Alt`로 변경한 것입니다. ```py # Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt class AltDiffusionPipelineOutput(BaseOutput): """ Output class for Alt Diffusion pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`List[bool]`) List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. """ ``` 더 자세히 알고 싶다면 [~Don't~ Repeat Yourself*](https://huggingface.co/blog/transformers-design-philosophy#4-machine-learning-models-are-static) 블로그 포스트의 이 섹션을 읽어보세요. ## 좋은 이슈 작성 방법 [[how-to-write-a-good-issue]] **이슈를 잘 작성할수록 빠르게 해결될 가능성이 높아집니다.** 1. 이슈에 적절한 템플릿을 사용했는지 확인하세요. [새 이슈를 열 때](https://github.com/huggingface/diffusers/issues/new/choose) 올바른 템플릿을 선택해야 합니다. *버그 보고서*, *기능 요청*, *API 디자인에 대한 피드백*, *새로운 모델/파이프라인/스케줄러 추가*, *포럼*, 또는 빈 이슈 중에서 선택하세요. 이슈를 열 때 올바른 템플릿을 선택하는 것이 중요합니다. 2. **명확성**: 이슈에 적합한 제목을 지정하세요. 이슈 설명을 가능한 간단하게 작성하세요. 이슈를 이해하고 해결하는 데 걸리는 시간을 줄이기 위해 가능한 한 명확하게 작성하세요. 하나의 이슈에 대해 여러 문제를 포함하지 않도록 주의하세요. 여러 문제를 발견한 경우, 각각의 이슈를 개별적으로 열어주세요. 버그인 경우, 어떤 버그인지 가능한 한 정확하게 설명해야 합니다. 
"diffusers에서 오류"와 같이 간단히 작성하지 마세요. 3. **재현 가능성**: 재현 가능한 코드 조각이 없으면 해결할 수 없습니다. 버그를 발견한 경우, 유지 관리자는 그 버그를 재현할 수 있어야 합니다. 이슈에 재현 가능한 코드 조각을 포함해야 합니다. 코드 조각은 Python 인터프리터에 복사하여 붙여넣을 수 있는 형태여야 합니다. 코드 조각이 작동해야 합니다. 즉, 누락된 import나 이미지에 대한 링크가 없어야 합니다. 이슈에는 오류 메시지와 정확히 동일한 오류 메시지를 재현하기 위해 수정하지 않고 복사하여 붙여넣을 수 있는 코드 조각이 포함되어야 합니다. 이슈에 사용자의 로컬 모델 가중치나 로컬 데이터를 사용하는 경우, 독자가 액세스할 수 없는 경우 이슈를 해결할 수 없습니다. 데이터나 모델을 공유할 수 없는 경우, 더미 모델이나 더미 데이터를 만들어 사용해보세요. 4. **간결성**: 가능한 한 간결하게 유지하여 독자가 문제를 빠르게 이해할 수 있도록 도와주세요. 문제와 관련이 없는 코드나 정보는 모두 제거해주세요. 버그를 발견한 경우, 문제를 설명하는 가장 간단한 코드 예제를 만들어보세요. 버그를 발견한 후에는 작업 흐름 전체를 문제에 던지는 것이 아니라, 에러가 발생하는 훈련 코드의 어느 부분이 문제인지 먼저 이해하고 몇 줄로 재현해보세요. 전체 데이터셋 대신 더미 데이터를 사용해보세요. 5. 링크 추가하기. 특정한 이름, 메서드, 또는 모델을 참조하는 경우, 독자가 더 잘 이해할 수 있도록 링크를 제공해주세요. 특정 PR이나 이슈를 참조하는 경우, 해당 이슈에 링크를 걸어주세요. 독자가 무엇을 말하는지 알고 있다고 가정하지 마세요. 이슈에 링크를 추가할수록 좋습니다. 6. 포맷팅. 코드를 파이썬 코드 구문으로, 에러 메시지를 일반 코드 구문으로 형식화하여 이슈를 깔끔하게 작성하세요. 자세한 내용은 [GitHub 공식 포맷팅 문서](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax)를 참조하세요. 7. 여러분의 이슈를 단순히 해결해야 할 티켓으로 생각하지 말고, 잘 작성된 백과사전 항목으로 생각해보세요. 추가된 모든 이슈는 공개적으로 이용 가능한 지식에 대한 기여입니다. 잘 작성된 이슈를 추가함으로써 메인테이너가 여러분의 이슈를 더 쉽게 해결할 수 있게 할 뿐만 아니라, 전체 커뮤니티가 라이브러리의 특정 측면을 더 잘 이해할 수 있도록 도움을 주게 됩니다. ## 좋은 PR 작성 방법 [[how-to-write-a-good-pr]] 1. 카멜레온이 되세요. 기존의 디자인 패턴과 구문을 이해하고, 여러분이 추가하는 코드가 기존 코드베이스와 자연스럽게 어우러지도록 해야 합니다. 기존 디자인 패턴이나 사용자 인터페이스와 크게 다른 Pull Request들은 병합되지 않습니다. 2. 레이저처럼 집중하세요. Pull Request는 하나의 문제, 오직 하나의 문제만 해결해야 합니다. "이왕 추가하는 김에 다른 문제도 고치자"는 함정에 빠지지 않도록 주의하세요. 여러 개의 관련 없는 문제를 해결하는 한 번에 해결하는 Pull Request들은 검토하기가 훨씬 더 어렵습니다. 3. 도움이 되는 경우, 추가한 내용이 어떻게 사용되는지 예제 코드 조각을 추가해보세요. 4. Pull Request의 제목은 기여 내용을 요약해야 합니다. 5. Pull Request가 이슈를 해결하는 경우, Pull Request의 설명에 이슈 번호를 언급하여 연결되도록 해주세요 (이슈를 참조하는 사람들이 작업 중임을 알 수 있도록). 6. 진행 중인 작업을 나타내려면 제목에 `[WIP]`를 접두사로 붙여주세요. 이는 중복 작업을 피하고, 병합 준비가 된 PR과 구분할 수 있도록 도움이 됩니다. 7. [좋은 이슈를 작성하는 방법](#how-to-write-a-good-issue)에 설명된 대로 텍스트를 구성하고 형식을 지정해보세요. 8. 기존 테스트가 통과하는지 확인하세요 9. 높은 커버리지를 가진 테스트를 추가하세요. 품질 테스트가 없으면 병합할 수 없습니다. - 새로운 `@slow` 테스트를 추가하는 경우, 다음 명령을 사용하여 통과하는지 확인하세요. `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI는 느린 테스트를 실행하지 않지만, GitHub Actions는 매일 실행합니다! 10. 모든 공개 메서드는 마크다운과 잘 작동하는 정보성 docstring을 가져야 합니다. 예시로 [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py)를 참조하세요. 11. 리포지토리가 빠르게 성장하고 있기 때문에, 리포지토리에 큰 부담을 주는 파일이 추가되지 않도록 주의해야 합니다. 이미지, 비디오 및 기타 텍스트가 아닌 파일을 포함합니다. 이러한 파일을 배치하기 위해 hf.co 호스팅 `dataset`인 [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) 또는 [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images)를 활용하는 것이 우선입니다. 외부 기여인 경우, 이미지를 PR에 추가하고 Hugging Face 구성원에게 이미지를 이 데이터셋으로 이동하도록 요청하세요. ## PR을 열기 위한 방법 [[how-to-open-a-pr]] 코드를 작성하기 전에, 이미 누군가가 같은 작업을 하고 있는지 확인하기 위해 기존의 PR이나 이슈를 검색하는 것이 좋습니다. 확실하지 않은 경우, 피드백을 받기 위해 이슈를 열어보는 것이 항상 좋은 아이디어입니다. 🧨 Diffusers에 기여하기 위해서는 기본적인 `git` 사용법을 알아야 합니다. `git`은 가장 쉬운 도구는 아니지만, 가장 훌륭한 매뉴얼을 가지고 있습니다. 셸에서 `git --help`을 입력하고 즐기세요. 책을 선호하는 경우, [Pro Git](https://git-scm.com/book/en/v2)은 매우 좋은 참고 자료입니다. 다음 단계를 따라 기여를 시작하세요 ([지원되는 Python 버전](https://github.com/huggingface/diffusers/blob/main/setup.py#L244)): 1. 저장소 페이지에서 'Fork' 버튼을 클릭하여 [저장소](https://github.com/huggingface/diffusers)를 포크합니다. 이렇게 하면 코드의 사본이 GitHub 사용자 계정에 생성됩니다. 2. 
포크한 저장소를 로컬 디스크에 클론하고, 기본 저장소를 원격으로 추가하세요: ```bash $ git clone git@github.com:<your GitHub handle>/diffusers.git $ cd diffusers $ git remote add upstream https://github.com/huggingface/diffusers.git ``` 3. 개발 변경 사항을 보관할 새로운 브랜치를 생성하세요: ```bash $ git checkout -b a-descriptive-name-for-my-changes ``` `main` 브랜치 위에서 **절대** 작업하지 마세요. 4. 가상 환경에서 다음 명령을 실행하여 개발 환경을 설정하세요: ```bash $ pip install -e ".[dev]" ``` 만약 저장소를 이미 클론한 경우, 가장 최신 변경 사항을 가져오기 위해 `git pull`을 실행해야 할 수도 있습니다. 5. 기능을 브랜치에서 개발하세요. 기능을 작업하는 동안 테스트 스위트가 통과되는지 확인해야 합니다. 다음과 같이 변경 사항에 영향을 받는 테스트를 실행해야 합니다: ```bash $ pytest tests/<TEST_TO_RUN>.py ``` 테스트를 실행하기 전에 테스트를 위해 필요한 의존성들을 설치하였는지 확인하세요. 다음의 커맨드를 통해서 확인할 수 있습니다: ```bash $ pip install -e ".[test]" ``` 다음 명령어로 전체 테스트 묶음 실행할 수도 있지만, Diffusers가 많이 성장하였기 때문에 결과를 적당한 시간 내에 생성하기 위해서는 강력한 컴퓨터가 필요합니다. 다음은 해당 명령어입니다: ```bash $ make test ``` 🧨 Diffusers는 소스 코드를 일관되게 포맷팅하기 위해 `black`과 `isort`를 사용합니다. 변경 사항을 적용한 후에는 다음과 같이 자동 스타일 수정 및 코드 검증을 적용할 수 있습니다: ```bash $ make style ``` 🧨 Diffusers `ruff`와 몇개의 커스텀 스크립트를 이용하여 코딩 실수를 확인합니다. 품질 제어는 CI에서 작동하지만, 동일한 검사를 다음을 통해서도 할 수 있습니다: ```bash $ make quality ``` 변경사항에 대해 만족한다면 `git add`를 사용하여 변경된 파일을 추가하고 `git commit`을 사용하여 변경사항에 대해 로컬상으로 저장한다: ```bash $ git add modified_file.py $ git commit -m "A descriptive message about your changes." ``` 코드를 정기적으로 원본 저장소와 동기화하는 것은 좋은 아이디어입니다. 이렇게 하면 변경 사항을 빠르게 반영할 수 있습니다: ```bash $ git pull upstream main ``` 변경 사항을 계정에 푸시하려면 다음을 사용하세요: ```bash $ git push -u origin a-descriptive-name-for-my-changes ``` 6. 만족하셨다면, GitHub에서 포크한 웹페이지로 이동하여 'Pull request'를 클릭하여 변경사항을 프로젝트 메인테이너에게 검토를 요청합니다. 7. 메인테이너가 변경 사항을 요청하는 것은 괜찮습니다. 핵심 기여자들에게도 일어나는 일입니다! 따라서 변경 사항을 Pull request에서 볼 수 있도록 로컬 브랜치에서 작업하고 변경 사항을 포크에 푸시하면 자동으로 Pull request에 나타납니다. ### 테스트 [[tests]] 라이브러리 동작과 여러 예제를 테스트하기 위해 포괄적인 테스트 묶음이 포함되어 있습니다. 라이브러리 테스트는 [tests 폴더](https://github.com/huggingface/diffusers/tree/main/tests)에서 찾을 수 있습니다. `pytest`와 `pytest-xdist`를 선호하는 이유는 더 빠르기 때문입니다. 루트 디렉토리에서 라이브러리를 위해 `pytest`로 테스트를 실행하는 방법은 다음과 같습니다: ```bash $ python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` 사실, `make test`는 이렇게 구현되어 있습니다! 작업 중인 기능만 테스트하기 위해 더 작은 테스트 세트를 지정할 수 있습니다. 기본적으로 느린 테스트는 건너뜁니다. `RUN_SLOW` 환경 변수를 `yes`로 설정하여 실행할 수 있습니다. 이는 많은 기가바이트의 모델을 다운로드합니다. 충분한 디스크 공간과 좋은 인터넷 연결 또는 많은 인내심이 필요합니다! ```bash $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` `unittest`는 완전히 지원됩니다. 다음은 `unittest`를 사용하여 테스트를 실행하는 방법입니다: ```bash $ python -m unittest discover -s tests -t . -v $ python -m unittest discover -s examples -t examples -v ``` ### upstream(HuggingFace) main과 forked main 동기화하기 [[syncing-forked-main-with-upstream-huggingface-main]] upstream 저장소에 불필요한 참조 노트를 추가하고 관련 개발자에게 알림을 보내는 것을 피하기 위해, forked 저장소의 main 브랜치를 동기화할 때 다음 단계를 따르세요: 1. 가능한 경우, forked 저장소에서 브랜치와 PR을 사용하여 upstream과 동기화하는 것을 피하세요. 대신 forked main으로 직접 병합하세요. 2. PR이 절대적으로 필요한 경우, 브랜치를 체크아웃한 후 다음 단계를 사용하세요: ```bash $ git checkout -b your-branch-for-syncing $ git pull --squash --no-commit upstream main $ git commit -m '<your message without GitHub references>' $ git push --set-upstream origin your-branch-for-syncing ``` ### 스타일 가이드 [[style-guide]] Documentation string에 대해서는, 🧨 Diffusers는 [Google 스타일](https://google.github.io/styleguide/pyguide.html)을 따릅니다.
diffusers/docs/source/ko/conceptual/contribution.md/0
{ "file_path": "diffusers/docs/source/ko/conceptual/contribution.md", "repo_id": "diffusers", "token_count": 35978 }
118
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # 훑어보기 Diffusion 모델은 이미지나 오디오와 같은 관심 샘플들을 생성하기 위해 랜덤 가우시안 노이즈를 단계별로 제거하도록 학습됩니다. 이로 인해 생성 AI에 대한 관심이 매우 높아졌으며, 인터넷에서 diffusion 생성 이미지의 예를 본 적이 있을 것입니다. 🧨 Diffusers는 누구나 diffusion 모델들을 널리 이용할 수 있도록 하기 위한 라이브러리입니다. 개발자든 일반 사용자든 이 훑어보기를 통해 🧨 Diffusers를 소개하고 빠르게 생성할 수 있도록 도와드립니다! 알아야 할 라이브러리의 주요 구성 요소는 크게 세 가지입니다: * [`DiffusionPipeline`]은 추론을 위해 사전 학습된 diffusion 모델에서 샘플을 빠르게 생성하도록 설계된 높은 수준의 엔드투엔드 클래스입니다. * Diffusion 시스템 생성을 위한 빌딩 블록으로 사용할 수 있는 널리 사용되는 사전 학습된 [model](./api/models) 아키텍처 및 모듈. * 다양한 [schedulers](./api/schedulers/overview) - 학습을 위해 노이즈를 추가하는 방법과 추론 중에 노이즈 제거된 이미지를 생성하는 방법을 제어하는 알고리즘입니다. 훑어보기에서는 추론을 위해 [`DiffusionPipeline`]을 사용하는 방법을 보여준 다음, 모델과 스케줄러를 결합하여 [`DiffusionPipeline`] 내부에서 일어나는 일을 복제하는 방법을 안내합니다. <Tip> 훑어보기는 간결한 버전의 🧨 Diffusers 소개로서 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) 빠르게 시작할 수 있도록 도와드립니다. 디퓨저의 목표, 디자인 철학, 핵심 API에 대한 추가 세부 정보를 자세히 알아보려면 노트북을 확인하세요! </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```py # 주석 풀어서 Colab에 필요한 라이브러리 설치하기. #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index)는 추론 및 학습을 위한 모델 로딩 속도를 높여줍니다. - [🤗 Transformers](https://huggingface.co/docs/transformers/index)는 [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview)과 같이 가장 많이 사용되는 diffusion 모델을 실행하는 데 필요합니다. ## DiffusionPipeline [`DiffusionPipeline`] 은 추론을 위해 사전 학습된 diffusion 시스템을 사용하는 가장 쉬운 방법입니다. 모델과 스케줄러를 포함하는 엔드 투 엔드 시스템입니다. 다양한 작업에 [`DiffusionPipeline`]을 바로 사용할 수 있습니다. 아래 표에서 지원되는 몇 가지 작업을 살펴보고, 지원되는 작업의 전체 목록은 [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) 표에서 확인할 수 있습니다. | **Task** | **Description** | **Pipeline** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) | 먼저 [`DiffusionPipeline`]의 인스턴스를 생성하고 다운로드할 파이프라인 체크포인트를 지정합니다. 허깅페이스 허브에 저장된 모든 [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads)에 대해 [`DiffusionPipeline`]을 사용할 수 있습니다. 
이 훑어보기에서는 text-to-image 생성을 위한 [`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 체크포인트를 로드합니다. <Tip warning={true}> [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) 모델의 경우, 모델을 실행하기 전에 [라이선스](https://huggingface.co/spaces/CompVis/stable-diffusion-license)를 먼저 주의 깊게 읽어주세요. 🧨 Diffusers는 불쾌하거나 유해한 콘텐츠를 방지하기 위해 [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py)를 구현하고 있지만, 모델의 향상된 이미지 생성 기능으로 인해 여전히 잠재적으로 유해한 콘텐츠가 생성될 수 있습니다. </Tip> [`~DiffusionPipeline.from_pretrained`] 방법으로 모델 로드하기: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") ``` The [`DiffusionPipeline`]은 모든 모델링, 토큰화, 스케줄링 컴포넌트를 다운로드하고 캐시합니다. Stable Diffusion Pipeline은 무엇보다도 [`UNet2DConditionModel`]과 [`PNDMScheduler`]로 구성되어 있음을 알 수 있습니다: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` 이 모델은 약 14억 개의 파라미터로 구성되어 있으므로 GPU에서 파이프라인을 실행할 것을 강력히 권장합니다. PyTorch에서와 마찬가지로 제너레이터 객체를 GPU로 이동할 수 있습니다: ```python >>> pipeline.to("cuda") ``` 이제 `파이프라인`에 텍스트 프롬프트를 전달하여 이미지를 생성한 다음 노이즈가 제거된 이미지에 액세스할 수 있습니다. 기본적으로 이미지 출력은 [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) 객체로 감싸집니다. ```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> `save`를 호출하여 이미지를 저장합니다: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### 로컬 파이프라인 파이프라인을 로컬에서 사용할 수도 있습니다. 유일한 차이점은 가중치를 먼저 다운로드해야 한다는 점입니다: ```bash !git lfs install !git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` 그런 다음 저장된 가중치를 파이프라인에 로드합니다: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") ``` 이제 위 섹션에서와 같이 파이프라인을 실행할 수 있습니다. ### 스케줄러 교체 스케줄러마다 노이즈 제거 속도와 품질이 서로 다릅니다. 자신에게 가장 적합한 스케줄러를 찾는 가장 좋은 방법은 직접 사용해 보는 것입니다! 🧨 Diffusers의 주요 기능 중 하나는 스케줄러 간에 쉽게 전환이 가능하다는 것입니다. 예를 들어, 기본 스케줄러인 [`PNDMScheduler`]를 [`EulerDiscreteScheduler`]로 바꾸려면, [`~diffusers.ConfigMixin.from_config`] 메서드를 사용하여 로드하세요: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` 새 스케줄러로 이미지를 생성해보고 어떤 차이가 있는지 확인해 보세요! 다음 섹션에서는 모델과 스케줄러라는 [`DiffusionPipeline`]을 구성하는 컴포넌트를 자세히 살펴보고 이러한 컴포넌트를 사용하여 고양이 이미지를 생성하는 방법을 배워보겠습니다. ## 모델 대부분의 모델은 노이즈가 있는 샘플을 가져와 각 시간 간격마다 노이즈가 적은 이미지와 입력 이미지 사이의 차이인 *노이즈 잔차*(다른 모델은 이전 샘플을 직접 예측하거나 속도 또는 [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)을 예측하는 학습을 합니다)을 예측합니다. 모델을 믹스 앤 매치하여 다른 diffusion 시스템을 만들 수 있습니다. 모델은 [`~ModelMixin.from_pretrained`] 메서드로 시작되며, 이 메서드는 모델 가중치를 로컬에 캐시하여 다음에 모델을 로드할 때 더 빠르게 로드할 수 있습니다. 
훑어보기에서는 고양이 이미지에 대해 학습된 체크포인트가 있는 기본적인 unconditional 이미지 생성 모델인 [`UNet2DModel`]을 로드합니다: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id) ``` 모델 매개변수에 액세스하려면 `model.config`를 호출합니다: ```py >>> model.config ``` 모델 구성은 🧊 고정된 🧊 딕셔너리로, 모델이 생성된 후에는 해당 매개 변수들을 변경할 수 없습니다. 이는 의도적인 것으로, 처음에 모델 아키텍처를 정의하는 데 사용된 매개변수는 동일하게 유지하면서 다른 매개변수는 추론 중에 조정할 수 있도록 하기 위한 것입니다. 가장 중요한 매개변수들은 다음과 같습니다: * `sample_size`: 입력 샘플의 높이 및 너비 치수입니다. * `in_channels`: 입력 샘플의 입력 채널 수입니다. * `down_block_types` 및 `up_block_types`: UNet 아키텍처를 생성하는 데 사용되는 다운 및 업샘플링 블록의 유형. * `block_out_channels`: 다운샘플링 블록의 출력 채널 수. 업샘플링 블록의 입력 채널 수에 역순으로 사용되기도 합니다. * `layers_per_block`: 각 UNet 블록에 존재하는 ResNet 블록의 수입니다. 추론에 모델을 사용하려면 랜덤 가우시안 노이즈로 이미지 모양을 만듭니다. 모델이 여러 개의 무작위 노이즈를 수신할 수 있으므로 'batch' 축, 입력 채널 수에 해당하는 'channel' 축, 이미지의 높이와 너비를 나타내는 'sample_size' 축이 있어야 합니다: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` 추론을 위해 모델에 노이즈가 있는 이미지와 `timestep`을 전달합니다. 'timestep'은 입력 이미지의 노이즈 정도를 나타내며, 시작 부분에 더 많은 노이즈가 있고 끝 부분에 더 적은 노이즈가 있습니다. 이를 통해 모델이 diffusion 과정에서 시작 또는 끝에 더 가까운 위치를 결정할 수 있습니다. `sample` 메서드를 사용하여 모델 출력을 얻습니다: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` 하지만 실제 예를 생성하려면 노이즈 제거 프로세스를 안내할 스케줄러가 필요합니다. 다음 섹션에서는 모델을 스케줄러와 결합하는 방법에 대해 알아봅니다. ## 스케줄러 스케줄러는 모델 출력이 주어졌을 때 노이즈가 많은 샘플에서 노이즈가 적은 샘플로 전환하는 것을 관리합니다 - 이 경우 'noisy_residual'. <Tip> 🧨 Diffusers는 Diffusion 시스템을 구축하기 위한 툴박스입니다. [`DiffusionPipeline`]을 사용하면 미리 만들어진 Diffusion 시스템을 편리하게 시작할 수 있지만, 모델과 스케줄러 구성 요소를 개별적으로 선택하여 사용자 지정 Diffusion 시스템을 구축할 수도 있습니다. </Tip> 훑어보기의 경우, [`~diffusers.ConfigMixin.from_config`] 메서드를 사용하여 [`DDPMScheduler`]를 인스턴스화합니다: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 스케줄러가 구성에서 어떻게 인스턴스화되는지 주목하세요. 모델과 달리 스케줄러에는 학습 가능한 가중치가 없으며 매개변수도 없습니다! </Tip> 가장 중요한 매개변수는 다음과 같습니다: * `num_train_timesteps`: 노이즈 제거 프로세스의 길이, 즉 랜덤 가우스 노이즈를 데이터 샘플로 처리하는 데 필요한 타임스텝 수입니다. * `beta_schedule`: 추론 및 학습에 사용할 노이즈 스케줄 유형입니다. * `beta_start` 및 `beta_end`: 노이즈 스케줄의 시작 및 종료 노이즈 값입니다. 노이즈가 약간 적은 이미지를 예측하려면 스케줄러의 [`~diffusers.DDPMScheduler.step`] 메서드에 모델 출력, `timestep`, 현재 `sample`을 전달하세요. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` `less_noisy_sample`을 다음 `timestep`으로 넘기면 노이즈가 더 줄어듭니다! 이제 이 모든 것을 한데 모아 전체 노이즈 제거 과정을 시각화해 보겠습니다. 먼저 노이즈 제거된 이미지를 후처리하여 `PIL.Image`로 표시하는 함수를 만듭니다: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... 
display(image_pil) ``` 노이즈 제거 프로세스의 속도를 높이려면 입력과 모델을 GPU로 옮기세요: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` 이제 노이즈가 적은 샘플의 잔차를 예측하고 스케줄러로 노이즈가 적은 샘플을 계산하는 노이즈 제거 루프를 생성합니다: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` 가만히 앉아서 고양이가 소음으로만 생성되는 것을 지켜보세요!😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## 다음 단계 이번 훑어보기에서 🧨 Diffusers로 멋진 이미지를 만들어 보셨기를 바랍니다! 다음 단계로 넘어가세요: * [training](./tutorials/basic_training) 튜토리얼에서 모델을 학습하거나 파인튜닝하여 나만의 이미지를 생성할 수 있습니다. * 다양한 사용 사례는 공식 및 커뮤니티 [학습 또는 파인튜닝 스크립트](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) 예시를 참조하세요. * 스케줄러 로드, 액세스, 변경 및 비교에 대한 자세한 내용은 [다른 스케줄러 사용](./using-diffusers/schedulers) 가이드에서 확인하세요. * [Stable Diffusion](./stable_diffusion) 가이드에서 프롬프트 엔지니어링, 속도 및 메모리 최적화, 고품질 이미지 생성을 위한 팁과 요령을 살펴보세요. * [GPU에서 파이토치 최적화](./optimization/fp16) 가이드와 [애플 실리콘(M1/M2)에서의 Stable Diffusion](./optimization/mps) 및 [ONNX 런타임](./optimization/onnx) 실행에 대한 추론 가이드를 통해 🧨 Diffuser 속도를 높이는 방법을 더 자세히 알아보세요.
diffusers/docs/source/ko/quicktour.md/0
{ "file_path": "diffusers/docs/source/ko/quicktour.md", "repo_id": "diffusers", "token_count": 11452 }
119
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 조건부 이미지 생성 [[open-in-colab]] 조건부 이미지 생성을 사용하면 텍스트 프롬프트에서 이미지를 생성할 수 있습니다. 텍스트는 임베딩으로 변환되며, 임베딩은 노이즈에서 이미지를 생성하도록 모델을 조건화하는 데 사용됩니다. [`DiffusionPipeline`]은 추론을 위해 사전 훈련된 diffusion 시스템을 사용하는 가장 쉬운 방법입니다. 먼저 [`DiffusionPipeline`]의 인스턴스를 생성하고 다운로드할 파이프라인 [체크포인트](https://huggingface.co/models?library=diffusers&sort=downloads)를 지정합니다. 이 가이드에서는 [잠재 Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256)과 함께 텍스트-이미지 생성에 [`DiffusionPipeline`]을 사용합니다: ```python >>> from diffusers import DiffusionPipeline >>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") ``` [`DiffusionPipeline`]은 모든 모델링, 토큰화, 스케줄링 구성 요소를 다운로드하고 캐시합니다. 이 모델은 약 14억 개의 파라미터로 구성되어 있기 때문에 GPU에서 실행할 것을 강력히 권장합니다. PyTorch에서와 마찬가지로 생성기 객체를 GPU로 이동할 수 있습니다: ```python >>> generator.to("cuda") ``` 이제 텍스트 프롬프트에서 `생성기`를 사용할 수 있습니다: ```python >>> image = generator("An image of a squirrel in Picasso style").images[0] ``` 출력값은 기본적으로 [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) 객체로 래핑됩니다. 호출하여 이미지를 저장할 수 있습니다: ```python >>> image.save("image_of_squirrel_painting.png") ``` 아래 스페이스를 사용해보고 안내 배율 매개변수를 자유롭게 조정하여 이미지 품질에 어떤 영향을 미치는지 확인해 보세요! <iframe src="https://stabilityai-stable-diffusion.hf.space" frameborder="0" width="850" height="500" ></iframe>
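스페이스에서 조정해 본 것처럼 코드에서도 추론 스텝 수나 guidance scale 같은 파라미터를 바꿔볼 수 있습니다. 아래는 위에서 만든 `generator` 파이프라인을 그대로 사용한다고 가정한 간단한 예시 스케치이며, 파라미터의 지원 여부와 적정 값은 사용하는 파이프라인에 따라 다를 수 있으므로 해당 파이프라인 문서를 확인하세요:

```python
# 위에서 생성한 `generator` 파이프라인을 재사용한다고 가정합니다.
# guidance scale 값을 높이면 일반적으로 프롬프트를 더 충실히 따릅니다.
image = generator(
    "An image of a squirrel in Picasso style",
    num_inference_steps=50,
    guidance_scale=6.0,  # 파이프라인에 따라 지원 여부와 적정 값이 다를 수 있습니다
).images[0]
image.save("image_of_squirrel_painting_guided.png")
```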
diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md", "repo_id": "diffusers", "token_count": 1550 }
120
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Stable Video Diffusion

[[open-in-colab]]

[Stable Video Diffusion (SVD)](https://huggingface.co/papers/2311.15127)은 입력 이미지에 맞춰 2~4초 분량의 고해상도(576x1024) 비디오를 생성할 수 있는 강력한 image-to-video 생성 모델입니다.

이 가이드에서는 SVD를 사용하여 이미지에서 짧은 동영상을 생성하는 방법을 설명합니다.

시작하기 전에 다음 라이브러리가 설치되어 있는지 확인하세요:

```py
!pip install -q -U diffusers transformers accelerate
```

이 모델에는 [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid)와 [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt) 두 가지 종류가 있습니다. SVD 체크포인트는 14개의 프레임을 생성하도록 학습되었고, SVD-XT 체크포인트는 25개의 프레임을 생성하도록 파인튜닝되었습니다.

이 가이드에서는 SVD-XT 체크포인트를 사용합니다.

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Conditioning 이미지 불러오기
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]

export_to_video(frames, "generated.mp4", fps=7)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"source image of a rocket"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"generated video from source image"</figcaption>
  </div>
</div>

## torch.compile

UNet을 [컴파일](../optimization/torch2.0#torchcompile)하면 메모리 사용량이 살짝 증가하지만, 20~25%의 속도 향상을 얻을 수 있습니다.

```diff
- pipe.enable_model_cpu_offload()
+ pipe.to("cuda")
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

## 메모리 사용량 줄이기

비디오 생성은 기본적으로 배치 크기가 큰 text-to-image 생성과 유사하게 'num_frames'를 한 번에 생성하기 때문에 메모리 사용량이 매우 높습니다. 메모리 사용량을 줄이기 위해 추론 속도와 메모리 사용량을 절충하는 여러 가지 옵션이 있습니다:

- 모델 오프로딩 활성화: 파이프라인의 각 구성 요소가 더 이상 필요하지 않을 때 CPU로 오프로드됩니다.
- Feed-forward chunking 활성화: feed-forward 레이어가 배치 크기가 큰 단일 feed-forward를 실행하는 대신 루프로 반복해서 실행됩니다.
- `decode_chunk_size` 감소: VAE가 프레임들을 한꺼번에 디코딩하는 대신 chunk 단위로 디코딩합니다. `decode_chunk_size=1`을 설정하면 한 번에 한 프레임씩 디코딩하고 최소한의 메모리만 사용하지만(GPU 메모리에 따라 이 값을 조정하는 것이 좋습니다), 동영상에 약간의 깜박임이 발생할 수 있습니다.

```diff
- pipe.enable_model_cpu_offload()
- frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
+ pipe.enable_model_cpu_offload()
+ pipe.unet.enable_forward_chunking()
+ frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
```

이러한 모든 방법들을 사용하면 메모리 사용량이 8GB VRAM보다 적을 것입니다.
## Micro-conditioning

Stable Video Diffusion은 또한 이미지 conditioning 외에도 micro-conditioning을 허용하므로 생성된 비디오를 더 잘 제어할 수 있습니다:

- `fps`: 생성된 비디오의 초당 프레임 수입니다.
- `motion_bucket_id`: 생성된 동영상에 사용할 모션 버킷 아이디입니다. 생성된 동영상의 모션을 제어하는 데 사용할 수 있습니다. 모션 버킷 아이디를 늘리면 생성되는 동영상의 모션이 증가합니다.
- `noise_aug_strength`: Conditioning 이미지에 추가되는 노이즈의 양입니다. 값이 클수록 비디오가 conditioning 이미지와 덜 유사해집니다. 이 값을 높이면 생성된 비디오의 움직임도 증가합니다.

예를 들어, 모션이 더 많은 동영상을 생성하려면 `motion_bucket_id` 및 `noise_aug_strength` micro-conditioning 파라미터를 사용합니다:

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Conditioning 이미지 불러오기
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator, motion_bucket_id=180, noise_aug_strength=0.1).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket_with_conditions.gif)
diffusers/docs/source/ko/using-diffusers/svd.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/svd.md", "repo_id": "diffusers", "token_count": 3466 }
121
<!--版权 2025 HuggingFace 团队。保留所有权利。

根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则不得使用此文件。
您可以在以下网址获取许可证副本:

http://www.apache.org/licenses/LICENSE-2.0

除非适用法律要求或书面同意,否则根据许可证分发的软件按"原样"分发,不附带任何明示或暗示的担保或条件。请参阅许可证以了解具体的语言管理权限和限制。 -->

# 混合推理

**通过混合推理赋能本地 AI 构建者**

> [!TIP]
> 混合推理是一项[实验性功能](https://huggingface.co/blog/remote_vae)。
> 可以在[此处](https://github.com/huggingface/diffusers/issues/new?template=remote-vae-pilot-feedback.yml)提供反馈。

## 为什么使用混合推理?

混合推理提供了一种快速简单的方式来卸载本地生成需求。

- 🚀 **降低要求:** 无需昂贵硬件即可访问强大模型。
- 💎 **无妥协:** 在不牺牲性能的情况下实现最高质量。
- 💰 **成本效益高:** 它是免费的!🤑
- 🎯 **多样化用例:** 与 Diffusers 🧨 和更广泛的社区完全兼容。
- 🔧 **开发者友好:** 简单请求,快速响应。

---

## 可用模型

* **VAE 解码 🖼️:** 快速将潜在表示解码为高质量图像,不影响性能或工作流速度。
* **VAE 编码 🔢:** 高效将图像编码为潜在表示,用于生成和训练。
* **文本编码器 📃(即将推出):** 快速准确地计算提示的文本嵌入,确保流畅高质量的工作流。

---

## 集成

* **[SD.Next](https://github.com/vladmandic/sdnext):** 一体化 UI,直接支持混合推理。
* **[ComfyUI-HFRemoteVae](https://github.com/kijai/ComfyUI-HFRemoteVae):** 用于混合推理的 ComfyUI 节点。

## 更新日志

- 2025 年 3 月 10 日:添加了 VAE 编码
- 2025 年 3 月 2 日:初始发布,包含 VAE 解码

## 内容

文档分为三个部分:

* **VAE 解码** 学习如何使用混合推理进行 VAE 解码的基础知识。
* **VAE 编码** 学习如何使用混合推理进行 VAE 编码的基础知识。
* **API 参考** 深入了解任务特定设置和参数。
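下面给出一个最小示例草图,演示混合推理中"远程 VAE 解码"的大致调用方式。这里假设使用 `diffusers.utils.remote_utils` 模块中的 `remote_decode` 辅助函数;端点 URL 仅为占位符,实际可用端点、参数名称和默认值请以"API 参考"部分为准:

```python
import torch
from diffusers.utils.remote_utils import remote_decode

# 假设 `latent` 是本地管线(例如 Stable Diffusion)生成的潜在表示张量
latent = torch.randn(1, 4, 64, 64, dtype=torch.float16)

image = remote_decode(
    endpoint="https://<your-remote-vae-endpoint>",  # 占位符:请替换为 API 参考中列出的端点
    tensor=latent,
    scaling_factor=0.18215,  # 以所用 VAE 的实际 scaling_factor 为准
)
image.save("decoded.png")
```

这样,潜在表示会在远程端点上被解码为图像,本地无需加载完整的 VAE 权重。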
diffusers/docs/source/zh/hybrid_inference/overview.md/0
{ "file_path": "diffusers/docs/source/zh/hybrid_inference/overview.md", "repo_id": "diffusers", "token_count": 1485 }
122
<!--版权所有 2025 The HuggingFace Team。保留所有权利。 根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则根据许可证分发的软件按"原样"分发,无任何明示或暗示的担保或条件。有关许可证的具体语言,请参阅许可证中的权限和限制。 --> # DeepCache [DeepCache](https://huggingface.co/papers/2312.00858) 通过策略性地缓存和重用高级特征,同时利用 U-Net 架构高效更新低级特征,来加速 [`StableDiffusionPipeline`] 和 [`StableDiffusionXLPipeline`]。 首先安装 [DeepCache](https://github.com/horseee/DeepCache): ```bash pip install DeepCache ``` 然后加载并启用 [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage): ```diff import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda") + from DeepCache import DeepCacheSDHelper + helper = DeepCacheSDHelper(pipe=pipe) + helper.set_params( + cache_interval=3, + cache_branch_id=0, + ) + helper.enable() image = pipe("a photo of an astronaut on a moon").images[0] ``` `set_params` 方法接受两个参数:`cache_interval` 和 `cache_branch_id`。`cache_interval` 表示特征缓存的频率,指定为每次缓存操作之间的步数。`cache_branch_id` 标识网络的哪个分支(从最浅层到最深层排序)负责执行缓存过程。 选择较低的 `cache_branch_id` 或较大的 `cache_interval` 可以加快推理速度,但会降低图像质量(这些超参数的消融实验可以在[论文](https://huggingface.co/papers/2312.00858)中找到)。一旦设置了这些参数,使用 `enable` 或 `disable` 方法来激活或停用 `DeepCacheSDHelper`。 <div class="flex justify-center"> <img src="https://github.com/horseee/Diffusion_DeepCache/raw/master/static/images/example.png"> </div> 您可以在 [WandB 报告](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee) 中找到更多生成的样本(原始管道 vs DeepCache)和相应的推理延迟。提示是从 [MS-COCO 2017](https://cocodataset.org/#home) 数据集中随机选择的。 ## 基准测试 我们在 NVIDIA RTX A5000 上测试了 DeepCache 使用 50 个推理步骤加速 [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) 的速度,使用不同的配置,包括分辨率、批处理大小、缓存间隔(I)和缓存分支(B)。 | **分辨率** | **批次大小** | **原始** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** | |----------------|----------------|--------------|-------------------------|-------------------------|-------------------------| | 512| 8| 15.96| 6.88(2.32倍)| 5.03(3.18倍)| 7.27(2.20x)| | | 4| 8.39| 3.60(2.33倍)| 2.62(3.21倍)| 3.75(2.24x)| | | 1| 2.61| 1.12(2.33倍)| 0.81(3.24倍)| 1.11(2.35x)| | 768| 8| 43.58| 18.99(2.29倍)| 13.96(3.12倍)| 21.27(2.05x)| | | 4| 22.24| 9.67(2.30倍)| 7.10(3.13倍)| 10.74(2.07x)| | | 1| 6.33| 2.72(2.33倍)| 1.97(3.21倍)| 2.98(2.12x)| | 1024| 8| 101.95| 45.57(2.24倍)| 33.72(3.02倍)| 53.00(1.92x)| | | 4| 49.25| 21.86(2.25倍)| 16.19(3.04倍)| 25.78(1.91x)| | | 1| 13.83| 6.07(2.28倍)| 4.43(3.12倍)| 7.15(1.93x)|
diffusers/docs/source/zh/optimization/deepcache.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/deepcache.md", "repo_id": "diffusers", "token_count": 2598 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 有效且高效的扩散 [[open-in-colab]] 让 [`DiffusionPipeline`] 生成特定风格或包含你所想要的内容的图像可能会有些棘手。 通常情况下,你需要多次运行 [`DiffusionPipeline`] 才能得到满意的图像。但是从无到有生成图像是一个计算密集的过程,特别是如果你要一遍又一遍地进行推理运算。 这就是为什么从pipeline中获得最高的 *computational* (speed) 和 *memory* (GPU RAM) 非常重要 ,以减少推理周期之间的时间,从而使迭代速度更快。 本教程将指导您如何通过 [`DiffusionPipeline`] 更快、更好地生成图像。 首先,加载 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 模型: ```python from diffusers import DiffusionPipeline model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True) ``` 本教程将使用的提示词是 [`portrait photo of a old warrior chief`] ,但是你可以随心所欲的想象和构造自己的提示词: ```python prompt = "portrait photo of a old warrior chief" ``` ## 速度 <Tip> 💡 如果你没有 GPU, 你可以从像 [Colab](https://colab.research.google.com/) 这样的 GPU 提供商获取免费的 GPU ! </Tip> 加速推理的最简单方法之一是将 pipeline 放在 GPU 上 ,就像使用任何 PyTorch 模块一样: ```python pipeline = pipeline.to("cuda") ``` 为了确保您可以使用相同的图像并对其进行改进,使用 [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) 方法,然后设置一个随机数种子 以确保其 [复现性](./using-diffusers/reusing_seeds): ```python import torch generator = torch.Generator("cuda").manual_seed(0) ``` 现在,你可以生成一个图像: ```python image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png"> </div> 在 T4 GPU 上,这个过程大概要30秒(如果你的 GPU 比 T4 好,可能会更快)。在默认情况下,[`DiffusionPipeline`] 使用完整的 `float32` 精度进行 50 步推理。你可以通过降低精度(如 `float16` )或者减少推理步数来加速整个过程 让我们把模型的精度降低至 `float16` ,然后生成一张图像: ```python import torch pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True) pipeline = pipeline.to("cuda") generator = torch.Generator("cuda").manual_seed(0) image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png"> </div> 这一次,生成图像只花了约 11 秒,比之前快了近 3 倍! 
<Tip> 💡 我们强烈建议把 pipeline 精度降低至 `float16` , 到目前为止, 我们很少看到输出质量有任何下降。 </Tip> 另一个选择是减少推理步数。 你可以选择一个更高效的调度器 (*scheduler*) 可以减少推理步数同时保证输出质量。您可以在 [DiffusionPipeline] 中通过调用compatibles方法找到与当前模型兼容的调度器 (*scheduler*)。 ```python pipeline.scheduler.compatibles [ diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler, diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler, diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler, diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, diffusers.schedulers.scheduling_ddpm.DDPMScheduler, diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler, diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler, diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler, diffusers.schedulers.scheduling_pndm.PNDMScheduler, diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler, diffusers.schedulers.scheduling_ddim.DDIMScheduler, ] ``` Stable Diffusion 模型默认使用的是 [`PNDMScheduler`] ,通常要大概50步推理, 但是像 [`DPMSolverMultistepScheduler`] 这样更高效的调度器只要大概 20 或 25 步推理. 使用 [`ConfigMixin.from_config`] 方法加载新的调度器: ```python from diffusers import DPMSolverMultistepScheduler pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) ``` 现在将 `num_inference_steps` 设置为 20: ```python generator = torch.Generator("cuda").manual_seed(0) image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png"> </div> 太棒了!你成功把推理时间缩短到 4 秒!⚡️ ## 内存 改善 pipeline 性能的另一个关键是减少内存的使用量,这间接意味着速度更快,因为你经常试图最大化每秒生成的图像数量。要想知道你一次可以生成多少张图片,最简单的方法是尝试不同的batch size,直到出现`OutOfMemoryError` (OOM)。 创建一个函数,为每一批要生成的图像分配提示词和 `Generators` 。请务必为每个`Generator` 分配一个种子,以便于复现良好的结果。 ```python def get_inputs(batch_size=1): generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)] prompts = batch_size * [prompt] num_inference_steps = 20 return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps} ``` 设置 `batch_size=4` ,然后看一看我们消耗了多少内存: ```python from diffusers.utils import make_image_grid images = pipeline(**get_inputs(batch_size=4)).images make_image_grid(images, 2, 2) ``` 除非你有一个更大内存的GPU, 否则上述代码会返回 `OOM` 错误! 大部分内存被 cross-attention 层使用。按顺序运行可以节省大量内存,而不是在批处理中进行。你可以为 pipeline 配置 [`~DiffusionPipeline.enable_attention_slicing`] 函数: ```python pipeline.enable_attention_slicing() ``` 现在尝试把 `batch_size` 增加到 8! ```python images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png"> </div> 以前你不能一批生成 4 张图片,而现在你可以在一张图片里面生成八张图片而只需要大概3.5秒!这可能是 T4 GPU 在不牺牲质量的情况运行速度最快的一种方法。 ## 质量 在最后两节中, 你要学习如何通过 `fp16` 来优化 pipeline 的速度, 通过使用性能更高的调度器来减少推理步数, 使用注意力切片(*enabling attention slicing*)方法来节省内存。现在,你将关注的是如何提高图像的质量。 ### 更好的 checkpoints 有个显而易见的方法是使用更好的 checkpoints。 Stable Diffusion 模型是一个很好的起点, 自正式发布以来,还发布了几个改进版本。然而, 使用更新的版本并不意味着你会得到更好的结果。你仍然需要尝试不同的 checkpoints ,并做一些研究 (例如使用 [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) 来获得更好的结果。 随着该领域的发展, 有越来越多经过微调的高质量的 checkpoints 用来生成不一样的风格. 
在 [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) 和 [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) 寻找你感兴趣的一种! ### 更好的 pipeline 组件 也可以尝试用新版本替换当前 pipeline 组件。让我们加载最新的 [autodecoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) 从 Stability AI 加载到 pipeline, 并生成一些图像: ```python from diffusers import AutoencoderKL vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda") pipeline.vae = vae images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png"> </div> ### 更好的提示词工程 用于生成图像的文本非常重要, 因此被称为 *提示词工程*。 在设计提示词工程应注意如下事项: - 我想生成的图像或类似图像如何存储在互联网上? - 我可以提供哪些额外的细节来引导模型朝着我想要的风格生成? 考虑到这一点,让我们改进提示词,以包含颜色和更高质量的细节: ```python prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes" prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta" ``` 使用新的提示词生成一批图像: ```python images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png"> </div> 非常的令人印象深刻! Let's tweak the second image - 把 `Generator` 的种子设置为 `1` - 添加一些关于年龄的主题文本: ```python prompts = [ "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", ] generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))] images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images make_image_grid(images, 2, 2) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png"> </div> ## 最后 在本教程中, 您学习了如何优化[`DiffusionPipeline`]以提高计算和内存效率,以及提高生成输出的质量. 如果你有兴趣让你的 pipeline 更快, 可以看一看以下资源: - 学习 [PyTorch 2.0](./optimization/torch2.0) 和 [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) 可以让推理速度提高 5 - 300% . 在 A100 GPU 上, 推理速度可以提高 50% ! - 如果你没法用 PyTorch 2, 我们建议你安装 [xFormers](./optimization/xformers)。它的内存高效注意力机制(*memory-efficient attention mechanism*)与PyTorch 1.13.1配合使用,速度更快,内存消耗更少。 - 其他的优化技术, 如:模型卸载(*model offloading*), 包含在 [这份指南](./optimization/fp16).
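下面给出一个简要的示意代码(基于上述资源的常见用法,并非本教程正文;实际加速效果取决于你的 PyTorch、diffusers 与 xFormers 版本以及硬件环境,请以对应指南为准):

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# PyTorch 2.x:用 torch.compile 编译 UNet,通常可以进一步加速推理
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

# 无法使用 PyTorch 2 时,可以改用 xFormers 的内存高效注意力(需要先安装 xformers)
# pipeline.enable_xformers_memory_efficient_attention()

# 显存紧张时可以启用模型卸载(model offloading);注意启用后不要再调用 .to("cuda")
# pipeline.enable_model_cpu_offload()
```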
diffusers/docs/source/zh/stable_diffusion.md/0
{ "file_path": "diffusers/docs/source/zh/stable_diffusion.md", "repo_id": "diffusers", "token_count": 6142 }
124
# Advanced diffusion training examples ## Train Dreambooth LoRA with Flux.1 Dev > [!TIP] > 💡 This example follows some of the techniques and recommended practices covered in the community-derived guide we made for SDXL training: [LoRA training scripts of the world, unite!](https://huggingface.co/blog/sdxl_lora_advanced_script). > As many of these are architecture agnostic & generally relevant to fine-tuning of diffusion models, we suggest taking a look. 🤗 [DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text-to-image models like Flux and Stable Diffusion given just a few (3-5) images of a subject. LoRA (Low-Rank Adaptation of Large Language Models) was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages: - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114). - Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable. - LoRA attention layers allow controlling to what extent the model is adapted towards new training images via a `scale` parameter. [cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. The `train_dreambooth_lora_flux_advanced.py` script shows how to implement Dreambooth LoRA, combining the training process shown in `train_dreambooth_lora_flux.py` with advanced features and techniques, inspired by and built upon contributions by [Nataniel Ruiz](https://twitter.com/natanielruizg): [Dreambooth](https://dreambooth.github.io), [Rinon Gal](https://twitter.com/RinonGal): [Textual Inversion](https://textual-inversion.github.io), [Ron Mokady](https://twitter.com/MokadyRon): [Pivotal Tuning](https://huggingface.co/papers/2106.05744), [Simo Ryu](https://twitter.com/cloneofsimo): [cog-sdxl](https://github.com/replicate/cog-sdxl), [ostris](https://x.com/ostrisai): [ai-toolkit](https://github.com/ostris/ai-toolkit), [bghira](https://github.com/bghira): [SimpleTuner](https://github.com/bghira/SimpleTuner), [Kohya](https://twitter.com/kohya_tech/): [sd-scripts](https://github.com/kohya-ss/sd-scripts), [The Last Ben](https://twitter.com/__TheBen): [fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion) ❤️ > [!NOTE] > 💡 If this is your first time training a Dreambooth LoRA, congrats! 🥳 > You might want to familiarize yourself more with the techniques: [Dreambooth blog](https://huggingface.co/blog/dreambooth), [Using LoRA for Efficient Stable Diffusion Fine-Tuning blog](https://huggingface.co/blog/lora) ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. 
To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the `examples/advanced_diffusion_training` folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment. Lastly, we recommend logging into your HF account so that your trained LoRA is automatically uploaded to the hub: ```bash hf auth login ``` This command will prompt you for a token. Copy-paste yours from your [settings/tokens](https://huggingface.co/settings/tokens),and press Enter. > [!NOTE] > In the examples below we use `wandb` to document the training runs. To do the same, make sure to install `wandb`: > `pip install wandb` > Alternatively, you can use other tools / train without reporting by modifying the flag `--report_to="wandb"`. ### LoRA Rank and Alpha Two key LoRA hyperparameters are LoRA rank and LoRA alpha. - `--rank`: Defines the dimension of the trainable LoRA matrices. A higher rank means more expressiveness and capacity to learn (and more parameters). - `--lora_alpha`: A scaling factor for the LoRA's output. The LoRA update is scaled by lora_alpha / lora_rank. - lora_alpha vs. rank: This ratio dictates the LoRA's effective strength: lora_alpha == rank: Scaling factor is 1. The LoRA is applied with its learned strength. (e.g., alpha=16, rank=16) lora_alpha < rank: Scaling factor < 1. Reduces the LoRA's impact. Useful for subtle changes or to prevent overpowering the base model. (e.g., alpha=8, rank=16) lora_alpha > rank: Scaling factor > 1. Amplifies the LoRA's impact. Allows a lower rank LoRA to have a stronger effect. (e.g., alpha=32, rank=16) > [!TIP] > A common starting point is to set `lora_alpha` equal to `rank`. > Some also set `lora_alpha` to be twice the `rank` (e.g., lora_alpha=32 for lora_rank=16) > to give the LoRA updates more influence without increasing parameter count. > If you find your LoRA is "overcooking" or learning too aggressively, consider setting `lora_alpha` to half of `rank` > (e.g., lora_alpha=8 for rank=16). Experimentation is often key to finding the optimal balance for your use case. ### Target Modules When LoRA was first adapted from language models to diffusion models, it was applied to the cross-attention layers in the Unet that relate the image representations with the prompts that describe them. More recently, SOTA text-to-image diffusion models replaced the Unet with a diffusion Transformer(DiT). With this change, we may also want to explore applying LoRA training onto different types of layers and blocks. To allow more flexibility and control over the targeted modules we added `--lora_layers`- in which you can specify in a comma separated string the exact modules for LoRA training. 
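Before the concrete `--lora_layers` strings below, here is a small, illustrative sketch of how rank, alpha and target modules come together in a PEFT `LoraConfig`, and how the effective scaling factor falls out of them. It is not taken from the training script, and the module names are only a hypothetical subset of attention projections:

```python
from peft import LoraConfig

rank = 16
lora_alpha = 16  # the LoRA update is scaled by lora_alpha / rank

lora_config = LoraConfig(
    r=rank,
    lora_alpha=lora_alpha,
    init_lora_weights="gaussian",
    # hypothetical subset of attention projections; see the --lora_layers examples below
    target_modules=["attn.to_k", "attn.to_q", "attn.to_v", "attn.to_out.0"],
)

# alpha == rank -> 1.0, alpha == 2 * rank -> 2.0, alpha == rank / 2 -> 0.5
print(f"effective LoRA scaling: {lora_alpha / rank}")
```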
Here are some examples of target modules you can provide: - for attention only layers: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0"` - to train the same modules as in the fal trainer: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2"` - to train the same modules as in ostris ai-toolkit / replicate trainer: `--lora_blocks="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2,norm1_context.linear, norm1.linear,norm.linear,proj_mlp,proj_out"` > [!NOTE] > `--lora_layers` can also be used to specify which **blocks** to apply LoRA training to. To do so, simply add a block prefix to each layer in the comma separated string: > **single DiT blocks**: to target the ith single transformer block, add the prefix `single_transformer_blocks.i`, e.g. - `single_transformer_blocks.i.attn.to_k` > **MMDiT blocks**: to target the ith MMDiT block, add the prefix `transformer_blocks.i`, e.g. - `transformer_blocks.i.attn.to_k` > [!NOTE] > keep in mind that while training more layers can improve quality and expressiveness, it also increases the size of the output LoRA weights. ### Pivotal Tuning (and more) **Training with text encoder(s)** Alongside the Transformer, LoRA fine-tuning of the text encoders is also supported. In addition to the text encoder optimization available with `train_dreambooth_lora_flux_advanced.py`, in the advanced script **pivotal tuning** is also supported. [pivotal tuning](https://huggingface.co/blog/sdxl_lora_advanced_script#pivotal-tuning) combines Textual Inversion with regular diffusion fine-tuning - we insert new tokens into the text encoders of the model, instead of reusing existing ones. We then optimize the newly-inserted token embeddings to represent the new concept. To do so, just specify `--train_text_encoder_ti` while launching training (for regular text encoder optimizations, use `--train_text_encoder`). Please keep the following points in mind: * Flux uses two text encoders - [CLIP](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux#diffusers.FluxPipeline.text_encoder) & [T5](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux#diffusers.FluxPipeline.text_encoder_2) , by default `--train_text_encoder_ti` performs pivotal tuning for the **CLIP** encoder only. To activate pivotal tuning for both encoders, add the flag `--enable_t5_ti`. * When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory. * **pure textual inversion** - to support the full range from pivotal tuning to textual inversion we introduce `--train_transformer_frac` which controls the amount of epochs the transformer LoRA layers are trained. By default, `--train_transformer_frac==1`, to trigger a textual inversion run set `--train_transformer_frac==0`. Values between 0 and 1 are supported as well, and we welcome the community to experiment w/ different settings and share the results! * **token initializer** - similar to the original textual inversion work, you can specify a concept of your choosing as the starting point for training. By default, when enabling `--train_text_encoder_ti`, the new inserted tokens are initialized randomly. 
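To make the pivotal tuning flags above a bit more concrete, here is a minimal, hypothetical sketch (not the actual script code; the model id and token names are placeholders) of what inserting new tokens into a text encoder involves. The `--initializer_concept` option described next simply replaces the random starting values with the embedding of an existing concept:

```python
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="text_encoder")

# insert the new concept tokens and grow the embedding matrix accordingly
new_tokens = ["<s0>", "<s1>"]
tokenizer.add_tokens(new_tokens)
text_encoder.resize_token_embeddings(len(tokenizer))

# only the embedding rows of the new tokens are optimized during training;
# by default they start from random values
new_token_ids = tokenizer.convert_tokens_to_ids(new_tokens)
embedding_weight = text_encoder.get_input_embeddings().weight
print(embedding_weight[new_token_ids].shape)  # (2, hidden_size)
```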
You can specify a token in `--initializer_concept` such that the starting point for the trained embeddings will be the embeddings associated with your chosen `--initializer_concept`. ## Training examples Now let's get our dataset. For this example we will use some cool images of 3d rendered icons: https://huggingface.co/datasets/linoyts/3d_icon. Let's first download it locally: ```python from huggingface_hub import snapshot_download local_dir = "./3d_icon" snapshot_download( "LinoyTsaban/3d_icon", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", ) ``` Let's review some of the advanced features we're going to be using for this example: - **custom captions**: To use custom captioning, first ensure that you have the datasets library installed, otherwise you can install it by ```bash pip install datasets ``` Now we'll simply specify the name of the dataset and caption column (in this case it's "prompt") ``` --dataset_name=./3d_icon --caption_column=prompt ``` You can also load a dataset straight from by specifying it's name in `dataset_name`. Look [here](https://huggingface.co/blog/sdxl_lora_advanced_script#custom-captioning) for more info on creating/loading your own caption dataset. - **optimizer**: for this example, we'll use [prodigy](https://huggingface.co/blog/sdxl_lora_advanced_script#adaptive-optimizers) - an adaptive optimizer - To use Prodigy, please make sure to install the prodigyopt library: `pip install prodigyopt` - **pivotal tuning** ### Example #1: Pivotal tuning **Now, we can launch training:** ```bash export MODEL_NAME="black-forest-labs/FLUX.1-dev" export DATASET_NAME="./3d_icon" export OUTPUT_DIR="3d-icon-Flux-LoRA" accelerate launch train_dreambooth_lora_flux_advanced.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --instance_prompt="3d icon in the style of TOK" \ --output_dir=$OUTPUT_DIR \ --caption_column="prompt" \ --mixed_precision="bf16" \ --resolution=1024 \ --train_batch_size=1 \ --repeats=1 \ --report_to="wandb"\ --gradient_accumulation_steps=1 \ --gradient_checkpointing \ --learning_rate=1.0 \ --text_encoder_lr=1.0 \ --optimizer="prodigy"\ --train_text_encoder_ti\ --train_text_encoder_ti_frac=0.5\ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --rank=8 \ --max_train_steps=700 \ --checkpointing_steps=2000 \ --seed="0" \ --push_to_hub ``` To better track our training experiments, we're using the following flags in the command above: * `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`. * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected. Our experiments were conducted on a single 40GB A100 GPU. ### Example #2: Pivotal tuning with T5 Now let's try that with T5 as well, so instead of only optimizing the CLIP embeddings associated with newly inserted tokens, we'll optimize the T5 embeddings as well. 
We can do this by simply adding `--enable_t5_ti` to the previous configuration: ```bash export MODEL_NAME="black-forest-labs/FLUX.1-dev" export DATASET_NAME="./3d_icon" export OUTPUT_DIR="3d-icon-Flux-LoRA" accelerate launch train_dreambooth_lora_flux_advanced.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --instance_prompt="3d icon in the style of TOK" \ --output_dir=$OUTPUT_DIR \ --caption_column="prompt" \ --mixed_precision="bf16" \ --resolution=1024 \ --train_batch_size=1 \ --repeats=1 \ --report_to="wandb"\ --gradient_accumulation_steps=1 \ --gradient_checkpointing \ --learning_rate=1.0 \ --text_encoder_lr=1.0 \ --optimizer="prodigy"\ --train_text_encoder_ti\ --enable_t5_ti\ --train_text_encoder_ti_frac=0.5\ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --rank=8 \ --max_train_steps=700 \ --checkpointing_steps=2000 \ --seed="0" \ --push_to_hub ``` ### Example #3: Textual Inversion To explore a pure textual inversion - i.e. only optimizing the text embeddings w/o training transformer LoRA layers, we can set the value for `--train_transformer_frac` - which is responsible for the percent of epochs in which the transformer is trained. By setting `--train_transformer_frac == 0` and enabling `--train_text_encoder_ti` we trigger a textual inversion train run. ```bash export MODEL_NAME="black-forest-labs/FLUX.1-dev" export DATASET_NAME="./3d_icon" export OUTPUT_DIR="3d-icon-Flux-LoRA" accelerate launch train_dreambooth_lora_flux_advanced.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --instance_prompt="3d icon in the style of TOK" \ --output_dir=$OUTPUT_DIR \ --caption_column="prompt" \ --mixed_precision="bf16" \ --resolution=1024 \ --train_batch_size=1 \ --repeats=1 \ --report_to="wandb"\ --gradient_accumulation_steps=1 \ --gradient_checkpointing \ --learning_rate=1.0 \ --text_encoder_lr=1.0 \ --optimizer="prodigy"\ --train_text_encoder_ti\ --enable_t5_ti\ --train_text_encoder_ti_frac=0.5\ --train_transformer_frac=0\ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --rank=8 \ --max_train_steps=700 \ --checkpointing_steps=2000 \ --seed="0" \ --push_to_hub ``` ### Inference - pivotal tuning Once training is done, we can perform inference like so: 1. starting with loading the transformer lora weights ```python import torch from huggingface_hub import hf_hub_download, upload_file from diffusers import AutoPipelineForText2Image from safetensors.torch import load_file username = "linoyts" repo_id = f"{username}/3d-icon-Flux-LoRA" pipe = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda') pipe.load_lora_weights(repo_id, weight_name="pytorch_lora_weights.safetensors") ``` 2. now we load the pivotal tuning embeddings > [!NOTE] #1 if `--enable_t5_ti` wasn't passed, we only load the embeddings to the CLIP encoder. > [!NOTE] #2 the number of tokens (i.e. <s0>,...,<si>) is either determined by `--num_new_tokens_per_abstraction` or by `--initializer_concept`. 
Make sure to update inference code accordingly :) ```python text_encoders = [pipe.text_encoder, pipe.text_encoder_2] tokenizers = [pipe.tokenizer, pipe.tokenizer_2] embedding_path = hf_hub_download(repo_id=repo_id, filename="3d-icon-Flux-LoRA_emb.safetensors", repo_type="model") state_dict = load_file(embedding_path) # load embeddings of text_encoder 1 (CLIP ViT-L/14) pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer) # load embeddings of text_encoder 2 (T5 XXL) - ignore this line if you didn't enable `--enable_t5_ti` pipe.load_textual_inversion(state_dict["t5"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2) ``` 3. let's generate images ```python instance_token = "<s0><s1>" prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}" image = pipe(prompt=prompt, num_inference_steps=25, cross_attention_kwargs={"scale": 1.0}).images[0] image.save("llama.png") ``` ### Inference - pure textual inversion In this case, we don't load transformer layers as before, since we only optimize the text embeddings. The output of a textual inversion train run is a `.safetensors` file containing the trained embeddings for the new tokens either for the CLIP encoder, or for both encoders (CLIP and T5) 1. starting with loading the embeddings. 💡note that here too, if you didn't enable `--enable_t5_ti`, you only load the embeddings to the CLIP encoder ```python import torch from huggingface_hub import hf_hub_download, upload_file from diffusers import AutoPipelineForText2Image from safetensors.torch import load_file username = "linoyts" repo_id = f"{username}/3d-icon-Flux-LoRA" pipe = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda') text_encoders = [pipe.text_encoder, pipe.text_encoder_2] tokenizers = [pipe.tokenizer, pipe.tokenizer_2] embedding_path = hf_hub_download(repo_id=repo_id, filename="3d-icon-Flux-LoRA_emb.safetensors", repo_type="model") state_dict = load_file(embedding_path) # load embeddings of text_encoder 1 (CLIP ViT-L/14) pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer) # load embeddings of text_encoder 2 (T5 XXL) - ignore this line if you didn't enable `--enable_t5_ti` pipe.load_textual_inversion(state_dict["t5"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2) ``` 2. let's generate images ```python instance_token = "<s0><s1>" prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}" image = pipe(prompt=prompt, num_inference_steps=25, cross_attention_kwargs={"scale": 1.0}).images[0] image.save("llama.png") ``` ### Comfy UI / AUTOMATIC1111 Inference The new script fully supports textual inversion loading with Comfy UI and AUTOMATIC1111 formats! **AUTOMATIC1111 / SD.Next** \ In AUTOMATIC1111/SD.Next we will load a LoRA and a textual embedding at the same time. - *LoRA*: Besides the diffusers format, the script will also train a WebUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory. - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `embeddings` directory. 
You can then run inference by prompting `a y2k_emb webpage about the movie Mean Girls <lora:y2k:0.9>`. You can use the `y2k_emb` token normally, including increasing its weight by doing `(y2k_emb:1.2)`. **ComfyUI** \ In ComfyUI we will load a LoRA and a textual embedding at the same time. - *LoRA*: Besides the diffusers format, the script will also train a ComfyUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory. Then you will load the LoRALoader node and hook that up with your model and CLIP. [Official guide for loading LoRAs](https://comfyanonymous.github.io/ComfyUI_examples/lora/) - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `models/embeddings` directory and use it in your prompts like `embedding:y2k_emb`. [Official guide for loading embeddings](https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/).
diffusers/examples/advanced_diffusion_training/README_flux.md/0
{ "file_path": "diffusers/examples/advanced_diffusion_training/README_flux.md", "repo_id": "diffusers", "token_count": 6906 }
125
from typing import List, Optional, Tuple, Union import torch from diffusers import DiffusionPipeline from diffusers.configuration_utils import ConfigMixin from diffusers.pipelines.pipeline_utils import ImagePipelineOutput from diffusers.schedulers.scheduling_utils import SchedulerMixin class IADBScheduler(SchedulerMixin, ConfigMixin): """ IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist. For more details, see the original paper: https://huggingface.co/papers/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html """ def step( self, model_output: torch.Tensor, timestep: int, x_alpha: torch.Tensor, ) -> torch.Tensor: """ Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. It is the direction from x0 to x1. timestep (`float`): current timestep in the diffusion chain. x_alpha (`torch.Tensor`): x_alpha sample for the current timestep Returns: `torch.Tensor`: the sample at the previous timestep """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) alpha = timestep / self.num_inference_steps alpha_next = (timestep + 1) / self.num_inference_steps d = model_output x_alpha = x_alpha + (alpha_next - alpha) * d return x_alpha def set_timesteps(self, num_inference_steps: int): self.num_inference_steps = num_inference_steps def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, alpha: torch.Tensor, ) -> torch.Tensor: return original_samples * alpha + noise * (1 - alpha) def __len__(self): return self.config.num_train_timesteps class IADBPipeline(DiffusionPipeline): r""" This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Parameters: unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When returning a tuple, the first element is a list with the generated images. """ # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size, int): image_shape = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(num_inference_steps) x_alpha = image.clone() for t in self.progress_bar(range(num_inference_steps)): alpha = t / num_inference_steps # 1. predict noise model_output model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample # 2. step x_alpha = self.scheduler.step(model_output, t, x_alpha) image = (x_alpha * 0.5 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
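# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original upstream file).
# "path/to/iadb-unet" is only a placeholder for a UNet2DModel checkpoint that
# was trained with the IADB objective; adjust the path and step count as needed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from diffusers import UNet2DModel

    unet = UNet2DModel.from_pretrained("path/to/iadb-unet")
    pipeline = IADBPipeline(unet=unet, scheduler=IADBScheduler()).to("cuda")

    images = pipeline(batch_size=1, num_inference_steps=128).images
    images[0].save("iadb_sample.png")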
diffusers/examples/community/iadb.py/0
{ "file_path": "diffusers/examples/community/iadb.py", "repo_id": "diffusers", "token_count": 2501 }
126
# Copyright 2025 FABRIC authors and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import torch from packaging import version from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, UNet2DConditionModel from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models.attention import BasicTransformerBlock from diffusers.models.attention_processor import LoRAAttnProcessor from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import DiffusionPipeline >>> import torch >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0" >>> pipe = DiffusionPipeline(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric") >>> pipe = pipe.to("cuda") >>> prompt = "a giant standing in a fantasy landscape best quality" >>> liked = [] # list of images for positive feedback >>> disliked = [] # list of images for negative feedback >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0] ``` """ class FabricCrossAttnProcessor: def __init__(self): self.attntion_probs = None def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, weights=None, lora_scale=1.0, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if isinstance(attn.processor, LoRAAttnProcessor): query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if isinstance(attn.processor, LoRAAttnProcessor): key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) if weights is not None: if weights.shape[0] != 1: weights = weights.repeat_interleave(attn.heads, dim=0) 
attention_probs = attention_probs * weights[:, None] attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj if isinstance(attn.processor, LoRAAttnProcessor): hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states) else: hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class FabricPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`EulerAncestralDiscreteScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, requires_safety_checker: bool = True, ): super().__init__() is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( unet=unet, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def get_unet_hidden_states(self, z_all, t, prompt_embd): cached_hidden_states = [] for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): def new_forward(self, hidden_states, *args, **kwargs): cached_hidden_states.append(hidden_states.clone().detach().cpu()) return self.old_forward(hidden_states, *args, **kwargs) module.attn1.old_forward = module.attn1.forward module.attn1.forward = new_forward.__get__(module.attn1) # run forward pass to cache hidden states, output can be discarded _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd) # restore original forward pass for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): module.attn1.forward = module.attn1.old_forward del module.attn1.old_forward return cached_hidden_states def unet_forward_with_cached_hidden_states( self, z_all, t, prompt_embd, cached_pos_hiddens: Optional[List[torch.Tensor]] = None, cached_neg_hiddens: Optional[List[torch.Tensor]] = None, pos_weights=(0.8, 0.8), neg_weights=(0.5, 0.5), ): if cached_pos_hiddens is None and cached_neg_hiddens is None: return self.unet(z_all, t, encoder_hidden_states=prompt_embd) local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist() local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist() for block, pos_weight, neg_weight in zip( self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks, local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1], local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1], ): for module in block.modules(): if isinstance(module, BasicTransformerBlock): def new_forward( self, hidden_states, pos_weight=pos_weight, neg_weight=neg_weight, **kwargs, ): cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0) batch_size, d_model = cond_hiddens.shape[:2] device, dtype = hidden_states.device, hidden_states.dtype weights = torch.ones(batch_size, d_model, device=device, dtype=dtype) out_pos = self.old_forward(hidden_states) out_neg = self.old_forward(hidden_states) 
if cached_pos_hiddens is not None: cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device) cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1) pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model) pos_weights[:, d_model:] = pos_weight attn_with_weights = FabricCrossAttnProcessor() out_pos = attn_with_weights( self, cond_hiddens, encoder_hidden_states=cond_pos_hs, weights=pos_weights, ) else: out_pos = self.old_forward(cond_hiddens) if cached_neg_hiddens is not None: cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device) uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1) neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model) neg_weights[:, d_model:] = neg_weight attn_with_weights = FabricCrossAttnProcessor() out_neg = attn_with_weights( self, uncond_hiddens, encoder_hidden_states=uncond_neg_hs, weights=neg_weights, ) else: out_neg = self.old_forward(uncond_hiddens) out = torch.cat([out_pos, out_neg], dim=0) return out module.attn1.old_forward = module.attn1.forward module.attn1.forward = new_forward.__get__(module.attn1) out = self.unet(z_all, t, encoder_hidden_states=prompt_embd) # restore original forward pass for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): module.attn1.forward = module.attn1.old_forward del module.attn1.old_forward return out def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.tensor: images_t = [self.image_to_tensor(img, dim, dtype) for img in images] images_t = torch.stack(images_t).to(device) latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator) return torch.cat([latents], dim=0) def check_inputs( self, prompt, negative_prompt=None, liked=None, disliked=None, height=None, width=None, ): if prompt is None: raise ValueError("Provide `prompt`. 
Cannot leave both `prompt` undefined.") elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if liked is not None and not isinstance(liked, list): raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}") if disliked is not None and not isinstance(disliked, list): raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}") if height is not None and not isinstance(height, int): raise ValueError(f"`height` has to be of type `int` but is {type(height)}") if width is not None and not isinstance(width, int): raise ValueError(f"`width` has to be of type `int` but is {type(width)}") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Optional[Union[str, List[str]]] = "", negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality", liked: Optional[Union[List[str], List[Image.Image]]] = [], disliked: Optional[Union[List[str], List[Image.Image]]] = [], generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, height: int = 512, width: int = 512, return_dict: bool = True, num_images: int = 4, guidance_scale: float = 7.0, num_inference_steps: int = 20, output_type: Optional[str] = "pil", feedback_start_ratio: float = 0.33, feedback_end_ratio: float = 0.66, min_weight: float = 0.05, max_weight: float = 0.8, neg_scale: float = 0.5, pos_bottleneck_scale: float = 1.0, neg_bottleneck_scale: float = 1.0, latents: Optional[torch.Tensor] = None, ): r""" The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The feedback can be given as a list of liked and disliked images. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds` instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). liked (`List[Image.Image]` or `List[str]`, *optional*): Encourages images with liked features. disliked (`List[Image.Image]` or `List[str]`, *optional*): Discourages images with disliked features. generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to make generation deterministic. height (`int`, *optional*, defaults to 512): Height of the generated image. width (`int`, *optional*, defaults to 512): Width of the generated image. num_images (`int`, *optional*, defaults to 4): The number of images to generate per prompt. guidance_scale (`float`, *optional*, defaults to 7.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. feedback_start_ratio (`float`, *optional*, defaults to `.33`): Start point for providing feedback (between 0 and 1). feedback_end_ratio (`float`, *optional*, defaults to `.66`): End point for providing feedback (between 0 and 1). min_weight (`float`, *optional*, defaults to `.05`): Minimum weight for feedback. max_weight (`float`, *optional*, defaults tp `1.0`): Maximum weight for feedback. neg_scale (`float`, *optional*, defaults to `.5`): Scale factor for negative feedback. Examples: Returns: [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ self.check_inputs(prompt, negative_prompt, liked, disliked) device = self._execution_device dtype = self.unet.dtype if isinstance(prompt, str) and prompt is not None: batch_size = 1 elif isinstance(prompt, list) and prompt is not None: batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if isinstance(negative_prompt, str): negative_prompt = negative_prompt elif isinstance(negative_prompt, list): negative_prompt = negative_prompt else: assert len(negative_prompt) == batch_size shape = ( batch_size * num_images, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) latent_noise = randn_tensor( shape, device=device, dtype=dtype, generator=generator, ) positive_latents = ( self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator) if liked and len(liked) > 0 else torch.tensor( [], device=device, dtype=dtype, ) ) negative_latents = ( self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator) if disliked and len(disliked) > 0 else torch.tensor( [], device=device, dtype=dtype, ) ) do_classifier_free_guidance = guidance_scale > 0.1 (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt( prompt, device, num_images, do_classifier_free_guidance, negative_prompt, ).split([num_images * batch_size, num_images * batch_size]) batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0) null_tokens = self.tokenizer( [""], return_tensors="pt", max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = null_tokens.attention_mask.to(device) else: attention_mask = None null_prompt_emb = self.text_encoder( input_ids=null_tokens.input_ids.to(device), attention_mask=attention_mask, ).last_hidden_state null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latent_noise = latent_noise * self.scheduler.init_noise_sigma num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order ref_start_idx = round(len(timesteps) * feedback_start_ratio) ref_end_idx = 
round(len(timesteps) * feedback_end_ratio) with self.progress_bar(total=num_inference_steps) as pbar: for i, t in enumerate(timesteps): sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0 if hasattr(self.scheduler, "sigmas"): sigma = self.scheduler.sigmas[i] alpha_hat = 1 / (sigma**2 + 1) z_single = self.scheduler.scale_model_input(latent_noise, t) z_all = torch.cat([z_single] * 2, dim=0) z_ref = torch.cat([positive_latents, negative_latents], dim=0) if i >= ref_start_idx and i <= ref_end_idx: weight_factor = max_weight else: weight_factor = min_weight pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale) neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale) if z_ref.size(0) > 0 and weight_factor > 0: noise = torch.randn_like(z_ref) if isinstance(self.scheduler, EulerAncestralDiscreteScheduler): z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype) else: z_ref_noised = self.scheduler.add_noise(z_ref, noise, t) ref_prompt_embd = torch.cat( [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0 ) cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd) n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0] cached_pos_hs, cached_neg_hs = [], [] for hs in cached_hidden_states: cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0) cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1) cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1) cached_pos_hs.append(cached_pos) cached_neg_hs.append(cached_neg) if n_pos == 0: cached_pos_hs = None if n_neg == 0: cached_neg_hs = None else: cached_pos_hs, cached_neg_hs = None, None unet_out = self.unet_forward_with_cached_hidden_states( z_all, t, prompt_embd=batched_prompt_embd, cached_pos_hiddens=cached_pos_hs, cached_neg_hiddens=cached_neg_hs, pos_weights=pos_ws, neg_weights=neg_ws, )[0] noise_cond, noise_uncond = unet_out.chunk(2) guidance = noise_cond - noise_uncond noise_pred = noise_uncond + guidance_scale * guidance latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0] if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): pbar.update() y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0] imgs = self.image_processor.postprocess( y, output_type=output_type, ) if not return_dict: return imgs return StableDiffusionPipelineOutput(imgs, False) def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype): """ Convert latent PIL image to a torch tensor for further processing. """ if isinstance(image, str): image = Image.open(image) if not image.mode == "RGB": image = image.convert("RGB") image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0] return image.type(dtype)
diffusers/examples/community/pipeline_fabric.py/0
{ "file_path": "diffusers/examples/community/pipeline_fabric.py", "repo_id": "diffusers", "token_count": 16558 }
127
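A minimal, hypothetical usage sketch for the FABRIC pipeline in `pipeline_fabric.py` above. The base checkpoint id, the `custom_pipeline` loading path, and the feedback-image URL are assumptions for illustration only; the keyword arguments (`liked`, `disliked`, `num_images`, `feedback_start_ratio`, `feedback_end_ratio`, `min_weight`, `max_weight`, `neg_scale`) mirror the `__call__` signature documented in the file.

```py
# Hedged usage sketch for the FABRIC community pipeline above.
# Assumptions: the base checkpoint id, the `custom_pipeline` name, and the image URL
# are placeholders; adjust them to your environment.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",     # assumed base checkpoint
    custom_pipeline="pipeline_fabric",    # assumed community-pipeline identifier
    torch_dtype=torch.float16,
).to("cuda")

# Feedback images steer generation: latents of `liked` images attract the sample,
# latents of `disliked` images repel it during the configured step window.
liked = [load_image("https://example.com/liked.png")]    # placeholder URL
disliked = []

result = pipe(
    prompt="a photo of a red sports car",
    liked=liked,
    disliked=disliked,
    num_images=2,
    num_inference_steps=20,
    feedback_start_ratio=0.33,  # feedback active from ~33% ...
    feedback_end_ratio=0.66,    # ... to ~66% of the denoising schedule
    min_weight=0.05,
    max_weight=1.0,
    neg_scale=0.5,
)
images = result.images
```

Outside the `[feedback_start_ratio, feedback_end_ratio]` window the attention weighting falls back to `min_weight`, so the feedback images mainly shape the mid-denoising structure of the result.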
# A diffuser version implementation of Zero1to3 (https://github.com/cvlab-columbia/zero123), ICCV 2023 # by Xin Kong import inspect from typing import Any, Callable, Dict, List, Optional, Union import kornia import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection # from ...configuration_utils import FrozenDict # from ...models import AutoencoderKL, UNet2DConditionModel # from ...schedulers import KarrasDiffusionSchedulers # from ...utils import ( # deprecate, # is_accelerate_available, # is_accelerate_version, # logging, # randn_tensor, # replace_example_docstring, # ) # from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin # from . import StableDiffusionPipelineOutput # from .safety_checker import StableDiffusionSafetyChecker from diffusers import AutoencoderKL, DiffusionPipeline, StableDiffusionMixin, UNet2DConditionModel from diffusers.configuration_utils import ConfigMixin, FrozenDict from diffusers.models.modeling_utils import ModelMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name # todo EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ class CCProjection(ModelMixin, ConfigMixin): def __init__(self, in_channel=772, out_channel=768): super().__init__() self.in_channel = in_channel self.out_channel = out_channel self.projection = torch.nn.Linear(in_channel, out_channel) def forward(self, x): return self.projection(x) class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for single view conditioned novel view generation using Zero1to3. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. image_encoder ([`CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. 
feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. cc_projection ([`CCProjection`]): Projection layer to project the concated CLIP features and pose embeddings to the original CLIP feature size. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, image_encoder: CLIPVisionModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, cc_projection: CCProjection, requires_safety_checker: bool = True, ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, cc_projection=cc_projection, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.register_to_config(requires_safety_checker=requires_safety_checker) # self.model_mode = None def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def CLIP_preprocess(self, x): dtype = x.dtype # following openai's implementation # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741 # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608 if isinstance(x, torch.Tensor): if x.min() < -1.0 or x.max() > 1.0: raise ValueError("Expected input tensor to have values in the range [-1, 1]") x = kornia.geometry.resize( x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False ).to(dtype=dtype) x = (x + 1.0) / 2.0 # renormalize according to clip x = kornia.enhance.normalize( x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711]) ) return x # from image_variation def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) assert image.ndim == 4, "Image must have 4 dimensions" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 image = image.to(device=device, dtype=dtype) image = self.CLIP_preprocess(image) # if not isinstance(image, torch.Tensor): # # 0-255 # print("Warning: image is processed by hf's preprocess, which is different from openai original's.") # image = self.feature_extractor(images=image, return_tensors="pt").pixel_values image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype) image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.cc_projection.parameters()).dtype if isinstance(pose, torch.Tensor): pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype) else: if isinstance(pose[0], list): pose = torch.Tensor(pose) else: pose = torch.Tensor([pose]) x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1) pose_embeddings = ( torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1) .unsqueeze(1) .to(device=device, dtype=dtype) ) # B, 1, 4 # duplicate pose embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = pose_embeddings.shape pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1) pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(pose_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings]) return pose_embeddings def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance): img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False) pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False) prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1) prompt_embeds = self.cc_projection(prompt_embeds) # prompt_embeds = img_prompt_embeds # follow 0123, add negative prompt, after projection if do_classifier_free_guidance: negative_prompt = torch.zeros_like(prompt_embeds) prompt_embeds = torch.cat([negative_prompt, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) assert image.ndim == 4, "Image must have 4 dimensions" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 image = image.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.mode() # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor if batch_size > init_latents.shape[0]: # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1) num_images_per_prompt = batch_size // init_latents.shape[0] # duplicate image latents for each generation per prompt, using mps friendly method bs_embed, emb_c, emb_h, emb_w = init_latents.shape init_latents = init_latents.unsqueeze(1) init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1) init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w) # init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123 init_latents = ( torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents ) init_latents = init_latents.to(device=device, dtype=dtype) return init_latents # def load_cc_projection(self, pretrained_weights=None): # self.cc_projection = torch.nn.Linear(772, 768) # torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) # torch.nn.init.zeros_(list(self.cc_projection.parameters())[1]) # if pretrained_weights is not None: # self.cc_projection.load_state_dict(pretrained_weights) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, input_imgs: Union[torch.Tensor, PIL.Image.Image] = None, prompt_imgs: Union[torch.Tensor, PIL.Image.Image] = None, poses: Union[List[float], List[List[float]]] = None, torch_dtype=torch.float32, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 3.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: float = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: input_imgs (`PIL` or `List[PIL]`, *optional*): The single input image for each 3D object prompt_imgs (`PIL` or `List[PIL]`, *optional*): Same as input_imgs, but will be used later as an image prompt condition, encoded by CLIP feature height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). 
`guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. 
Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct # input_image = hint_imgs self.check_inputs(input_imgs, height, width, callback_steps) # 2. Define call parameters if isinstance(input_imgs, PIL.Image.Image): batch_size = 1 elif isinstance(input_imgs, list): batch_size = len(input_imgs) else: batch_size = input_imgs.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input image with pose as prompt prompt_embeds = self._encode_image_with_pose( prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables latents = self.prepare_latents( batch_size * num_images_per_prompt, 4, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare image latents img_latents = self.prepare_img_latents( input_imgs, batch_size * num_images_per_prompt, prompt_embeds.dtype, device, generator, do_classifier_free_guidance, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, img_latents], dim=1) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 # latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing has_nsfw_concept = None if output_type == "latent": image = latents elif output_type == "pil": # 8. Post-processing image = self.decode_latents(latents) # 10. Convert to PIL image = self.numpy_to_pil(image) else: # 8. 
Post-processing image = self.decode_latents(latents) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/examples/community/pipeline_zero1to3.py/0
{ "file_path": "diffusers/examples/community/pipeline_zero1to3.py", "repo_id": "diffusers", "token_count": 17992 }
128
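A hypothetical usage sketch for the Zero1to3 novel-view pipeline in `pipeline_zero1to3.py` above. The checkpoint id and input-image path are placeholders; the checkpoint must expose the `cc_projection` component this pipeline registers, and `poses` follows the `[x, y, z]` layout consumed by `_encode_pose` (two angles in degrees plus a scalar offset).

```py
# Hedged usage sketch for the Zero1to3 pipeline above (single-view novel view synthesis).
# Assumptions: checkpoint id and image path are placeholders; the checkpoint must provide
# the `cc_projection` module registered by this pipeline.
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "your-org/zero1to3-checkpoint",        # placeholder checkpoint exporting this pipeline's components
    custom_pipeline="pipeline_zero1to3",   # assumed community-pipeline identifier
).to("cuda")

# The conditioning view; `check_inputs` accepts a PIL image, a tensor in [-1, 1], or a list.
cond_image = Image.open("input_view.png").convert("RGB").resize((256, 256))  # placeholder path

# `_encode_pose` reads each pose as [x, y, z]: x is passed through deg2rad, y through
# sin/cos of deg2rad, and z is appended unchanged before the CC projection.
views = pipe(
    input_imgs=cond_image,
    prompt_imgs=cond_image,
    poses=[[30.0, 45.0, 0.0]],
    height=256,
    width=256,
    num_inference_steps=50,
    guidance_scale=3.0,
).images
```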
from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.configuration_utils import FrozenDict from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class StableDiffusionMegaPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionMegaSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.register_to_config(requires_safety_checker=requires_safety_checker) @property def components(self) -> Dict[str, Any]: return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")} @torch.no_grad() def inpaint( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline return StableDiffusionInpaintPipelineLegacy(**self.components)( prompt=prompt, image=image, mask_image=mask_image, strength=strength, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, output_type=output_type, return_dict=return_dict, callback=callback, ) @torch.no_grad() def img2img( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image], strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline return StableDiffusionImg2ImgPipeline(**self.components)( prompt=prompt, image=image, strength=strength, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, ) @torch.no_grad() def text2img( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, 
    ):
        # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline
        return StableDiffusionPipeline(**self.components)(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
diffusers/examples/community/stable_diffusion_mega.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_mega.py", "repo_id": "diffusers", "token_count": 3877 }
129
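A short usage sketch for the `StableDiffusionMegaPipeline` above, illustrating its design choice of exposing `text2img`, `img2img`, and `inpaint` from one set of shared components instead of loading three separate pipelines. The checkpoint id and image URLs are assumptions; the method names and arguments match the class definition.

```py
# Hedged usage sketch for the "mega" community pipeline above.
# Assumptions: the checkpoint id and image URLs are placeholders.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",          # assumed base checkpoint
    custom_pipeline="stable_diffusion_mega",   # assumed community-pipeline identifier
    torch_dtype=torch.float16,
).to("cuda")

# One weight set, three tasks: each helper rebuilds the dedicated pipeline from self.components.
txt_image = pipe.text2img("a fantasy landscape, trending on artstation").images[0]

init_image = load_image("https://example.com/init.png").resize((512, 512))    # placeholder URL
i2i_image = pipe.img2img(
    prompt="a watercolor painting of the same scene", image=init_image, strength=0.75
).images[0]

mask_image = load_image("https://example.com/mask.png").resize((512, 512))    # placeholder URL
inp_image = pipe.inpaint(
    prompt="a sitting cat", image=init_image, mask_image=mask_image, strength=0.75
).images[0]
```

Because each helper instantiates the dedicated pipeline from `self.components`, switching between the three tasks loads no additional weights.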
# Copyright 2025 The HuggingFace Team. All rights reserved. # Copyright (c) Alibaba, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on [AnyText: Multilingual Visual Text Generation And Editing](https://huggingface.co/papers/2311.03054). # Authors: Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, Xuansong Xie # Code: https://github.com/tyxsspa/AnyText with Apache-2.0 license # # Adapted to Diffusers by [M. Tolga Cangöz](https://github.com/tolgacangoz). import inspect import math import os import re import sys import unicodedata from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import cv2 import numpy as np import PIL.Image import torch import torch.nn.functional as F from huggingface_hub import hf_hub_download from ocr_recog.RecModel import RecModel from PIL import Image, ImageDraw, ImageFont from safetensors.torch import load_file from skimage.transform._geometric import _umeyama as get_sym_mat from torch import nn from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from transformers.modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.models.modeling_utils import ModelMixin from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.constants import HF_MODULES_CACHE from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor class Checker: def __init__(self): pass def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or self._is_control(char): continue if self._is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) def _is_control(self, char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_whitespace(self, char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False checker = Checker() PLACE_HOLDER = "*" logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> # This example requires the `anytext_controlnet.py` file: >>> # !git clone --depth 1 https://github.com/huggingface/diffusers.git >>> # %cd diffusers/examples/research_projects/anytext >>> # Let's choose a font file shared by an HF staff: >>> # !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf >>> import torch >>> from diffusers import DiffusionPipeline >>> from anytext_controlnet import AnyTextControlNetModel >>> from diffusers.utils import load_image >>> anytext_controlnet = AnyTextControlNetModel.from_pretrained("tolgacangoz/anytext-controlnet", torch_dtype=torch.float16, ... variant="fp16",) >>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/anytext", font_path="arial-unicode-ms.ttf", ... controlnet=anytext_controlnet, torch_dtype=torch.float16, ... trust_remote_code=False, # One needs to give permission to run this pipeline's code ... ).to("cuda") >>> # generate image >>> prompt = 'photo of caramel macchiato coffee on the table, top-down perspective, with "Any" "Text" written on it using cream' >>> draw_pos = load_image("https://raw.githubusercontent.com/tyxsspa/AnyText/refs/heads/main/example_images/gen9.png") >>> # There are two modes: "generate" and "edit". "edit" mode requires `ori_image` parameter for the image to be edited. >>> image = pipe(prompt, num_inference_steps=20, mode="generate", draw_pos=draw_pos, ... ).images[0] >>> image ``` """ def get_clip_token_for_string(tokenizer, string): batch_encoding = tokenizer( string, truncation=True, max_length=77, return_length=True, return_overflowing_tokens=False, padding="max_length", return_tensors="pt", ) tokens = batch_encoding["input_ids"] assert torch.count_nonzero(tokens - 49407) == 2, ( f"String '{string}' maps to more than a single token. 
Please use another string" ) return tokens[0, 1] def get_recog_emb(encoder, img_list): _img_list = [(img.repeat(1, 3, 1, 1) * 255)[0] for img in img_list] encoder.predictor.eval() _, preds_neck = encoder.pred_imglist(_img_list, show_debug=False) return preds_neck class EmbeddingManager(ModelMixin, ConfigMixin): @register_to_config def __init__( self, embedder, placeholder_string="*", use_fp16=False, token_dim=768, get_recog_emb=None, ): super().__init__() get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer) self.proj = nn.Linear(40 * 64, token_dim) proj_dir = hf_hub_download( repo_id="tolgacangoz/anytext", filename="text_embedding_module/proj.safetensors", cache_dir=HF_MODULES_CACHE, ) self.proj.load_state_dict(load_file(proj_dir, device=str(embedder.device))) if use_fp16: self.proj = self.proj.to(dtype=torch.float16) self.placeholder_token = get_token_for_string(placeholder_string) @torch.no_grad() def encode_text(self, text_info): if self.config.get_recog_emb is None: self.config.get_recog_emb = partial(get_recog_emb, self.recog) gline_list = [] for i in range(len(text_info["n_lines"])): # sample index in a batch n_lines = text_info["n_lines"][i] for j in range(n_lines): # line gline_list += [text_info["gly_line"][j][i : i + 1]] if len(gline_list) > 0: recog_emb = self.config.get_recog_emb(gline_list) enc_glyph = self.proj(recog_emb.reshape(recog_emb.shape[0], -1).to(self.proj.weight.dtype)) self.text_embs_all = [] n_idx = 0 for i in range(len(text_info["n_lines"])): # sample index in a batch n_lines = text_info["n_lines"][i] text_embs = [] for j in range(n_lines): # line text_embs += [enc_glyph[n_idx : n_idx + 1]] n_idx += 1 self.text_embs_all += [text_embs] @torch.no_grad() def forward( self, tokenized_text, embedded_text, ): b, device = tokenized_text.shape[0], tokenized_text.device for i in range(b): idx = tokenized_text[i] == self.placeholder_token.to(device) if sum(idx) > 0: if i >= len(self.text_embs_all): logger.warning("truncation for log images...") break text_emb = torch.cat(self.text_embs_all[i], dim=0) if sum(idx) != len(text_emb): logger.warning("truncation for long caption...") text_emb = text_emb.to(embedded_text.device) embedded_text[i][idx] = text_emb[: sum(idx)] return embedded_text def embedding_parameters(self): return self.parameters() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) def min_bounding_rect(img): ret, thresh = cv2.threshold(img, 127, 255, 0) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if len(contours) == 0: print("Bad contours, using fake bbox...") return np.array([[0, 0], [100, 0], [100, 100], [0, 100]]) max_contour = max(contours, key=cv2.contourArea) rect = cv2.minAreaRect(max_contour) box = cv2.boxPoints(rect) box = np.int0(box) # sort x_sorted = sorted(box, key=lambda x: x[0]) left = x_sorted[:2] right = x_sorted[2:] left = sorted(left, key=lambda x: x[1]) (tl, bl) = left right = sorted(right, key=lambda x: x[1]) (tr, br) = right if tl[1] > bl[1]: (tl, bl) = (bl, tl) if tr[1] > br[1]: (tr, br) = (br, tr) return np.array([tl, tr, br, bl]) def adjust_image(box, img): pts1 = np.float32([box[0], box[1], box[2], box[3]]) width = max(np.linalg.norm(pts1[0] - pts1[1]), np.linalg.norm(pts1[2] - pts1[3])) height = max(np.linalg.norm(pts1[0] - pts1[3]), np.linalg.norm(pts1[1] - pts1[2])) pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]]) # get transform matrix M = get_sym_mat(pts1, pts2, estimate_scale=True) C, H, W = img.shape T = 
np.array([[2 / W, 0, -1], [0, 2 / H, -1], [0, 0, 1]]) theta = np.linalg.inv(T @ M @ np.linalg.inv(T)) theta = torch.from_numpy(theta[:2, :]).unsqueeze(0).type(torch.float32).to(img.device) grid = F.affine_grid(theta, torch.Size([1, C, H, W]), align_corners=True) result = F.grid_sample(img.unsqueeze(0), grid, align_corners=True) result = torch.clamp(result.squeeze(0), 0, 255) # crop result = result[:, : int(height), : int(width)] return result def crop_image(src_img, mask): box = min_bounding_rect(mask) result = adjust_image(box, src_img) if len(result.shape) == 2: result = torch.stack([result] * 3, axis=-1) return result def create_predictor(model_lang="ch", device="cpu", use_fp16=False): model_dir = hf_hub_download( repo_id="tolgacangoz/anytext", filename="text_embedding_module/OCR/ppv3_rec.pth", cache_dir=HF_MODULES_CACHE, ) if not os.path.exists(model_dir): raise ValueError("not find model file path {}".format(model_dir)) if model_lang == "ch": n_class = 6625 elif model_lang == "en": n_class = 97 else: raise ValueError(f"Unsupported OCR recog model_lang: {model_lang}") rec_config = { "in_channels": 3, "backbone": {"type": "MobileNetV1Enhance", "scale": 0.5, "last_conv_stride": [1, 2], "last_pool_type": "avg"}, "neck": { "type": "SequenceEncoder", "encoder_type": "svtr", "dims": 64, "depth": 2, "hidden_dims": 120, "use_guide": True, }, "head": {"type": "CTCHead", "fc_decay": 0.00001, "out_channels": n_class, "return_feats": True}, } rec_model = RecModel(rec_config) state_dict = torch.load(model_dir, map_location=device) rec_model.load_state_dict(state_dict) return rec_model def _check_image_file(path): img_end = ("tiff", "tif", "bmp", "rgb", "jpg", "png", "jpeg") return path.lower().endswith(tuple(img_end)) def get_image_file_list(img_file): imgs_lists = [] if img_file is None or not os.path.exists(img_file): raise Exception("not found any img file in {}".format(img_file)) if os.path.isfile(img_file) and _check_image_file(img_file): imgs_lists.append(img_file) elif os.path.isdir(img_file): for single_file in os.listdir(img_file): file_path = os.path.join(img_file, single_file) if os.path.isfile(file_path) and _check_image_file(file_path): imgs_lists.append(file_path) if len(imgs_lists) == 0: raise Exception("not found any img file in {}".format(img_file)) imgs_lists = sorted(imgs_lists) return imgs_lists class TextRecognizer(object): def __init__(self, args, predictor): self.rec_image_shape = [int(v) for v in args["rec_image_shape"].split(",")] self.rec_batch_num = args["rec_batch_num"] self.predictor = predictor self.chars = self.get_char_dict(args["rec_char_dict_path"]) self.char2id = {x: i for i, x in enumerate(self.chars)} self.is_onnx = not isinstance(self.predictor, torch.nn.Module) self.use_fp16 = args["use_fp16"] # img: CHW def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape assert imgC == img.shape[0] imgW = int((imgH * max_wh_ratio)) h, w = img.shape[1:] ratio = w / float(h) if math.ceil(imgH * ratio) > imgW: resized_w = imgW else: resized_w = int(math.ceil(imgH * ratio)) resized_image = torch.nn.functional.interpolate( img.unsqueeze(0), size=(imgH, resized_w), mode="bilinear", align_corners=True, ) resized_image /= 255.0 resized_image -= 0.5 resized_image /= 0.5 padding_im = torch.zeros((imgC, imgH, imgW), dtype=torch.float32).to(img.device) padding_im[:, :, 0:resized_w] = resized_image[0] return padding_im # img_list: list of tensors with shape chw 0-255 def pred_imglist(self, img_list, show_debug=False): img_num = len(img_list) assert 
img_num > 0 # Calculate the aspect ratio of all text bars width_list = [] for img in img_list: width_list.append(img.shape[2] / float(img.shape[1])) # Sorting can speed up the recognition process indices = torch.from_numpy(np.argsort(np.array(width_list))) batch_num = self.rec_batch_num preds_all = [None] * img_num preds_neck_all = [None] * img_num for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] imgC, imgH, imgW = self.rec_image_shape[:3] max_wh_ratio = imgW / imgH for ino in range(beg_img_no, end_img_no): h, w = img_list[indices[ino]].shape[1:] if h > w * 1.2: img = img_list[indices[ino]] img = torch.transpose(img, 1, 2).flip(dims=[1]) img_list[indices[ino]] = img h, w = img.shape[1:] # wh_ratio = w * 1.0 / h # max_wh_ratio = max(max_wh_ratio, wh_ratio) # comment to not use different ratio for ino in range(beg_img_no, end_img_no): norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) if self.use_fp16: norm_img = norm_img.half() norm_img = norm_img.unsqueeze(0) norm_img_batch.append(norm_img) norm_img_batch = torch.cat(norm_img_batch, dim=0) if show_debug: for i in range(len(norm_img_batch)): _img = norm_img_batch[i].permute(1, 2, 0).detach().cpu().numpy() _img = (_img + 0.5) * 255 _img = _img[:, :, ::-1] file_name = f"{indices[beg_img_no + i]}" if os.path.exists(file_name + ".jpg"): file_name += "_2" # ori image cv2.imwrite(file_name + ".jpg", _img) if self.is_onnx: input_dict = {} input_dict[self.predictor.get_inputs()[0].name] = norm_img_batch.detach().cpu().numpy() outputs = self.predictor.run(None, input_dict) preds = {} preds["ctc"] = torch.from_numpy(outputs[0]) preds["ctc_neck"] = [torch.zeros(1)] * img_num else: preds = self.predictor(norm_img_batch.to(next(self.predictor.parameters()).device)) for rno in range(preds["ctc"].shape[0]): preds_all[indices[beg_img_no + rno]] = preds["ctc"][rno] preds_neck_all[indices[beg_img_no + rno]] = preds["ctc_neck"][rno] return torch.stack(preds_all, dim=0), torch.stack(preds_neck_all, dim=0) def get_char_dict(self, character_dict_path): character_str = [] with open(character_dict_path, "rb") as fin: lines = fin.readlines() for line in lines: line = line.decode("utf-8").strip("\n").strip("\r\n") character_str.append(line) dict_character = list(character_str) dict_character = ["sos"] + dict_character + [" "] # eos is space return dict_character def get_text(self, order): char_list = [self.chars[text_id] for text_id in order] return "".join(char_list) def decode(self, mat): text_index = mat.detach().cpu().numpy().argmax(axis=1) ignored_tokens = [0] selection = np.ones(len(text_index), dtype=bool) selection[1:] = text_index[1:] != text_index[:-1] for ignored_token in ignored_tokens: selection &= text_index != ignored_token return text_index[selection], np.where(selection)[0] def get_ctcloss(self, preds, gt_text, weight): if not isinstance(weight, torch.Tensor): weight = torch.tensor(weight).to(preds.device) ctc_loss = torch.nn.CTCLoss(reduction="none") log_probs = preds.log_softmax(dim=2).permute(1, 0, 2) # NTC-->TNC targets = [] target_lengths = [] for t in gt_text: targets += [self.char2id.get(i, len(self.chars) - 1) for i in t] target_lengths += [len(t)] targets = torch.tensor(targets).to(preds.device) target_lengths = torch.tensor(target_lengths).to(preds.device) input_lengths = torch.tensor([log_probs.shape[0]] * (log_probs.shape[1])).to(preds.device) loss = ctc_loss(log_probs, targets, input_lengths, target_lengths) loss = loss / input_lengths * weight 
return loss class AbstractEncoder(nn.Module): def __init__(self): super().__init__() def encode(self, *args, **kwargs): raise NotImplementedError class FrozenCLIPEmbedderT3(AbstractEncoder, ModelMixin, ConfigMixin): """Uses the CLIP transformer encoder for text (from Hugging Face)""" @register_to_config def __init__( self, device="cpu", max_length=77, freeze=True, use_fp16=False, variant: Optional[str] = None, ): super().__init__() self.tokenizer = CLIPTokenizer.from_pretrained("tolgacangoz/anytext", subfolder="tokenizer") self.transformer = CLIPTextModel.from_pretrained( "tolgacangoz/anytext", subfolder="text_encoder", torch_dtype=torch.float16 if use_fp16 else torch.float32, variant="fp16" if use_fp16 else None, ) if freeze: self.freeze() def embedding_forward( self, input_ids=None, position_ids=None, inputs_embeds=None, embedding_manager=None, ): seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) if embedding_manager is not None: inputs_embeds = embedding_manager(input_ids, inputs_embeds) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings self.transformer.text_model.embeddings.forward = embedding_forward.__get__( self.transformer.text_model.embeddings ) def encoder_forward( self, inputs_embeds, attention_mask=None, causal_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return hidden_states self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder) def text_encoder_forward( self, input_ids=None, attention_mask=None, position_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, embedding_manager=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager ) # CLIP's text model uses causal mask, prepare it here. 
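            # (added note) The causal mask built below lets each token attend only to earlier
            # positions; a padding `attention_mask`, if one is passed, is separately expanded to the
            # 4D [bsz, 1, tgt_len, src_len] layout expected by the encoder layers.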
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) last_hidden_state = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.final_layer_norm(last_hidden_state) return last_hidden_state self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model) def transformer_forward( self, input_ids=None, attention_mask=None, position_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, embedding_manager=None, ): return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, embedding_manager=embedding_manager, ) self.transformer.forward = transformer_forward.__get__(self.transformer) def freeze(self): self.transformer = self.transformer.eval() for param in self.parameters(): param.requires_grad = False def forward(self, text, **kwargs): batch_encoding = self.tokenizer( text, truncation=False, max_length=self.config.max_length, return_length=True, return_overflowing_tokens=False, padding="longest", return_tensors="pt", ) input_ids = batch_encoding["input_ids"] tokens_list = self.split_chunks(input_ids) z_list = [] for tokens in tokens_list: tokens = tokens.to(self.device) _z = self.transformer(input_ids=tokens, **kwargs) z_list += [_z] return torch.cat(z_list, dim=1) def encode(self, text, **kwargs): return self(text, **kwargs) def split_chunks(self, input_ids, chunk_size=75): tokens_list = [] bs, n = input_ids.shape id_start = input_ids[:, 0].unsqueeze(1) # dim --> [bs, 1] id_end = input_ids[:, -1].unsqueeze(1) if n == 2: # empty caption tokens_list.append(torch.cat((id_start,) + (id_end,) * (chunk_size + 1), dim=1)) trimmed_encoding = input_ids[:, 1:-1] num_full_groups = (n - 2) // chunk_size for i in range(num_full_groups): group = trimmed_encoding[:, i * chunk_size : (i + 1) * chunk_size] group_pad = torch.cat((id_start, group, id_end), dim=1) tokens_list.append(group_pad) remaining_columns = (n - 2) % chunk_size if remaining_columns > 0: remaining_group = trimmed_encoding[:, -remaining_columns:] padding_columns = chunk_size - remaining_group.shape[1] padding = id_end.expand(bs, padding_columns) remaining_group_pad = torch.cat((id_start, remaining_group, padding, id_end), dim=1) tokens_list.append(remaining_group_pad) return tokens_list class TextEmbeddingModule(ModelMixin, ConfigMixin): @register_to_config def __init__(self, font_path, use_fp16=False, device="cpu"): super().__init__() font = ImageFont.truetype(font_path, 60) self.frozen_CLIP_embedder_t3 = FrozenCLIPEmbedderT3(device=device, use_fp16=use_fp16) self.embedding_manager = EmbeddingManager(self.frozen_CLIP_embedder_t3, use_fp16=use_fp16) self.text_predictor = create_predictor(device=device, use_fp16=use_fp16).eval() args = { "rec_image_shape": "3, 48, 320", "rec_batch_num": 6, "rec_char_dict_path": hf_hub_download( repo_id="tolgacangoz/anytext", 
filename="text_embedding_module/OCR/ppocr_keys_v1.txt", cache_dir=HF_MODULES_CACHE, ), "use_fp16": use_fp16, } self.embedding_manager.recog = TextRecognizer(args, self.text_predictor) self.register_to_config(font=font) @torch.no_grad() def forward( self, prompt, texts, negative_prompt, num_images_per_prompt, mode, draw_pos, sort_priority="↕", max_chars=77, revise_pos=False, h=512, w=512, ): if prompt is None and texts is None: raise ValueError("Prompt or texts must be provided!") # preprocess pos_imgs(if numpy, make sure it's white pos in black bg) if draw_pos is None: pos_imgs = np.zeros((w, h, 1)) if isinstance(draw_pos, PIL.Image.Image): pos_imgs = np.array(draw_pos)[..., ::-1] pos_imgs = 255 - pos_imgs elif isinstance(draw_pos, str): draw_pos = cv2.imread(draw_pos)[..., ::-1] if draw_pos is None: raise ValueError(f"Can't read draw_pos image from {draw_pos}!") pos_imgs = 255 - draw_pos elif isinstance(draw_pos, torch.Tensor): pos_imgs = draw_pos.cpu().numpy() else: if not isinstance(draw_pos, np.ndarray): raise ValueError(f"Unknown format of draw_pos: {type(draw_pos)}") if mode == "edit": pos_imgs = cv2.resize(pos_imgs, (w, h)) pos_imgs = pos_imgs[..., 0:1] pos_imgs = cv2.convertScaleAbs(pos_imgs) _, pos_imgs = cv2.threshold(pos_imgs, 254, 255, cv2.THRESH_BINARY) # separate pos_imgs pos_imgs = self.separate_pos_imgs(pos_imgs, sort_priority) if len(pos_imgs) == 0: pos_imgs = [np.zeros((h, w, 1))] n_lines = len(texts) if len(pos_imgs) < n_lines: if n_lines == 1 and texts[0] == " ": pass # text-to-image without text else: raise ValueError( f"Found {len(pos_imgs)} positions that < needed {n_lines} from prompt, check and try again!" ) elif len(pos_imgs) > n_lines: str_warning = f"Warning: found {len(pos_imgs)} positions that > needed {n_lines} from prompt." logger.warning(str_warning) # get pre_pos, poly_list, hint that needed for anytext pre_pos = [] poly_list = [] for input_pos in pos_imgs: if input_pos.mean() != 0: input_pos = input_pos[..., np.newaxis] if len(input_pos.shape) == 2 else input_pos poly, pos_img = self.find_polygon(input_pos) pre_pos += [pos_img / 255.0] poly_list += [poly] else: pre_pos += [np.zeros((h, w, 1))] poly_list += [None] np_hint = np.sum(pre_pos, axis=0).clip(0, 1) # prepare info dict text_info = {} text_info["glyphs"] = [] text_info["gly_line"] = [] text_info["positions"] = [] text_info["n_lines"] = [len(texts)] * num_images_per_prompt for i in range(len(texts)): text = texts[i] if len(text) > max_chars: str_warning = f'"{text}" length > max_chars: {max_chars}, will be cut off...' logger.warning(str_warning) text = text[:max_chars] gly_scale = 2 if pre_pos[i].mean() != 0: gly_line = self.draw_glyph(self.config.font, text) glyphs = self.draw_glyph2( self.config.font, text, poly_list[i], scale=gly_scale, width=w, height=h, add_space=False ) if revise_pos: resize_gly = cv2.resize(glyphs, (pre_pos[i].shape[1], pre_pos[i].shape[0])) new_pos = cv2.morphologyEx( (resize_gly * 255).astype(np.uint8), cv2.MORPH_CLOSE, kernel=np.ones((resize_gly.shape[0] // 10, resize_gly.shape[1] // 10), dtype=np.uint8), iterations=1, ) new_pos = new_pos[..., np.newaxis] if len(new_pos.shape) == 2 else new_pos contours, _ = cv2.findContours(new_pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if len(contours) != 1: str_warning = f"Fail to revise position {i} to bounding rect, remain position unchanged..." 
logger.warning(str_warning) else: rect = cv2.minAreaRect(contours[0]) poly = np.int0(cv2.boxPoints(rect)) pre_pos[i] = cv2.drawContours(new_pos, [poly], -1, 255, -1) / 255.0 else: glyphs = np.zeros((h * gly_scale, w * gly_scale, 1)) gly_line = np.zeros((80, 512, 1)) pos = pre_pos[i] text_info["glyphs"] += [self.arr2tensor(glyphs, num_images_per_prompt)] text_info["gly_line"] += [self.arr2tensor(gly_line, num_images_per_prompt)] text_info["positions"] += [self.arr2tensor(pos, num_images_per_prompt)] self.embedding_manager.encode_text(text_info) prompt_embeds = self.frozen_CLIP_embedder_t3.encode([prompt], embedding_manager=self.embedding_manager) self.embedding_manager.encode_text(text_info) negative_prompt_embeds = self.frozen_CLIP_embedder_t3.encode( [negative_prompt or ""], embedding_manager=self.embedding_manager ) return prompt_embeds, negative_prompt_embeds, text_info, np_hint def arr2tensor(self, arr, bs): arr = np.transpose(arr, (2, 0, 1)) _arr = torch.from_numpy(arr.copy()).float().cpu() if self.config.use_fp16: _arr = _arr.half() _arr = torch.stack([_arr for _ in range(bs)], dim=0) return _arr def separate_pos_imgs(self, img, sort_priority, gap=102): num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img) components = [] for label in range(1, num_labels): component = np.zeros_like(img) component[labels == label] = 255 components.append((component, centroids[label])) if sort_priority == "↕": fir, sec = 1, 0 # top-down first elif sort_priority == "↔": fir, sec = 0, 1 # left-right first else: raise ValueError(f"Unknown sort_priority: {sort_priority}") components.sort(key=lambda c: (c[1][fir] // gap, c[1][sec] // gap)) sorted_components = [c[0] for c in components] return sorted_components def find_polygon(self, image, min_rect=False): contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) max_contour = max(contours, key=cv2.contourArea) # get contour with max area if min_rect: # get minimum enclosing rectangle rect = cv2.minAreaRect(max_contour) poly = np.int0(cv2.boxPoints(rect)) else: # get approximate polygon epsilon = 0.01 * cv2.arcLength(max_contour, True) poly = cv2.approxPolyDP(max_contour, epsilon, True) n, _, xy = poly.shape poly = poly.reshape(n, xy) cv2.drawContours(image, [poly], -1, 255, -1) return poly, image def draw_glyph(self, font, text): g_size = 50 W, H = (512, 80) new_font = font.font_variant(size=g_size) img = Image.new(mode="1", size=(W, H), color=0) draw = ImageDraw.Draw(img) left, top, right, bottom = new_font.getbbox(text) text_width = max(right - left, 5) text_height = max(bottom - top, 5) ratio = min(W * 0.9 / text_width, H * 0.9 / text_height) new_font = font.font_variant(size=int(g_size * ratio)) left, top, right, bottom = new_font.getbbox(text) text_width = right - left text_height = bottom - top x = (img.width - text_width) // 2 y = (img.height - text_height) // 2 - top // 2 draw.text((x, y), text, font=new_font, fill="white") img = np.expand_dims(np.array(img), axis=2).astype(np.float64) return img def draw_glyph2(self, font, text, polygon, vertAng=10, scale=1, width=512, height=512, add_space=True): enlarge_polygon = polygon * scale rect = cv2.minAreaRect(enlarge_polygon) box = cv2.boxPoints(rect) box = np.int0(box) w, h = rect[1] angle = rect[2] if angle < -45: angle += 90 angle = -angle if w < h: angle += 90 vert = False if abs(angle) % 90 < vertAng or abs(90 - abs(angle) % 90) % 90 < vertAng: _w = max(box[:, 0]) - min(box[:, 0]) _h = max(box[:, 1]) - min(box[:, 1]) if _h >= _w: vert = True 
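                # (added note) Near-vertical boxes are flagged here; the text is later drawn
                # character by character from top to bottom, and the rotation angle is reset to 0
                # below since the vertical layout already accounts for the orientation.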
angle = 0 img = np.zeros((height * scale, width * scale, 3), np.uint8) img = Image.fromarray(img) # infer font size image4ratio = Image.new("RGB", img.size, "white") draw = ImageDraw.Draw(image4ratio) _, _, _tw, _th = draw.textbbox(xy=(0, 0), text=text, font=font) text_w = min(w, h) * (_tw / _th) if text_w <= max(w, h): # add space if len(text) > 1 and not vert and add_space: for i in range(1, 100): text_space = self.insert_spaces(text, i) _, _, _tw2, _th2 = draw.textbbox(xy=(0, 0), text=text_space, font=font) if min(w, h) * (_tw2 / _th2) > max(w, h): break text = self.insert_spaces(text, i - 1) font_size = min(w, h) * 0.80 else: shrink = 0.75 if vert else 0.85 font_size = min(w, h) / (text_w / max(w, h)) * shrink new_font = font.font_variant(size=int(font_size)) left, top, right, bottom = new_font.getbbox(text) text_width = right - left text_height = bottom - top layer = Image.new("RGBA", img.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(layer) if not vert: draw.text( (rect[0][0] - text_width // 2, rect[0][1] - text_height // 2 - top), text, font=new_font, fill=(255, 255, 255, 255), ) else: x_s = min(box[:, 0]) + _w // 2 - text_height // 2 y_s = min(box[:, 1]) for c in text: draw.text((x_s, y_s), c, font=new_font, fill=(255, 255, 255, 255)) _, _t, _, _b = new_font.getbbox(c) y_s += _b rotated_layer = layer.rotate(angle, expand=1, center=(rect[0][0], rect[0][1])) x_offset = int((img.width - rotated_layer.width) / 2) y_offset = int((img.height - rotated_layer.height) / 2) img.paste(rotated_layer, (x_offset, y_offset), rotated_layer) img = np.expand_dims(np.array(img.convert("1")), axis=2).astype(np.float64) return img def insert_spaces(self, string, nSpace): if nSpace == 0: return string new_string = "" for char in string: new_string += char + " " * nSpace return new_string[:-nSpace] # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class AuxiliaryLatentModule(ModelMixin, ConfigMixin): @register_to_config def __init__( self, vae, device="cpu", ): super().__init__() @torch.no_grad() def forward( self, text_info, mode, draw_pos, ori_image, num_images_per_prompt, np_hint, h=512, w=512, ): if mode == "generate": edit_image = np.ones((h, w, 3)) * 127.5 # empty mask image elif mode == "edit": if draw_pos is None or ori_image is None: raise ValueError("Reference image and position image are needed for text editing!") if isinstance(ori_image, str): ori_image = cv2.imread(ori_image)[..., ::-1] if ori_image is None: raise ValueError(f"Can't read ori_image image from {ori_image}!") elif isinstance(ori_image, torch.Tensor): ori_image = ori_image.cpu().numpy() elif isinstance(ori_image, PIL.Image.Image): ori_image = np.array(ori_image.convert("RGB")) else: if not isinstance(ori_image, np.ndarray): raise ValueError(f"Unknown format of ori_image: {type(ori_image)}") edit_image = ori_image.clip(1, 255) # for mask reason edit_image = self.check_channels(edit_image) edit_image = self.resize_image( edit_image, max_length=768 ) # make w h multiple of 64, 
resize if w or h > max_length # get masked_x masked_img = ((edit_image.astype(np.float32) / 127.5) - 1.0) * (1 - np_hint) masked_img = np.transpose(masked_img, (2, 0, 1)) device = next(self.config.vae.parameters()).device dtype = next(self.config.vae.parameters()).dtype masked_img = torch.from_numpy(masked_img.copy()).float().to(device) if dtype == torch.float16: masked_img = masked_img.half() masked_x = ( retrieve_latents(self.config.vae.encode(masked_img[None, ...])) * self.config.vae.config.scaling_factor ).detach() if dtype == torch.float16: masked_x = masked_x.half() text_info["masked_x"] = torch.cat([masked_x for _ in range(num_images_per_prompt)], dim=0) glyphs = torch.cat(text_info["glyphs"], dim=1).sum(dim=1, keepdim=True) positions = torch.cat(text_info["positions"], dim=1).sum(dim=1, keepdim=True) return glyphs, positions, text_info def check_channels(self, image): channels = image.shape[2] if len(image.shape) == 3 else 1 if channels == 1: image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) elif channels > 3: image = image[:, :, :3] return image def resize_image(self, img, max_length=768): height, width = img.shape[:2] max_dimension = max(height, width) if max_dimension > max_length: scale_factor = max_length / max_dimension new_width = int(round(width * scale_factor)) new_height = int(round(height * scale_factor)) new_size = (new_width, new_height) img = cv2.resize(img, new_size) height, width = img.shape[:2] img = cv2.resize(img, (width - (width % 64), height - (height % 64))) return img def insert_spaces(self, string, nSpace): if nSpace == 0: return string new_string = "" for char in string: new_string += char + " " * nSpace return new_string[:-nSpace] # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class AnyTextPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the `unet` during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
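    Example (illustrative sketch only — the checkpoint id, font file and loading flags below are
    assumptions and may differ from the released weights):

        ```py
        import torch
        from diffusers import DiffusionPipeline

        # Hypothetical repo id and font path; adjust to the actual AnyText release.
        pipe = DiffusionPipeline.from_pretrained(
            "tolgacangoz/anytext",
            font_path="font/Arial_Unicode.ttf",
            torch_dtype=torch.float16,
            trust_remote_code=True,
        ).to("cuda")

        # Text to render is given in double quotes inside the prompt; `draw_pos` is a mask
        # image marking where each text line should appear.
        image = pipe(
            prompt='photo of a storefront sign that says "OPEN"',
            draw_pos="pos_mask.png",
            mode="generate",
            num_inference_steps=20,
        ).images[0]
        ```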
""" model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, font_path: str = None, text_embedding_module: Optional[TextEmbeddingModule] = None, auxiliary_latent_module: Optional[AuxiliaryLatentModule] = None, trust_remote_code: bool = False, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): super().__init__() if font_path is None: raise ValueError("font_path is required!") text_embedding_module = TextEmbeddingModule(font_path=font_path, use_fp16=unet.dtype == torch.float16) auxiliary_latent_module = AuxiliaryLatentModule(vae=vae) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, text_embedding_module=text_embedding_module, auxiliary_latent_module=auxiliary_latent_module, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.register_to_config(requires_safety_checker=requires_safety_checker) def modify_prompt(self, prompt): prompt = prompt.replace("“", '"') prompt = prompt.replace("”", '"') p = '"(.*?)"' strs = re.findall(p, prompt) if len(strs) == 0: strs = [" "] else: for s in strs: prompt = prompt.replace(f'"{s}"', f" {PLACE_HOLDER} ", 1) if self.is_chinese(prompt): if self.trans_pipe is None: return None, None old_prompt = prompt prompt = self.trans_pipe(input=prompt + " .")["translation"][:-1] print(f"Translate: {old_prompt} --> {prompt}") return prompt, strs def is_chinese(self, text): text = checker._clean_text(text) for char in text: cp = ord(char) if checker._is_chinese_char(cp): return True return False # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
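                # (added note) With output_hidden_states=True, indexing the hidden states with
                # -(clip_skip + 1) selects the output of the clip_skip-th layer from the end, e.g.
                # clip_skip=1 picks the penultimate encoder layer, which is then layer-normed below.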
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( 
torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, # image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): print(controlnet_conditioning_scale) raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError( "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. " "The conditioning scale must be fixed across the batch." ) elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) def prepare_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
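    # (added note) CFG is additionally disabled when the UNet carries a guidance-embedding input
    # (`time_cond_proj_dim` is set, as in guidance-distilled models): such models receive the
    # guidance scale as a conditioning signal instead of requiring a second, unconditional pass.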
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, mode: Optional[str] = "generate", draw_pos: Optional[Union[str, torch.Tensor]] = None, ori_image: Optional[Union[str, torch.Tensor]] = None, timesteps: List[int] = None, sigmas: List[float] = None, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single ControlNet, each will be paired with each prompt in the `prompt` list. This also applies to multiple ControlNets, where a list of image lists can be passed to batch for each prompt and each ControlNet. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. 
If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. 
""" callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, # image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions prompt, texts = self.modify_prompt(prompt) # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) draw_pos = draw_pos.to(device=device) if isinstance(draw_pos, torch.Tensor) else draw_pos prompt_embeds, negative_prompt_embeds, text_info, np_hint = self.text_embedding_module( prompt, texts, negative_prompt, num_images_per_prompt, mode, draw_pos, ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 3.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 4. Prepare image if isinstance(controlnet, ControlNetModel): guided_hint = self.auxiliary_latent_module( text_info=text_info, mode=mode, draw_pos=draw_pos, ori_image=ori_image, num_images_per_prompt=num_images_per_prompt, np_hint=np_hint, ) height, width = 512, 512 else: assert False # 5. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) self._num_timesteps = len(timesteps) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 7.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. 
control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input.to(self.controlnet.dtype), t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=guided_hint, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() 
if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def to(self, *args, **kwargs): super().to(*args, **kwargs) self.text_embedding_module.to(*args, **kwargs) self.auxiliary_latent_module.to(*args, **kwargs) return self
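# --- Illustrative usage sketch (not part of the original pipeline file) ---
# A minimal example of how the `__call__` defined above might be invoked. The checkpoint
# id, the community-pipeline path, and the file names are placeholders/assumptions; only
# the keyword arguments (`mode`, `draw_pos`, `num_inference_steps`, ...) come from the
# signature in this file.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "path/to/anytext-checkpoint",           # placeholder checkpoint
#       custom_pipeline="path/to/anytext.py",   # assumption: loaded as a community pipeline
#       torch_dtype=torch.float16,
#   ).to("cuda")
#
#   image = pipe(
#       prompt='photo of a latte with "Any" "Text" written in the foam',
#       mode="generate",                        # default mode from the signature above
#       draw_pos="position_mask.png",           # mask indicating where the glyphs go
#       num_inference_steps=20,
#       guidance_scale=9.0,
#   ).images[0]
#   image.save("anytext_sample.png")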
diffusers/examples/research_projects/anytext/anytext.py/0
{ "file_path": "diffusers/examples/research_projects/anytext/anytext.py", "repo_id": "diffusers", "token_count": 50998 }
130
# Consistency Training

`train_cm_ct_unconditional.py` trains a consistency model (CM) from scratch following the consistency training (CT) algorithm introduced in [Consistency Models](https://huggingface.co/papers/2303.01469) and refined in [Improved Techniques for Training Consistency Models](https://huggingface.co/papers/2310.14189). Both unconditional and class-conditional training are supported.

A usage example is as follows:

```bash
accelerate launch examples/research_projects/consistency_training/train_cm_ct_unconditional.py \
    --dataset_name="cifar10" \
    --dataset_image_column_name="img" \
    --output_dir="/path/to/output/dir" \
    --mixed_precision=fp16 \
    --resolution=32 \
    --max_train_steps=1000 --max_train_samples=10000 \
    --dataloader_num_workers=8 \
    --noise_precond_type="cm" --input_precond_type="cm" \
    --train_batch_size=4 \
    --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
    --use_8bit_adam \
    --use_ema \
    --validation_steps=100 --eval_batch_size=4 \
    --checkpointing_steps=100 --checkpoints_total_limit=10 \
    --class_conditional --num_classes=10
```
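As a rough mental model of the objective the script optimizes: one CT update draws a clean sample, perturbs it with the *same* noise at two adjacent noise levels, and pulls the student's output at the higher level toward the target network's output at the lower level. The sketch below is illustrative only and is not the code in `train_cm_ct_unconditional.py`; the `model(x, sigma)` call signature, the stop-gradient target, and the pseudo-Huber constant `c` are assumptions based on the papers cited above.

```python
import torch


def pseudo_huber_loss(x, y, c=0.03):
    # Pseudo-Huber metric d(x, y) = sqrt(||x - y||^2 + c^2) - c, computed per sample.
    # The improved-CT paper ties c to the data dimensionality; 0.03 is a placeholder.
    sq = (x - y).pow(2).flatten(1).sum(dim=1)
    return torch.sqrt(sq + c**2) - c


def ct_training_step(student, target, x0, sigmas, i):
    """One consistency-training update on a clean batch `x0`.

    `sigmas` is the discretized noise schedule for the current stage and
    `target` is a stop-gradient (optionally EMA) copy of the student.
    """
    z = torch.randn_like(x0)
    x_hi = x0 + sigmas[i + 1] * z          # noisier sample at sigma_{i+1}
    x_lo = x0 + sigmas[i] * z              # same noise at the adjacent level sigma_i
    pred_hi = student(x_hi, sigmas[i + 1])
    with torch.no_grad():
        pred_lo = target(x_lo, sigmas[i])  # consistency target
    return pseudo_huber_loss(pred_hi, pred_lo).mean()
```

Class-conditional training (enabled by the `--class_conditional` flag above) would additionally pass the class label to both model calls.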
diffusers/examples/research_projects/consistency_training/README.md/0
{ "file_path": "diffusers/examples/research_projects/consistency_training/README.md", "repo_id": "diffusers", "token_count": 415 }
131
## LoRA fine-tuning Flux.1 Dev with quantization

> [!NOTE]
> This example is educational in nature and fixes some arguments to keep things simple. It should act as a reference to build things further.

This example shows how to fine-tune [Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) with LoRA and quantization. We show this by using the [`Norod78/Yarn-art-style`](https://huggingface.co/datasets/Norod78/Yarn-art-style) dataset. Steps below summarize the workflow:

* We precompute the text embeddings in `compute_embeddings.py` and serialize them into a parquet file.
  * Even though optional, we load the T5-xxl in NF4 to further reduce the memory footprint.
* `train_dreambooth_lora_flux_miniature.py` takes care of training:
  * Since we already precomputed the text embeddings, we don't load the text encoders.
  * We load the VAE and use it to precompute the image latents and we then delete it.
  * Load the Flux transformer, quantize it with the [NF4 datatype](https://huggingface.co/papers/2305.14314) through `bitsandbytes`, prepare it for 4bit training.
  * Add LoRA adapter layers to it and then ensure they are kept in FP32 precision.
  * Train!

A condensed sketch of this transformer preparation (quantization plus LoRA) is included at the end of this README.

To run training in a memory-optimized manner, we additionally use:

* 8Bit Adam
* Gradient checkpointing

We have tested the scripts on a 24GB 4090. It works on a free-tier Colab Notebook, too, but it's extremely slow.

## Training

Ensure you have installed the required libraries:

```bash
pip install -U transformers accelerate bitsandbytes peft datasets
pip install git+https://github.com/huggingface/diffusers -U
```

Now, compute the text embeddings:

```bash
python compute_embeddings.py
```

It should create a file named `embeddings.parquet`. We're then ready to launch training. First, authenticate so that you can access the Flux.1 Dev model:

```bash
hf auth login
```

Then launch:

```bash
accelerate launch --config_file=accelerate.yaml \
  train_dreambooth_lora_flux_miniature.py \
  --pretrained_model_name_or_path="black-forest-labs/FLUX.1-dev" \
  --data_df_path="embeddings.parquet" \
  --output_dir="yarn_art_lora_flux_nf4" \
  --mixed_precision="fp16" \
  --use_8bit_adam \
  --weighting_scheme="none" \
  --resolution=1024 \
  --train_batch_size=1 \
  --repeats=1 \
  --learning_rate=1e-4 \
  --guidance_scale=1 \
  --report_to="wandb" \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --cache_latents \
  --rank=4 \
  --max_train_steps=700 \
  --seed="0"
```

We can directly pass a quantized checkpoint path, too:

```diff
+ --quantized_model_path="hf-internal-testing/flux.1-dev-nf4-pkg"
```

Depending on the machine, training time will vary but for our case, it was 1.5 hours. It may be possible to speed this up by using `torch.bfloat16`.

We support training with the DeepSpeed Zero2 optimizer, too.
To use it, first install DeepSpeed: ```bash pip install -Uq deepspeed ``` And then launch: ```bash accelerate launch --config_file=ds2.yaml \ train_dreambooth_lora_flux_miniature.py \ --pretrained_model_name_or_path="black-forest-labs/FLUX.1-dev" \ --data_df_path="embeddings.parquet" \ --output_dir="yarn_art_lora_flux_nf4" \ --mixed_precision="no" \ --use_8bit_adam \ --weighting_scheme="none" \ --resolution=1024 \ --train_batch_size=1 \ --repeats=1 \ --learning_rate=1e-4 \ --guidance_scale=1 \ --report_to="wandb" \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --cache_latents \ --rank=4 \ --max_train_steps=700 \ --seed="0" ``` ## Inference When loading the LoRA params (that were obtained on a quantized base model) and merging them into the base model, it is recommended to first dequantize the base model, merge the LoRA params into it, and then quantize the model again. This is because merging into 4bit quantized models can lead to some rounding errors. Below, we provide an end-to-end example: 1. First, load the original model and merge the LoRA params into it: ```py from diffusers import FluxPipeline import torch ckpt_id = "black-forest-labs/FLUX.1-dev" pipeline = FluxPipeline.from_pretrained( ckpt_id, text_encoder=None, text_encoder_2=None, torch_dtype=torch.float16 ) pipeline.load_lora_weights("yarn_art_lora_flux_nf4", weight_name="pytorch_lora_weights.safetensors") pipeline.fuse_lora() pipeline.unload_lora_weights() pipeline.transformer.save_pretrained("fused_transformer") ``` 2. Quantize the model and run inference ```py from diffusers import AutoPipelineForText2Image, FluxTransformer2DModel, BitsAndBytesConfig import torch ckpt_id = "black-forest-labs/FLUX.1-dev" bnb_4bit_compute_dtype = torch.float16 nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=bnb_4bit_compute_dtype, ) transformer = FluxTransformer2DModel.from_pretrained( "fused_transformer", quantization_config=nf4_config, torch_dtype=bnb_4bit_compute_dtype, ) pipeline = AutoPipelineForText2Image.from_pretrained( ckpt_id, transformer=transformer, torch_dtype=bnb_4bit_compute_dtype ) pipeline.enable_model_cpu_offload() image = pipeline( "a puppy in a pond, yarn art style", num_inference_steps=28, guidance_scale=3.5, height=768 ).images[0] image.save("yarn_merged.png") ``` | Dequantize, merge, quantize | Merging directly into quantized model | |-------|-------| | ![Image A](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/quantized_flux_training/merged.png) | ![Image B](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/quantized_flux_training/unmerged.png) | As we can notice the first column result follows the style more closely.
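For reference, the training-side transformer preparation described in the workflow at the top of this README (NF4 quantization of the frozen base, LoRA adapters kept in FP32) looks roughly like the following. This is a condensed sketch, not the exact code from `train_dreambooth_lora_flux_miniature.py`; the LoRA target modules and hyperparameters are assumptions chosen to mirror the `--rank=4` flag above.

```python
import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel
from peft import LoraConfig

ckpt_id = "black-forest-labs/FLUX.1-dev"

# Quantize the base transformer weights to NF4 on load.
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
transformer = FluxTransformer2DModel.from_pretrained(
    ckpt_id, subfolder="transformer", quantization_config=nf4_config, torch_dtype=torch.float16
)
transformer.requires_grad_(False)

# Attach LoRA layers to the attention projections (illustrative target list).
lora_config = LoraConfig(
    r=4,
    lora_alpha=4,
    init_lora_weights="gaussian",
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
transformer.add_adapter(lora_config)

# Keep the trainable LoRA parameters in FP32 while the frozen base stays in NF4.
for param in transformer.parameters():
    if param.requires_grad:
        param.data = param.data.to(torch.float32)
```

With this setup the optimizer only ever sees the small set of FP32 LoRA parameters, which is what keeps the memory footprint low enough for a 24GB card.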
diffusers/examples/research_projects/flux_lora_quantization/README.md/0
{ "file_path": "diffusers/examples/research_projects/flux_lora_quantization/README.md", "repo_id": "diffusers", "token_count": 2092 }
132
## Diffusers examples with Intel optimizations

**This research project is not actively maintained by the diffusers team. For any questions or comments, please make sure to tag @hshen14.**

This project aims to provide diffusers examples with Intel optimizations such as Bfloat16 for training/fine-tuning acceleration and 8-bit integer (INT8) for inference acceleration on Intel platforms.

## Accelerating the fine-tuning for textual inversion

We accelerate the fine-tuning for textual inversion with Intel Extension for PyTorch. The [examples](textual_inversion) enable both single-node and multi-node distributed training with Bfloat16 support on Intel Xeon Scalable Processors.

## Accelerating the inference for Stable Diffusion using Bfloat16

We start the inference acceleration with Bfloat16 using Intel Extension for PyTorch. The [script](inference_bf16.py) supports standard Stable Diffusion models running in Bfloat16.

```bash
pip install diffusers transformers accelerate scipy safetensors

export KMP_BLOCKTIME=1
export KMP_SETTINGS=1
export KMP_AFFINITY=granularity=fine,compact,1,0

# Intel OpenMP
export OMP_NUM_THREADS=< Cores to use >
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libiomp5.so
# Jemalloc is a recommended malloc implementation that emphasizes fragmentation avoidance and scalable concurrency support.
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libjemalloc.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:9000000000"

# Launch with default DDIM
numactl --membind <node N> -C <cpu list> python inference_bf16.py

# Launch with DPMSolverMultistepScheduler
numactl --membind <node N> -C <cpu list> python inference_bf16.py --dpm
```

An illustrative Bfloat16 inference sketch is included at the end of this README.

## Accelerating the inference for Stable Diffusion using INT8

Coming soon ...
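As a rough reference for the Bfloat16 path described above, the following is an illustrative sketch of Bfloat16 inference with Intel Extension for PyTorch. It is not `inference_bf16.py` itself, and the checkpoint id is a placeholder.

```python
import torch
import intel_extension_for_pytorch as ipex
from diffusers import StableDiffusionPipeline

# Placeholder checkpoint; any standard Stable Diffusion checkpoint should work.
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")

# Optimize the compute-heavy submodules for Bfloat16 execution on CPU.
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)

# Run the denoising loop under CPU autocast so matmuls execute in Bfloat16.
with torch.no_grad(), torch.autocast("cpu", dtype=torch.bfloat16):
    image = pipe("a photo of an astronaut riding a horse on mars", num_inference_steps=20).images[0]

image.save("astronaut_bf16.png")
```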
diffusers/examples/research_projects/intel_opts/README.md/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/README.md", "repo_id": "diffusers", "token_count": 524 }
133
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 Sana-Sprint team. All rights reserved. # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import io import logging import math import os import shutil from pathlib import Path from typing import Callable, Optional import accelerate import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedDataParallelKwargs, DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from safetensors.torch import load_file from torch.nn.utils.spectral_norm import SpectralNorm from torch.utils.data import DataLoader, Dataset from tqdm.auto import tqdm from transformers import AutoTokenizer, Gemma2Model import diffusers from diffusers import ( AutoencoderDC, SanaPipeline, SanaSprintPipeline, SanaTransformer2DModel, ) from diffusers.models.attention_processor import Attention from diffusers.optimization import get_scheduler from diffusers.training_utils import ( free_memory, ) from diffusers.utils import ( check_min_version, is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.33.0.dev0") logger = get_logger(__name__) if is_torch_npu_available(): torch.npu.config.allow_internal_format = False COMPLEX_HUMAN_INSTRUCTION = [ "Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. 
Evaluate the level of detail in the user prompt:", "- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.", "- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.", "Here are examples of how to transform or refine prompts:", "- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.", "- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.", "Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:", "User Prompt: ", ] class SanaVanillaAttnProcessor: r""" Processor for implementing scaled dot-product attention to support JVP calculation during training. """ def __init__(self): pass @staticmethod def scaled_dot_product_attention( query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None ) -> torch.Tensor: B, H, L, S = *query.size()[:-1], key.size(-2) scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale attn_bias = torch.zeros(B, H, L, S, dtype=query.dtype, device=query.device) if attn_mask is not None: if attn_mask.dtype == torch.bool: attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf")) else: attn_bias += attn_mask attn_weight = query @ key.transpose(-2, -1) * scale_factor attn_weight += attn_bias attn_weight = torch.softmax(attn_weight, dim=-1) attn_weight = torch.dropout(attn_weight, dropout_p, train=True) return attn_weight @ value def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) hidden_states = self.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return 
hidden_states class Text2ImageDataset(Dataset): """ A PyTorch Dataset class for loading text-image pairs from a HuggingFace dataset. This dataset is designed for text-to-image generation tasks. Args: hf_dataset (datasets.Dataset): A HuggingFace dataset containing 'image' (bytes) and 'llava' (text) fields. Note that 'llava' is the field name for text descriptions in this specific dataset - you may need to adjust this key if using a different HuggingFace dataset with a different text field name. resolution (int, optional): Target resolution for image resizing. Defaults to 1024. Returns: dict: A dictionary containing: - 'text': The text description (str) - 'image': The processed image tensor (torch.Tensor) of shape [3, resolution, resolution] """ def __init__(self, hf_dataset, resolution=1024): self.dataset = hf_dataset self.transform = T.Compose( [ T.Lambda(lambda img: img.convert("RGB")), T.Resize(resolution), # Image.BICUBIC T.CenterCrop(resolution), T.ToTensor(), T.Normalize([0.5], [0.5]), ] ) def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] text = item["llava"] image_bytes = item["image"] # Convert bytes to PIL Image image = Image.open(io.BytesIO(image_bytes)) image_tensor = self.transform(image) return {"text": text, "image": image_tensor} def save_model_card( repo_id: str, images=None, base_model: str = None, validation_prompt=None, repo_folder=None, ): widget_dict = [] if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) widget_dict.append( {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} ) model_description = f""" # Sana Sprint - {repo_id} <Gallery /> ## Model description These are {repo_id} Sana Sprint weights for {base_model}. The weights were trained using [Sana-Sprint](https://nvlabs.github.io/Sana/Sprint/). ## License TODO """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="other", base_model=base_model, model_description=model_description, widget=widget_dict, ) tags = [ "text-to-image", "diffusers-training", "diffusers", "sana-sprint", "sana-sprint-diffusers", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( pipeline, args, accelerator, pipeline_args, epoch, is_final_validation=False, ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) if args.enable_vae_tiling: pipeline.vae.enable_tiling(tile_sample_min_height=1024, tile_sample_stride_width=1024) pipeline.text_encoder = pipeline.text_encoder.to(torch.bfloat16) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)] for tracker in accelerator.trackers: phase_name = "test" if is_final_validation else "validation" if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { phase_name: [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() return images def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image. By " "default, the standard Image Dataset maps out 'file_name' " "to 'image'.", ) parser.add_argument( "--caption_column", type=str, default=None, help="The column of the dataset containing the instance prompt for each image", ) parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.") parser.add_argument( "--max_sequence_length", type=int, default=300, help="Maximum sequence length to use with with the Gemma model", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=50, help=( "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="sana-dreambooth-lora", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Image Processing---- parser.add_argument("--file_path", nargs="+", required=True, help="List of parquet files (space-separated)") parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--use_fix_crop_and_size", action="store_true", help="Whether or not to use the fixed crop and size for the teacher model.", default=False, ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) parser.add_argument( "--logit_mean", type=float, default=0.2, help="mean to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--logit_std", type=float, default=1.6, help="std to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--logit_mean_discriminator", type=float, default=-0.6, help="Logit mean for discriminator timestep sampling" ) parser.add_argument( "--logit_std_discriminator", type=float, default=1.0, help="Logit std for discriminator timestep sampling" ) parser.add_argument("--ladd_multi_scale", action="store_true", help="Whether to use multi-scale discriminator") parser.add_argument( "--head_block_ids", type=int, nargs="+", default=[2, 8, 14, 19], help="Specify which transformer blocks to use for discriminator heads", ) parser.add_argument("--adv_lambda", type=float, default=0.5, help="Weighting coefficient for adversarial loss") parser.add_argument("--scm_lambda", type=float, default=1.0, help="Weighting coefficient for SCM loss") parser.add_argument("--gradient_clip", type=float, default=0.1, help="Threshold for gradient clipping") parser.add_argument( "--sigma_data", type=float, default=0.5, help="Standard deviation of data distribution is supposed to be 0.5" ) parser.add_argument( "--tangent_warmup_steps", type=int, default=4000, help="Number of warmup steps for tangent vectors" ) parser.add_argument( "--guidance_embeds_scale", type=float, default=0.1, help="Scaling factor for guidance embeddings" ) parser.add_argument( "--scm_cfg_scale", type=float, nargs="+", default=[4, 4.5, 5], help="Range for classifier-free guidance scale" ) parser.add_argument( "--train_largest_timestep", action="store_true", help="Whether to enable special training for large timesteps" ) parser.add_argument("--largest_timestep", type=float, default=1.57080, help="Maximum timestep value") parser.add_argument( "--largest_timestep_prob", type=float, default=0.5, help="Sampling probability for large timesteps" ) parser.add_argument( "--misaligned_pairs_D", action="store_true", help="Add misaligned sample pairs for discriminator" ) parser.add_argument( "--optimizer", type=str, default="AdamW", help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers." ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers." ) parser.add_argument( "--prodigy_beta3", type=float, default=None, help="coefficients for computing the Prodigy stepsize using running averages. If set to None, " "uses the value of square root of beta2. Ignored if optimizer is adamW", ) parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay") parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params") parser.add_argument( "--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer and Prodigy optimizers.", ) parser.add_argument( "--prodigy_use_bias_correction", type=bool, default=True, help="Turn on Adam's bias correction. True by default. 
Ignored if optimizer is adamW", ) parser.add_argument( "--prodigy_safeguard_warmup", type=bool, default=True, help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. " "Ignored if optimizer is adamW", ) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--cache_latents", action="store_true", default=False, help="Cache the VAE latents", ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--upcast_before_saving", action="store_true", default=False, help=( "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). 
" "Defaults to precision dtype used for training to save memory" ), ) parser.add_argument( "--offload", action="store_true", help="Whether to offload the VAE and the text encoder to CPU when they are not used.", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--enable_vae_tiling", action="store_true", help="Enabla vae tiling in log validation") parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args class ResidualBlock(nn.Module): def __init__(self, fn: Callable): super().__init__() self.fn = fn def forward(self, x: torch.Tensor) -> torch.Tensor: return (self.fn(x) + x) / np.sqrt(2) class SpectralConv1d(nn.Conv1d): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) SpectralNorm.apply(self, name="weight", n_power_iterations=1, dim=0, eps=1e-12) class BatchNormLocal(nn.Module): def __init__(self, num_features: int, affine: bool = True, virtual_bs: int = 8, eps: float = 1e-5): super().__init__() self.virtual_bs = virtual_bs self.eps = eps self.affine = affine if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) def forward(self, x: torch.Tensor) -> torch.Tensor: shape = x.size() # Reshape batch into groups. G = np.ceil(x.size(0) / self.virtual_bs).astype(int) x = x.view(G, -1, x.size(-2), x.size(-1)) # Calculate stats. mean = x.mean([1, 3], keepdim=True) var = x.var([1, 3], keepdim=True, unbiased=False) x = (x - mean) / (torch.sqrt(var + self.eps)) if self.affine: x = x * self.weight[None, :, None] + self.bias[None, :, None] return x.view(shape) def make_block(channels: int, kernel_size: int) -> nn.Module: return nn.Sequential( SpectralConv1d( channels, channels, kernel_size=kernel_size, padding=kernel_size // 2, padding_mode="circular", ), BatchNormLocal(channels), nn.LeakyReLU(0.2, True), ) # Adapted from https://github.com/autonomousvision/stylegan-t/blob/main/networks/discriminator.py class DiscHead(nn.Module): def __init__(self, channels: int, c_dim: int, cmap_dim: int = 64): super().__init__() self.channels = channels self.c_dim = c_dim self.cmap_dim = cmap_dim self.main = nn.Sequential( make_block(channels, kernel_size=1), ResidualBlock(make_block(channels, kernel_size=9)) ) if self.c_dim > 0: self.cmapper = nn.Linear(self.c_dim, cmap_dim) self.cls = SpectralConv1d(channels, cmap_dim, kernel_size=1, padding=0) else: self.cls = SpectralConv1d(channels, 1, kernel_size=1, padding=0) def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor: h = self.main(x) out = self.cls(h) if self.c_dim > 0: cmap = self.cmapper(c).unsqueeze(-1) out = (out * cmap).sum(1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) return out class SanaMSCMDiscriminator(nn.Module): def __init__(self, pretrained_model, is_multiscale=False, head_block_ids=None): super().__init__() self.transformer = pretrained_model self.transformer.requires_grad_(False) if head_block_ids is None or len(head_block_ids) == 0: self.block_hooks = {2, 8, 14, 20, 27} if is_multiscale else {self.transformer.depth - 1} else: self.block_hooks = head_block_ids heads = [] for i in range(len(self.block_hooks)): heads.append(DiscHead(self.transformer.hidden_size, 0, 
0)) self.heads = nn.ModuleList(heads) def get_head_inputs(self): return self.head_inputs def forward(self, hidden_states, timestep, encoder_hidden_states=None, **kwargs): feat_list = [] self.head_inputs = [] def get_features(module, input, output): feat_list.append(output) return output hooks = [] for i, block in enumerate(self.transformer.transformer_blocks): if i in self.block_hooks: hooks.append(block.register_forward_hook(get_features)) self.transformer( hidden_states=hidden_states, timestep=timestep, encoder_hidden_states=encoder_hidden_states, return_logvar=False, **kwargs, ) for hook in hooks: hook.remove() res_list = [] for feat, head in zip(feat_list, self.heads): B, N, C = feat.shape feat = feat.transpose(1, 2) # [B, C, N] self.head_inputs.append(feat) res_list.append(head(feat, None).reshape(feat.shape[0], -1)) concat_res = torch.cat(res_list, dim=1) return concat_res @property def model(self): return self.transformer def save_pretrained(self, path): torch.save(self.state_dict(), path) class DiscHeadModel: def __init__(self, disc): self.disc = disc def state_dict(self): return {name: param for name, param in self.disc.state_dict().items() if not name.startswith("transformer.")} def __getattr__(self, name): return getattr(self.disc, name) class SanaTrigFlow(SanaTransformer2DModel): def __init__(self, original_model, guidance=False): self.__dict__ = original_model.__dict__ self.hidden_size = self.config.num_attention_heads * self.config.attention_head_dim self.guidance = guidance if self.guidance: hidden_size = self.config.num_attention_heads * self.config.attention_head_dim self.logvar_linear = torch.nn.Linear(hidden_size, 1) torch.nn.init.xavier_uniform_(self.logvar_linear.weight) torch.nn.init.constant_(self.logvar_linear.bias, 0) def forward( self, hidden_states, encoder_hidden_states, timestep, guidance=None, jvp=False, return_logvar=False, **kwargs ): batch_size = hidden_states.shape[0] latents = hidden_states prompt_embeds = encoder_hidden_states t = timestep # TrigFlow --> Flow Transformation timestep = t.expand(latents.shape[0]).to(prompt_embeds.dtype) latents_model_input = latents flow_timestep = torch.sin(timestep) / (torch.cos(timestep) + torch.sin(timestep)) flow_timestep_expanded = flow_timestep.view(-1, 1, 1, 1) latent_model_input = latents_model_input * torch.sqrt( flow_timestep_expanded**2 + (1 - flow_timestep_expanded) ** 2 ) latent_model_input = latent_model_input.to(prompt_embeds.dtype) # forward in original flow if jvp and self.gradient_checkpointing: self.gradient_checkpointing = False model_out = super().forward( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=flow_timestep, guidance=guidance, **kwargs, )[0] self.gradient_checkpointing = True else: model_out = super().forward( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=flow_timestep, guidance=guidance, **kwargs, )[0] # Flow --> TrigFlow Transformation trigflow_model_out = ( (1 - 2 * flow_timestep_expanded) * latent_model_input + (1 - 2 * flow_timestep_expanded + 2 * flow_timestep_expanded**2) * model_out ) / torch.sqrt(flow_timestep_expanded**2 + (1 - flow_timestep_expanded) ** 2) if self.guidance and guidance is not None: timestep, embedded_timestep = self.time_embed( timestep, guidance=guidance, hidden_dtype=hidden_states.dtype ) else: timestep, embedded_timestep = self.time_embed( timestep, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) if return_logvar: logvar = self.logvar_linear(embedded_timestep) return trigflow_model_out, 
logvar return (trigflow_model_out,) def compute_density_for_timestep_sampling_scm(batch_size: int, logit_mean: float = None, logit_std: float = None): """Compute the density for sampling the timesteps when doing Sana-Sprint training.""" sigma = torch.randn(batch_size, device="cpu") sigma = (sigma * logit_std + logit_mean).exp() u = torch.atan(sigma / 0.5) # TODO: 0.5 should be a hyper-parameter return u def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) if torch.backends.mps.is_available() and args.mixed_precision == "bf16": # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs], ) # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, ).repo_id # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, ) # Load scheduler and models text_encoder = Gemma2Model.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderDC.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant, ) ori_transformer = SanaTransformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, guidance_embeds=True, ) ori_transformer.set_attn_processor(SanaVanillaAttnProcessor()) ori_transformer_no_guide = SanaTransformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, guidance_embeds=False, ) original_state_dict = load_file( f"{args.pretrained_model_name_or_path}/transformer/diffusion_pytorch_model.safetensors" ) param_mapping = { "time_embed.emb.timestep_embedder.linear_1.weight": "time_embed.timestep_embedder.linear_1.weight", "time_embed.emb.timestep_embedder.linear_1.bias": "time_embed.timestep_embedder.linear_1.bias", "time_embed.emb.timestep_embedder.linear_2.weight": "time_embed.timestep_embedder.linear_2.weight", "time_embed.emb.timestep_embedder.linear_2.bias": "time_embed.timestep_embedder.linear_2.bias", } for src_key, dst_key in param_mapping.items(): if src_key in original_state_dict: ori_transformer.load_state_dict({dst_key: original_state_dict[src_key]}, strict=False, assign=True) guidance_embedder_module = ori_transformer.time_embed.guidance_embedder zero_state_dict = {} target_device = accelerator.device param_w1 = guidance_embedder_module.linear_1.weight zero_state_dict["linear_1.weight"] = torch.zeros(param_w1.shape, device=target_device) param_b1 = guidance_embedder_module.linear_1.bias zero_state_dict["linear_1.bias"] = torch.zeros(param_b1.shape, device=target_device) param_w2 = guidance_embedder_module.linear_2.weight zero_state_dict["linear_2.weight"] = torch.zeros(param_w2.shape, device=target_device) param_b2 = guidance_embedder_module.linear_2.bias zero_state_dict["linear_2.bias"] = torch.zeros(param_b2.shape, device=target_device) guidance_embedder_module.load_state_dict(zero_state_dict, strict=False, assign=True) transformer = SanaTrigFlow(ori_transformer, guidance=True).train() pretrained_model = SanaTrigFlow(ori_transformer_no_guide, guidance=False).eval() disc = SanaMSCMDiscriminator( pretrained_model, is_multiscale=args.ladd_multi_scale, head_block_ids=args.head_block_ids, ).train() transformer.requires_grad_(True) pretrained_model.requires_grad_(False) disc.model.requires_grad_(False) disc.heads.requires_grad_(True) vae.requires_grad_(False) text_encoder.requires_grad_(False) # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) # VAE should always be kept in fp32 for SANA (?) vae.to(accelerator.device, dtype=torch.float32) transformer.to(accelerator.device, dtype=weight_dtype) pretrained_model.to(accelerator.device, dtype=weight_dtype) disc.to(accelerator.device, dtype=weight_dtype) # because Gemma2 is particularly suited for bfloat16. text_encoder.to(dtype=torch.bfloat16) if args.enable_npu_flash_attention: if is_torch_npu_available(): logger.info("npu flash attention enabled.") for block in transformer.transformer_blocks: block.attn2.set_use_npu_flash_attention(True) for block in pretrained_model.transformer_blocks: block.attn2.set_use_npu_flash_attention(True) else: raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") # Initialize a text encoding pipeline and keep it to CPU for now. text_encoding_pipeline = SanaPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=None, transformer=None, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=torch.bfloat16, ) text_encoding_pipeline = text_encoding_pipeline.to(accelerator.device) if args.gradient_checkpointing: transformer.enable_gradient_checkpointing() def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model if version.parse(accelerate.__version__) >= version.parse("0.16.0"): def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: for model in models: unwrapped_model = unwrap_model(model) # Handle transformer model if isinstance(unwrapped_model, type(unwrap_model(transformer))): model = unwrapped_model model.save_pretrained(os.path.join(output_dir, "transformer")) # Handle discriminator model (only save heads) elif isinstance(unwrapped_model, type(unwrap_model(disc))): # Save only the heads torch.save(unwrapped_model.heads.state_dict(), os.path.join(output_dir, "disc_heads.pt")) else: raise ValueError(f"unexpected save model: {unwrapped_model.__class__}") # make sure to pop weight so that corresponding model is not saved again if weights: weights.pop() def load_model_hook(models, input_dir): transformer_ = None disc_ = None if not accelerator.distributed_type == DistributedType.DEEPSPEED: while len(models) > 0: model = models.pop() unwrapped_model = unwrap_model(model) if isinstance(unwrapped_model, type(unwrap_model(transformer))): transformer_ = model # noqa: F841 elif isinstance(unwrapped_model, type(unwrap_model(disc))): # Load only the heads heads_state_dict = torch.load(os.path.join(input_dir, "disc_heads.pt")) unwrapped_model.heads.load_state_dict(heads_state_dict) disc_ = model # noqa: F841 else: raise ValueError(f"unexpected save model: {unwrapped_model.__class__}") else: # DeepSpeed case transformer_ = SanaTransformer2DModel.from_pretrained(input_dir, subfolder="transformer") # noqa: F841 disc_heads_state_dict = torch.load(os.path.join(input_dir, "disc_heads.pt")) # noqa: F841 # You'll need to handle how to load the heads in DeepSpeed case accelerator.register_save_state_pre_hook(save_model_hook) 
accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimization parameters optimizer_G = optimizer_class( transformer.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) optimizer_D = optimizer_class( disc.heads.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) hf_dataset = load_dataset( args.dataset_name, data_files=args.file_path, split="train", ) train_dataset = Text2ImageDataset( hf_dataset=hf_dataset, resolution=args.resolution, ) train_dataloader = DataLoader( train_dataset, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, pin_memory=True, persistent_workers=True, shuffle=True, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer_G, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. transformer, pretrained_model, disc, optimizer_G, optimizer_D, train_dataloader, lr_scheduler = ( accelerator.prepare( transformer, pretrained_model, disc, optimizer_G, optimizer_D, train_dataloader, lr_scheduler ) ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_name = "sana-sprint" config = { k: str(v) if not isinstance(v, (int, float, str, bool, torch.Tensor)) else v for k, v in vars(args).items() } accelerator.init_trackers(tracker_name, config=config) # Train! 
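    # Example of the effective batch size computed below (illustrative numbers, not defaults):
    # train_batch_size=1 on 8 processes with gradient_accumulation_steps=4 gives
    # 1 * 8 * 4 = 32 samples per optimizer step.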
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) phase = "G" vae_config_scaling_factor = vae.config.scaling_factor sigma_data = args.sigma_data negative_prompt = [""] * args.train_batch_size negative_prompt_embeds, negative_prompt_attention_mask, _, _ = text_encoding_pipeline.encode_prompt( prompt=negative_prompt, complex_human_instruction=False, do_classifier_free_guidance=False, ) for epoch in range(first_epoch, args.num_train_epochs): transformer.train() disc.train() for step, batch in enumerate(train_dataloader): # text encoding prompts = batch["text"] with torch.no_grad(): prompt_embeds, prompt_attention_mask, _, _ = text_encoding_pipeline.encode_prompt( prompts, complex_human_instruction=COMPLEX_HUMAN_INSTRUCTION, do_classifier_free_guidance=False ) # Convert images to latent space vae = vae.to(accelerator.device) pixel_values = batch["image"].to(dtype=vae.dtype) model_input = vae.encode(pixel_values).latent model_input = model_input * vae_config_scaling_factor * sigma_data model_input = model_input.to(dtype=weight_dtype) # Sample noise that we'll add to the latents noise = torch.randn_like(model_input) * sigma_data bsz = model_input.shape[0] # Sample a random timestep for each image # for weighting schemes where we sample timesteps non-uniformly u = compute_density_for_timestep_sampling_scm( batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, ).to(accelerator.device) # Add noise according to TrigFlow. 
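            # Note on the TrigFlow parameterization: the sampled u is interpreted as an angle
            # t in (0, pi/2); cos(t) scales the clean latents and sin(t) scales the noise, so
            # the noisy sample keeps the sigma_data scale, with t -> 0 giving clean data and
            # t -> pi/2 giving pure noise.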
# zt = cos(t) * x + sin(t) * noise t = u.view(-1, 1, 1, 1) noisy_model_input = torch.cos(t) * model_input + torch.sin(t) * noise scm_cfg_scale = torch.tensor( np.random.choice(args.scm_cfg_scale, size=bsz, replace=True), device=accelerator.device, ) def model_wrapper(scaled_x_t, t): pred, logvar = accelerator.unwrap_model(transformer)( hidden_states=scaled_x_t, timestep=t.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, guidance=(scm_cfg_scale.flatten() * args.guidance_embeds_scale), jvp=True, return_logvar=True, ) return pred, logvar if phase == "G": transformer.train() disc.eval() models_to_accumulate = [transformer] with accelerator.accumulate(models_to_accumulate): with torch.no_grad(): cfg_x_t = torch.cat([noisy_model_input, noisy_model_input], dim=0) cfg_t = torch.cat([t, t], dim=0) cfg_y = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) cfg_y_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) cfg_pretrain_pred = pretrained_model( hidden_states=(cfg_x_t / sigma_data), timestep=cfg_t.flatten(), encoder_hidden_states=cfg_y, encoder_attention_mask=cfg_y_mask, )[0] cfg_dxt_dt = sigma_data * cfg_pretrain_pred dxt_dt_uncond, dxt_dt = cfg_dxt_dt.chunk(2) scm_cfg_scale = scm_cfg_scale.view(-1, 1, 1, 1) dxt_dt = dxt_dt_uncond + scm_cfg_scale * (dxt_dt - dxt_dt_uncond) v_x = torch.cos(t) * torch.sin(t) * dxt_dt / sigma_data v_t = torch.cos(t) * torch.sin(t) # Adapt from https://github.com/xandergos/sCM-mnist/blob/master/train_consistency.py with torch.no_grad(): F_theta, F_theta_grad, logvar = torch.func.jvp( model_wrapper, (noisy_model_input / sigma_data, t), (v_x, v_t), has_aux=True ) F_theta, logvar = transformer( hidden_states=(noisy_model_input / sigma_data), timestep=t.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, guidance=(scm_cfg_scale.flatten() * args.guidance_embeds_scale), return_logvar=True, ) logvar = logvar.view(-1, 1, 1, 1) F_theta_grad = F_theta_grad.detach() F_theta_minus = F_theta.detach() # Warmup steps r = min(1, global_step / args.tangent_warmup_steps) # Calculate gradient g using JVP rearrangement g = -torch.cos(t) * torch.cos(t) * (sigma_data * F_theta_minus - dxt_dt) second_term = -r * (torch.cos(t) * torch.sin(t) * noisy_model_input + sigma_data * F_theta_grad) g = g + second_term # Tangent normalization g_norm = torch.linalg.vector_norm(g, dim=(1, 2, 3), keepdim=True) g = g / (g_norm + 0.1) # 0.1 is the constant c, can be modified but 0.1 was used in the paper sigma = torch.tan(t) * sigma_data weight = 1 / sigma l2_loss = torch.square(F_theta - F_theta_minus - g) # Calculate loss with normalization factor loss = (weight / torch.exp(logvar)) * l2_loss + logvar loss = loss.mean() loss_no_logvar = weight * torch.square(F_theta - F_theta_minus - g) loss_no_logvar = loss_no_logvar.mean() g_norm = g_norm.mean() pred_x_0 = torch.cos(t) * noisy_model_input - torch.sin(t) * F_theta * sigma_data if args.train_largest_timestep: pred_x_0.detach() u = compute_density_for_timestep_sampling_scm( batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, ).to(accelerator.device) t_new = u.view(-1, 1, 1, 1) random_mask = torch.rand_like(t_new) < args.largest_timestep_prob t_new = torch.where(random_mask, torch.full_like(t_new, args.largest_timestep), t_new) z_new = torch.randn_like(model_input) * sigma_data x_t_new = torch.cos(t_new) * model_input + torch.sin(t_new) * z_new F_theta = transformer( hidden_states=(x_t_new / sigma_data), 
timestep=t_new.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, guidance=(scm_cfg_scale.flatten() * args.guidance_embeds_scale), return_logvar=False, jvp=False, )[0] pred_x_0 = torch.cos(t_new) * x_t_new - torch.sin(t_new) * F_theta * sigma_data # Sample timesteps for discriminator timesteps_D = compute_density_for_timestep_sampling_scm( batch_size=bsz, logit_mean=args.logit_mean_discriminator, logit_std=args.logit_std_discriminator, ).to(accelerator.device) t_D = timesteps_D.view(-1, 1, 1, 1) # Add noise to predicted x0 z_D = torch.randn_like(model_input) * sigma_data noised_predicted_x0 = torch.cos(t_D) * pred_x_0 + torch.sin(t_D) * z_D # Calculate adversarial loss pred_fake = disc( hidden_states=(noised_predicted_x0 / sigma_data), timestep=t_D.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, ) adv_loss = -torch.mean(pred_fake) # Total loss = sCM loss + LADD loss total_loss = args.scm_lambda * loss + adv_loss * args.adv_lambda total_loss = total_loss / args.gradient_accumulation_steps accelerator.backward(total_loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(transformer.parameters(), args.gradient_clip) if torch.logical_or(grad_norm.isnan(), grad_norm.isinf()): optimizer_G.zero_grad(set_to_none=True) optimizer_D.zero_grad(set_to_none=True) logger.warning("NaN or Inf detected in grad_norm, skipping iteration...") continue # switch phase to D phase = "D" optimizer_G.step() lr_scheduler.step() optimizer_G.zero_grad(set_to_none=True) elif phase == "D": transformer.eval() disc.train() models_to_accumulate = [disc] with accelerator.accumulate(models_to_accumulate): with torch.no_grad(): scm_cfg_scale = torch.tensor( np.random.choice(args.scm_cfg_scale, size=bsz, replace=True), device=accelerator.device, ) if args.train_largest_timestep: random_mask = torch.rand_like(t) < args.largest_timestep_prob t = torch.where(random_mask, torch.full_like(t, args.largest_timestep_prob), t) z_new = torch.randn_like(model_input) * sigma_data noisy_model_input = torch.cos(t) * model_input + torch.sin(t) * z_new # here F_theta = transformer( hidden_states=(noisy_model_input / sigma_data), timestep=t.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, guidance=(scm_cfg_scale.flatten() * args.guidance_embeds_scale), return_logvar=False, jvp=False, )[0] pred_x_0 = torch.cos(t) * noisy_model_input - torch.sin(t) * F_theta * sigma_data # Sample timesteps for fake and real samples timestep_D_fake = compute_density_for_timestep_sampling_scm( batch_size=bsz, logit_mean=args.logit_mean_discriminator, logit_std=args.logit_std_discriminator, ).to(accelerator.device) timesteps_D_real = timestep_D_fake t_D_fake = timestep_D_fake.view(-1, 1, 1, 1) t_D_real = timesteps_D_real.view(-1, 1, 1, 1) # Add noise to predicted x0 and real x0 z_D_fake = torch.randn_like(model_input) * sigma_data z_D_real = torch.randn_like(model_input) * sigma_data noised_predicted_x0 = torch.cos(t_D_fake) * pred_x_0 + torch.sin(t_D_fake) * z_D_fake noised_latents = torch.cos(t_D_real) * model_input + torch.sin(t_D_real) * z_D_real # Add misaligned pairs if enabled and batch size > 1 if args.misaligned_pairs_D and bsz > 1: # Create shifted pairs shifted_x0 = torch.roll(model_input, 1, 0) timesteps_D_shifted = compute_density_for_timestep_sampling_scm( batch_size=bsz, logit_mean=args.logit_mean_discriminator, logit_std=args.logit_std_discriminator, ).to(accelerator.device) t_D_shifted = 
timesteps_D_shifted.view(-1, 1, 1, 1) # Add noise to shifted pairs z_D_shifted = torch.randn_like(shifted_x0) * sigma_data noised_shifted_x0 = torch.cos(t_D_shifted) * shifted_x0 + torch.sin(t_D_shifted) * z_D_shifted # Concatenate with original noised samples noised_predicted_x0 = torch.cat([noised_predicted_x0, noised_shifted_x0], dim=0) t_D_fake = torch.cat([t_D_fake, t_D_shifted], dim=0) prompt_embeds = torch.cat([prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([prompt_attention_mask, prompt_attention_mask], dim=0) # Calculate D loss pred_fake = disc( hidden_states=(noised_predicted_x0 / sigma_data), timestep=t_D_fake.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, ) pred_true = disc( hidden_states=(noised_latents / sigma_data), timestep=t_D_real.flatten(), encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, ) # hinge loss loss_real = torch.mean(F.relu(1.0 - pred_true)) loss_gen = torch.mean(F.relu(1.0 + pred_fake)) loss_D = 0.5 * (loss_real + loss_gen) loss_D = loss_D / args.gradient_accumulation_steps accelerator.backward(loss_D) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(disc.parameters(), args.gradient_clip) if torch.logical_or(grad_norm.isnan(), grad_norm.isinf()): optimizer_G.zero_grad(set_to_none=True) optimizer_D.zero_grad(set_to_none=True) logger.warning("NaN or Inf detected in grad_norm, skipping iteration...") continue # switch back to phase G and add global step by one. phase = "G" optimizer_D.step() optimizer_D.zero_grad(set_to_none=True) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = { "scm_loss": loss.detach().item(), "adv_loss": adv_loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], } progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: # create pipeline pipeline = SanaSprintPipeline.from_pretrained( args.pretrained_model_name_or_path, transformer=accelerator.unwrap_model(transformer), revision=args.revision, variant=args.variant, torch_dtype=torch.float32, ) pipeline_args = { "prompt": 
args.validation_prompt, "complex_human_instruction": COMPLEX_HUMAN_INSTRUCTION, } images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=pipeline_args, epoch=epoch, ) free_memory() images = None del pipeline accelerator.wait_for_everyone() if accelerator.is_main_process: transformer = unwrap_model(transformer) if args.upcast_before_saving: transformer.to(torch.float32) else: transformer = transformer.to(weight_dtype) # Save discriminator heads disc = unwrap_model(disc) disc_heads_state_dict = disc.heads.state_dict() # Save transformer model transformer.save_pretrained(os.path.join(args.output_dir, "transformer")) # Save discriminator heads torch.save(disc_heads_state_dict, os.path.join(args.output_dir, "disc_heads.pt")) # Final inference # Load previous pipeline pipeline = SanaSprintPipeline.from_pretrained( args.pretrained_model_name_or_path, transformer=accelerator.unwrap_model(transformer), revision=args.revision, variant=args.variant, torch_dtype=torch.float32, ) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: pipeline_args = { "prompt": args.validation_prompt, "complex_human_instruction": COMPLEX_HUMAN_INSTRUCTION, } images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=pipeline_args, epoch=epoch, is_final_validation=True, ) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, instance_prompt=args.instance_prompt, validation_prompt=args.validation_prompt, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) images = None del pipeline accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/research_projects/sana/train_sana_sprint_diffusers.py/0
{ "file_path": "diffusers/examples/research_projects/sana/train_sana_sprint_diffusers.py", "repo_id": "diffusers", "token_count": 33634 }
134
import time import jax import jax.numpy as jnp import numpy as np from flax.jax_utils import replicate from jax import pmap # Let's cache the model compilation, so that it doesn't take as long the next time around. from jax.experimental.compilation_cache import compilation_cache as cc from diffusers import FlaxStableDiffusionXLPipeline cc.initialize_cache("/tmp/sdxl_cache") NUM_DEVICES = jax.device_count() # 1. Let's start by downloading the model and loading it into our pipeline class # Adhering to JAX's functional approach, the model's parameters are returned separately and # will have to be passed to the pipeline during inference pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True ) # 2. We cast all parameters to bfloat16 EXCEPT the scheduler which we leave in # float32 to keep maximal precision scheduler_state = params.pop("scheduler") params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params) params["scheduler"] = scheduler_state # 3. Next, we define the different inputs to the pipeline default_prompt = "a colorful photo of a castle in the middle of a forest with trees and bushes, by Ismail Inceoglu, shadows, high contrast, dynamic shading, hdr, detailed vegetation, digital painting, digital drawing, detailed painting, a detailed digital painting, gothic art, featured on deviantart" default_neg_prompt = "fog, grainy, purple" default_seed = 33 default_guidance_scale = 5.0 default_num_steps = 25 width = 1024 height = 1024 # 4. In order to be able to compile the pipeline # all inputs have to be tensors or strings # Let's tokenize the prompt and negative prompt def tokenize_prompt(prompt, neg_prompt): prompt_ids = pipeline.prepare_inputs(prompt) neg_prompt_ids = pipeline.prepare_inputs(neg_prompt) return prompt_ids, neg_prompt_ids # 5. To make full use of JAX's parallelization capabilities # the parameters and input tensors are duplicated across devices # To make sure every device generates a different image, we create # different seeds for each image. The model parameters won't change # during inference so we do not wrap them into a function p_params = replicate(params) def replicate_all(prompt_ids, neg_prompt_ids, seed): p_prompt_ids = replicate(prompt_ids) p_neg_prompt_ids = replicate(neg_prompt_ids) rng = jax.random.PRNGKey(seed) rng = jax.random.split(rng, NUM_DEVICES) return p_prompt_ids, p_neg_prompt_ids, rng # 6. To compile the pipeline._generate function, we must pass all parameters # to the function and tell JAX which are static arguments, that is, arguments that # are known at compile time and won't change. In our case, it is num_inference_steps, # height, width and return_latents. # Once the function is compiled, these parameters are omitted from future calls and # cannot be changed without modifying the code and recompiling. 
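# Note: `static_broadcasted_argnums=[3, 4, 5, 9]` below refers to the positional arguments of
# `pipeline._generate` as they are passed to `.lower(...)`, i.e. num_inference_steps, height,
# width and return_latents. Changing any of them (e.g. generating at a different resolution)
# means calling `aot_compile` again and paying the compilation cost once more.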
def aot_compile( prompt=default_prompt, negative_prompt=default_neg_prompt, seed=default_seed, guidance_scale=default_guidance_scale, num_inference_steps=default_num_steps, ): prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt) prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed) g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32) g = g[:, None] return ( pmap(pipeline._generate, static_broadcasted_argnums=[3, 4, 5, 9]) .lower( prompt_ids, p_params, rng, num_inference_steps, # num_inference_steps height, # height width, # width g, None, neg_prompt_ids, False, # return_latents ) .compile() ) start = time.time() print("Compiling ...") p_generate = aot_compile() print(f"Compiled in {time.time() - start}") # 7. Let's now put it all together in a generate function. def generate(prompt, negative_prompt, seed=default_seed, guidance_scale=default_guidance_scale): prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt) prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed) g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32) g = g[:, None] images = p_generate(prompt_ids, p_params, rng, g, None, neg_prompt_ids) # convert the images to PIL images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) return pipeline.numpy_to_pil(np.array(images)) # 8. The first forward pass after AOT compilation still takes a while longer than # subsequent passes, this is because on the first pass, JAX uses Python dispatch, which # Fills the C++ dispatch cache. # When using jit, this extra step is done automatically, but when using AOT compilation, # it doesn't happen until the function call is made. start = time.time() prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang" neg_prompt = "cartoon, illustration, animation. face. male, female" images = generate(prompt, neg_prompt) print(f"First inference in {time.time() - start}") # 9. From this point forward, any calls to generate should result in a faster inference # time and it won't change. start = time.time() prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang" neg_prompt = "cartoon, illustration, animation. face. male, female" images = generate(prompt, neg_prompt) print(f"Inference in {time.time() - start}") for i, image in enumerate(images): image.save(f"castle_{i}.png")
diffusers/examples/research_projects/sdxl_flax/sdxl_single_aot.py/0
{ "file_path": "diffusers/examples/research_projects/sdxl_flax/sdxl_single_aot.py", "repo_id": "diffusers", "token_count": 1963 }
135
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class T2IAdapter(ExamplesTestsAccelerate): def test_t2i_adapter_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/t2i_adapter/train_t2i_adapter_sdxl.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-xl-pipe --adapter_model_name_or_path=hf-internal-testing/tiny-adapter --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=9 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.safetensors")))
diffusers/examples/t2i_adapter/test_t2i_adapter.py/0
{ "file_path": "diffusers/examples/t2i_adapter/test_t2i_adapter.py", "repo_id": "diffusers", "token_count": 682 }
136
## Textual Inversion fine-tuning example for SDXL

```sh
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export DATA_DIR="./cat"

accelerate launch textual_inversion_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" \
  --initializer_token="toy" \
  --mixed_precision="bf16" \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=500 \
  --learning_rate=5.0e-04 \
  --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --save_as_full_pipeline \
  --output_dir="./textual_inversion_cat_sdxl"
```

Training of both text encoders is supported.

### Inference Example

Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionXLPipeline`. Make sure to include the `placeholder_token` in your prompt.

```python
from diffusers import StableDiffusionXLPipeline
import torch

model_id = "./textual_inversion_cat_sdxl"
pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")

image = pipe(prompt="", prompt_2=prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack-prompt_2.png")
```

The second call routes the prompt through `prompt_2`, so the generation relies only on the token learned by the second text encoder — a quick way to check that the second encoder also picked up the new concept.
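As an optional sanity check (not part of the original example; `pipe` is the pipeline loaded above), you can verify that the placeholder token was registered in both tokenizers of the saved pipeline:

```python
# Both tokenizers should contain the placeholder token when the full pipeline was saved
print("<cat-toy>" in pipe.tokenizer.get_vocab())    # expected: True
print("<cat-toy>" in pipe.tokenizer_2.get_vocab())  # expected: True
```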
diffusers/examples/textual_inversion/README_sdxl.md/0
{ "file_path": "diffusers/examples/textual_inversion/README_sdxl.md", "repo_id": "diffusers", "token_count": 537 }
137
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import math import os import shutil import time from pathlib import Path import accelerate import numpy as np import PIL import PIL.Image import timm import torch import torch.nn.functional as F from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset from discriminator import Discriminator from huggingface_hub import create_repo from packaging import version from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from torchvision import transforms from tqdm import tqdm from diffusers import VQModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel from diffusers.utils import check_min_version, is_wandb_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.36.0.dev0") logger = get_logger(__name__, log_level="INFO") class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def _map_layer_to_idx(backbone, layers, offset=0): """Maps set of layer names to indices of model. Ported from anomalib Returns: Feature map extracted from the CNN """ idx = [] features = timm.create_model( backbone, pretrained=False, features_only=False, exportable=True, ) for i in layers: try: idx.append(list(dict(features.named_children()).keys()).index(i) - offset) except ValueError: raise ValueError( f"Layer {i} not found in model {backbone}. Select layer from {list(dict(features.named_children()).keys())}. 
The network architecture is {features}" ) return idx def get_perceptual_loss(pixel_values, fmap, timm_model, timm_model_resolution, timm_model_normalization): img_timm_model_input = timm_model_normalization(F.interpolate(pixel_values, timm_model_resolution)) fmap_timm_model_input = timm_model_normalization(F.interpolate(fmap, timm_model_resolution)) if pixel_values.shape[1] == 1: # handle grayscale for timm_model img_timm_model_input, fmap_timm_model_input = ( t.repeat(1, 3, 1, 1) for t in (img_timm_model_input, fmap_timm_model_input) ) img_timm_model_feats = timm_model(img_timm_model_input) recon_timm_model_feats = timm_model(fmap_timm_model_input) perceptual_loss = F.mse_loss(img_timm_model_feats[0], recon_timm_model_feats[0]) for i in range(1, len(img_timm_model_feats)): perceptual_loss += F.mse_loss(img_timm_model_feats[i], recon_timm_model_feats[i]) perceptual_loss /= len(img_timm_model_feats) return perceptual_loss def grad_layer_wrt_loss(loss, layer): return torch.autograd.grad( outputs=loss, inputs=layer, grad_outputs=torch.ones_like(loss), retain_graph=True, )[0].detach() def gradient_penalty(images, output, weight=10): gradients = torch.autograd.grad( outputs=output, inputs=images, grad_outputs=torch.ones(output.size(), device=images.device), create_graph=True, retain_graph=True, only_inputs=True, )[0] bsz = gradients.shape[0] gradients = torch.reshape(gradients, (bsz, -1)) return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean() @torch.no_grad() def log_validation(model, args, validation_transform, accelerator, global_step): logger.info("Generating images...") dtype = torch.float32 if accelerator.mixed_precision == "fp16": dtype = torch.float16 elif accelerator.mixed_precision == "bf16": dtype = torch.bfloat16 original_images = [] for image_path in args.validation_images: image = PIL.Image.open(image_path) if not image.mode == "RGB": image = image.convert("RGB") image = validation_transform(image).to(accelerator.device, dtype=dtype) original_images.append(image[None]) # Generate images model.eval() images = [] for original_image in original_images: image = accelerator.unwrap_model(model)(original_image).sample images.append(image) model.train() original_images = torch.cat(original_images, dim=0) images = torch.cat(images, dim=0) # Convert to PIL images images = torch.clamp(images, 0.0, 1.0) original_images = torch.clamp(original_images, 0.0, 1.0) images *= 255.0 original_images *= 255.0 images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8) original_images = original_images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8) images = np.concatenate([original_images, images], axis=2) images = [Image.fromarray(image) for image in images] # Log images for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, global_step, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: Original, Generated") for i, image in enumerate(images) ] }, step=global_step, ) torch.cuda.empty_cache() return images def log_grad_norm(model, accelerator, global_step): for name, param in model.named_parameters(): if param.grad is not None: grads = param.grad.detach().data grad_norm = (grads.norm(p=2) / grads.numel()).item() accelerator.log({"grad_norm/" + name: grad_norm}, step=global_step) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( 
"--log_grad_norm_steps", type=int, default=500, help=("Print logs of gradient norms every X steps."), ) parser.add_argument( "--log_steps", type=int, default=50, help=("Print logs every X steps."), ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running reconstruction on images in" " `args.validation_images` and logging the reconstructed images." ), ) parser.add_argument( "--vae_loss", type=str, default="l2", help="The loss function for vae reconstruction loss.", ) parser.add_argument( "--timm_model_offset", type=int, default=0, help="Offset of timm layers to indices.", ) parser.add_argument( "--timm_model_layers", type=str, default="head", help="The layers to get output from in the timm model.", ) parser.add_argument( "--timm_model_backend", type=str, default="vgg19", help="Timm model used to get the lpips loss", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--model_config_name_or_path", type=str, default=None, help="The config of the Vq model to train, leave as None to use standard Vq model configuration.", ) parser.add_argument( "--discriminator_config_name_or_path", type=str, default=None, help="The config of the discriminator model to train, leave as None to use standard Vq model configuration.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
), ) parser.add_argument( "--validation_images", type=str, default=None, nargs="+", help=("A set of validation images evaluated every `--validation_steps` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="vqgan-output", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--discr_learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--discr_lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. 
Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--tracker_project_name", type=str, default="vqgan-training", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def main(): ######################### # SETUP Accelerator # ######################### args = parse_args() # Enable TF32 on Ampere GPUs if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if accelerator.distributed_type == DistributedType.DEEPSPEED: accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = args.train_batch_size ##################################### # SETUP LOGGING, SEED and CONFIG # ##################################### if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_images") accelerator.init_trackers(args.tracker_project_name, tracker_config) # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id ######################### # MODELS and OPTIMIZER # ######################### logger.info("Loading models and optimizer") if args.model_config_name_or_path is None and args.pretrained_model_name_or_path is None: # Taken from config of movq at kandinsky-community/kandinsky-2-2-decoder but without the attention layers model = VQModel( act_fn="silu", block_out_channels=[ 128, 256, 512, ], down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], in_channels=3, latent_channels=4, layers_per_block=2, norm_num_groups=32, norm_type="spatial", num_vq_embeddings=16384, out_channels=3, sample_size=32, scaling_factor=0.18215, up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], vq_embed_dim=4, ) elif args.pretrained_model_name_or_path is not None: model = VQModel.from_pretrained(args.pretrained_model_name_or_path) else: config = VQModel.load_config(args.model_config_name_or_path) model = VQModel.from_config(config) if args.use_ema: ema_model = EMAModel(model.parameters(), model_cls=VQModel, model_config=model.config) if args.discriminator_config_name_or_path is None: discriminator = Discriminator() else: config = Discriminator.load_config(args.discriminator_config_name_or_path) discriminator = Discriminator.from_config(config) idx = _map_layer_to_idx(args.timm_model_backend, args.timm_model_layers.split("|"), args.timm_model_offset) timm_model = timm.create_model( args.timm_model_backend, pretrained=True, features_only=True, exportable=True, out_indices=idx, ) timm_model = timm_model.to(accelerator.device) timm_model.requires_grad = False timm_model.eval() timm_transform = create_transform(**resolve_data_config(timm_model.pretrained_cfg, model=timm_model)) try: # Gets the resolution of the timm transformation after centercrop timm_centercrop_transform = timm_transform.transforms[1] assert isinstance(timm_centercrop_transform, transforms.CenterCrop), ( f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." ) timm_model_resolution = timm_centercrop_transform.size[0] # Gets final normalization timm_model_normalization = timm_transform.transforms[-1] assert isinstance(timm_model_normalization, transforms.Normalize), ( f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." 
) except AssertionError as e: raise NotImplementedError(e) # Enable flash attention if asked if args.enable_xformers_memory_efficient_attention: model.enable_xformers_memory_efficient_attention() # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_model.save_pretrained(os.path.join(output_dir, "vqmodel_ema")) vqmodel = models[0] discriminator = models[1] vqmodel.save_pretrained(os.path.join(output_dir, "vqmodel")) discriminator.save_pretrained(os.path.join(output_dir, "discriminator")) weights.pop() weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "vqmodel_ema"), VQModel) ema_model.load_state_dict(load_model.state_dict()) ema_model.to(accelerator.device) del load_model discriminator = models.pop() load_model = Discriminator.from_pretrained(input_dir, subfolder="discriminator") discriminator.register_to_config(**load_model.config) discriminator.load_state_dict(load_model.state_dict()) del load_model vqmodel = models.pop() load_model = VQModel.from_pretrained(input_dir, subfolder="vqmodel") vqmodel.register_to_config(**load_model.config) vqmodel.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) learning_rate = args.learning_rate if args.scale_lr: learning_rate = ( learning_rate * args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( list(model.parameters()), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) discr_optimizer = optimizer_cls( list(discriminator.parameters()), lr=args.discr_learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ################################## # DATLOADER and LR-SCHEDULER # ################################# logger.info("Creating dataloaders and lr_scheduler") args.train_batch_size * accelerator.num_processes total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps # DataLoaders creation: if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
assert args.image_column is not None image_column = args.image_column if image_column not in column_names: raise ValueError(f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}") # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), ] ) validation_transform = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.ToTensor(), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() return {"pixel_values": pixel_values} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_training_steps=args.max_train_steps, num_warmup_steps=args.lr_warmup_steps, ) discr_lr_scheduler = get_scheduler( args.discr_lr_scheduler, optimizer=discr_optimizer, num_training_steps=args.max_train_steps, num_warmup_steps=args.lr_warmup_steps, ) # Prepare everything with accelerator logger.info("Preparing model, optimizer and dataloaders") # The dataloader are already aware of distributed training, so we don't need to prepare them. model, discriminator, optimizer, discr_optimizer, lr_scheduler, discr_lr_scheduler = accelerator.prepare( model, discriminator, optimizer, discr_optimizer, lr_scheduler, discr_lr_scheduler ) if args.use_ema: ema_model.to(accelerator.device) # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Scheduler and math around the number of training steps. 
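    # Worked example (illustrative): with 10_000 batches per epoch and
    # gradient_accumulation_steps=4, num_update_steps_per_epoch = ceil(10_000 / 4) = 2_500,
    # so num_train_epochs=2 corresponds to max_train_steps = 5_000 optimizer updates.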
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Potentially load in the weights and states from a previous save resume_from_checkpoint = args.resume_from_checkpoint if resume_from_checkpoint: if resume_from_checkpoint != "latest": path = resume_from_checkpoint else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None path = os.path.join(args.output_dir, path) if path is None: accelerator.print(f"Checkpoint '{resume_from_checkpoint}' does not exist. Starting a new training run.") resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(path) accelerator.wait_for_everyone() global_step = int(os.path.basename(path).split("-")[1]) first_epoch = global_step // num_update_steps_per_epoch batch_time_m = AverageMeter() data_time_m = AverageMeter() end = time.time() progress_bar = tqdm( range(0, args.max_train_steps), initial=global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) # As stated above, we are not doing epoch based training here, but just using this for book keeping and being able to # reuse the same training loop with other datasets/loaders. avg_gen_loss, avg_discr_loss = None, None for epoch in range(first_epoch, args.num_train_epochs): model.train() discriminator.train() for i, batch in enumerate(train_dataloader): pixel_values = batch["pixel_values"] pixel_values = pixel_values.to(accelerator.device, non_blocking=True) data_time_m.update(time.time() - end) generator_step = ((i // args.gradient_accumulation_steps) % 2) == 0 # Train Step # The behavior of accelerator.accumulate is to # 1. Check if gradients are synced(reached gradient-accumulation_steps) # 2. If so sync gradients by stopping the not syncing process if generator_step: optimizer.zero_grad(set_to_none=True) else: discr_optimizer.zero_grad(set_to_none=True) # encode images to the latent space and get the commit loss from vq tokenization # Return commit loss fmap, commit_loss = model(pixel_values, return_dict=False) if generator_step: with accelerator.accumulate(model): # reconstruction loss. Pixel level differences between input vs output if args.vae_loss == "l2": loss = F.mse_loss(pixel_values, fmap) else: loss = F.l1_loss(pixel_values, fmap) # perceptual loss. 
The high level feature mean squared error loss perceptual_loss = get_perceptual_loss( pixel_values, fmap, timm_model, timm_model_resolution=timm_model_resolution, timm_model_normalization=timm_model_normalization, ) # generator loss gen_loss = -discriminator(fmap).mean() last_dec_layer = accelerator.unwrap_model(model).decoder.conv_out.weight norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p=2) norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p=2) adaptive_weight = norm_grad_wrt_perceptual_loss / norm_grad_wrt_gen_loss.clamp(min=1e-8) adaptive_weight = adaptive_weight.clamp(max=1e4) loss += commit_loss loss += perceptual_loss loss += adaptive_weight * gen_loss # Gather the losses across all processes for logging (if we use distributed training). avg_gen_loss = accelerator.gather(loss.repeat(args.train_batch_size)).float().mean() accelerator.backward(loss) if args.max_grad_norm is not None and accelerator.sync_gradients: accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() # log gradient norm before zeroing it if ( accelerator.sync_gradients and global_step % args.log_grad_norm_steps == 0 and accelerator.is_main_process ): log_grad_norm(model, accelerator, global_step) else: # Return discriminator loss with accelerator.accumulate(discriminator): fmap.detach_() pixel_values.requires_grad_() real = discriminator(pixel_values) fake = discriminator(fmap) loss = (F.relu(1 + fake) + F.relu(1 - real)).mean() gp = gradient_penalty(pixel_values, real) loss += gp avg_discr_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() accelerator.backward(loss) if args.max_grad_norm is not None and accelerator.sync_gradients: accelerator.clip_grad_norm_(discriminator.parameters(), args.max_grad_norm) discr_optimizer.step() discr_lr_scheduler.step() if ( accelerator.sync_gradients and global_step % args.log_grad_norm_steps == 0 and accelerator.is_main_process ): log_grad_norm(discriminator, accelerator, global_step) batch_time_m.update(time.time() - end) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: global_step += 1 progress_bar.update(1) if args.use_ema: ema_model.step(model.parameters()) if accelerator.sync_gradients and not generator_step and accelerator.is_main_process: # wait for both generator and discriminator to settle # Log metrics if global_step % args.log_steps == 0: samples_per_second_per_gpu = ( args.gradient_accumulation_steps * args.train_batch_size / batch_time_m.val ) logs = { "step_discr_loss": avg_discr_loss.item(), "lr": lr_scheduler.get_last_lr()[0], "samples/sec/gpu": samples_per_second_per_gpu, "data_time": data_time_m.val, "batch_time": batch_time_m.val, } if avg_gen_loss is not None: logs["step_gen_loss"] = avg_gen_loss.item() accelerator.log(logs, step=global_step) # resetting batch / data time meters per log window batch_time_m.reset() data_time_m.reset() # Save model checkpoint if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if 
len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") # Generate images if global_step % args.validation_steps == 0: if args.use_ema: # Store the VQGAN parameters temporarily and load the EMA parameters to perform inference. ema_model.store(model.parameters()) ema_model.copy_to(model.parameters()) log_validation(model, args, validation_transform, accelerator, global_step) if args.use_ema: # Switch back to the original VQGAN parameters. ema_model.restore(model.parameters()) end = time.time() # Stop training if max steps is reached if global_step >= args.max_train_steps: break # End for accelerator.wait_for_everyone() # Save the final trained checkpoint if accelerator.is_main_process: model = accelerator.unwrap_model(model) discriminator = accelerator.unwrap_model(discriminator) if args.use_ema: ema_model.copy_to(model.parameters()) model.save_pretrained(os.path.join(args.output_dir, "vqmodel")) discriminator.save_pretrained(os.path.join(args.output_dir, "discriminator")) accelerator.end_training() if __name__ == "__main__": main()
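# ---------------------------------------------------------------------------
# Illustrative launch command (added sketch, not part of the original script).
# The flag names below mirror the `args.*` attributes referenced above (the
# error message earlier confirms the `--image_column`-style naming); the
# dataset arguments parsed earlier in the file and all concrete values are
# assumptions — adjust them to your own setup:
#
#   accelerate launch train_vqgan.py \
#       --output_dir ./vqgan-out \
#       --resolution 256 \
#       --train_batch_size 8 \
#       --gradient_accumulation_steps 4 \
#       --lr_scheduler constant \
#       --discr_lr_scheduler constant \
#       --checkpointing_steps 500 \
#       --validation_steps 500 \
#       --use_ema
# ---------------------------------------------------------------------------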
diffusers/examples/vqgan/train_vqgan.py/0
{ "file_path": "diffusers/examples/vqgan/train_vqgan.py", "repo_id": "diffusers", "token_count": 19252 }
138
import math import os import urllib import warnings from argparse import ArgumentParser import torch import torch.nn as nn import torch.nn.functional as F from huggingface_hub.utils import insecure_hashlib from safetensors.torch import load_file as stl from tqdm import tqdm from diffusers import AutoencoderKL, ConsistencyDecoderVAE, DiffusionPipeline, StableDiffusionPipeline, UNet2DModel from diffusers.models.autoencoders.vae import Encoder from diffusers.models.embeddings import TimestepEmbedding from diffusers.models.unets.unet_2d_blocks import ResnetDownsampleBlock2D, ResnetUpsampleBlock2D, UNetMidBlock2D args = ArgumentParser() args.add_argument("--save_pretrained", required=False, default=None, type=str) args.add_argument("--test_image", required=True, type=str) args = args.parse_args() def _extract_into_tensor(arr, timesteps, broadcast_shape): # from: https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/guided_diffusion/gaussian_diffusion.py#L895 """ res = arr[timesteps].float() dims_to_append = len(broadcast_shape) - len(res.shape) return res[(...,) + (None,) * dims_to_append] def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): # from: https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/guided_diffusion/gaussian_diffusion.py#L45 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return torch.tensor(betas) def _download(url: str, root: str): os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): if insecure_hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: return download_target else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024, ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) if insecure_hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match") return download_target class ConsistencyDecoder: def __init__(self, device="cuda:0", download_root=os.path.expanduser("~/.cache/clip")): self.n_distilled_steps = 64 download_target = _download( "https://openaipublic.azureedge.net/diff-vae/c9cebd3132dd9c42936d803e33424145a748843c8f716c0814838bdc8a2fe7cb/decoder.pt", download_root, ) self.ckpt = torch.jit.load(download_target).to(device) self.device = device sigma_data = 0.5 betas = betas_for_alpha_bar(1024, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2).to(device) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) self.c_skip = sqrt_recip_alphas_cumprod * 
sigma_data**2 / (sigmas**2 + sigma_data**2) self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 @staticmethod def round_timesteps(timesteps, total_timesteps, n_distilled_steps, truncate_start=True): with torch.no_grad(): space = torch.div(total_timesteps, n_distilled_steps, rounding_mode="floor") rounded_timesteps = (torch.div(timesteps, space, rounding_mode="floor") + 1) * space if truncate_start: rounded_timesteps[rounded_timesteps == total_timesteps] -= space else: rounded_timesteps[rounded_timesteps == total_timesteps] -= space rounded_timesteps[rounded_timesteps == 0] += space return rounded_timesteps @staticmethod def ldm_transform_latent(z, extra_scale_factor=1): channel_means = [0.38862467, 0.02253063, 0.07381133, -0.0171294] channel_stds = [0.9654121, 1.0440036, 0.76147926, 0.77022034] if len(z.shape) != 4: raise ValueError() z = z * 0.18215 channels = [z[:, i] for i in range(z.shape[1])] channels = [extra_scale_factor * (c - channel_means[i]) / channel_stds[i] for i, c in enumerate(channels)] return torch.stack(channels, dim=1) @torch.no_grad() def __call__( self, features: torch.Tensor, schedule=[1.0, 0.5], generator=None, ): features = self.ldm_transform_latent(features) ts = self.round_timesteps( torch.arange(0, 1024), 1024, self.n_distilled_steps, truncate_start=False, ) shape = ( features.size(0), 3, 8 * features.size(2), 8 * features.size(3), ) x_start = torch.zeros(shape, device=features.device, dtype=features.dtype) schedule_timesteps = [int((1024 - 1) * s) for s in schedule] for i in schedule_timesteps: t = ts[i].item() t_ = torch.tensor([t] * features.shape[0]).to(self.device) # noise = torch.randn_like(x_start) noise = torch.randn(x_start.shape, dtype=x_start.dtype, generator=generator).to(device=x_start.device) x_start = ( _extract_into_tensor(self.sqrt_alphas_cumprod, t_, x_start.shape) * x_start + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t_, x_start.shape) * noise ) c_in = _extract_into_tensor(self.c_in, t_, x_start.shape) import torch.nn.functional as F from diffusers import UNet2DModel if isinstance(self.ckpt, UNet2DModel): input = torch.concat([c_in * x_start, F.upsample_nearest(features, scale_factor=8)], dim=1) model_output = self.ckpt(input, t_).sample else: model_output = self.ckpt(c_in * x_start, t_, features=features) B, C = x_start.shape[:2] model_output, _ = torch.split(model_output, C, dim=1) pred_xstart = ( _extract_into_tensor(self.c_out, t_, x_start.shape) * model_output + _extract_into_tensor(self.c_skip, t_, x_start.shape) * x_start ).clamp(-1, 1) x_start = pred_xstart return x_start def save_image(image, name): import numpy as np from PIL import Image image = image[0].cpu().numpy() image = (image + 1.0) * 127.5 image = image.clip(0, 255).astype(np.uint8) image = Image.fromarray(image.transpose(1, 2, 0)) image.save(name) def load_image(uri, size=None, center_crop=False): import numpy as np from PIL import Image image = Image.open(uri) if center_crop: image = image.crop( ( (image.width - min(image.width, image.height)) // 2, (image.height - min(image.width, image.height)) // 2, (image.width + min(image.width, image.height)) // 2, (image.height + min(image.width, image.height)) // 2, ) ) if size is not None: image = image.resize(size) image = torch.tensor(np.array(image).transpose(2, 0, 1)).unsqueeze(0).float() image = image / 127.5 - 1.0 return image class TimestepEmbedding_(nn.Module): def __init__(self, n_time=1024, n_emb=320, n_out=1280) -> 
None: super().__init__() self.emb = nn.Embedding(n_time, n_emb) self.f_1 = nn.Linear(n_emb, n_out) self.f_2 = nn.Linear(n_out, n_out) def forward(self, x) -> torch.Tensor: x = self.emb(x) x = self.f_1(x) x = F.silu(x) return self.f_2(x) class ImageEmbedding(nn.Module): def __init__(self, in_channels=7, out_channels=320) -> None: super().__init__() self.f = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) def forward(self, x) -> torch.Tensor: return self.f(x) class ImageUnembedding(nn.Module): def __init__(self, in_channels=320, out_channels=6) -> None: super().__init__() self.gn = nn.GroupNorm(32, in_channels) self.f = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) def forward(self, x) -> torch.Tensor: return self.f(F.silu(self.gn(x))) class ConvResblock(nn.Module): def __init__(self, in_features=320, out_features=320) -> None: super().__init__() self.f_t = nn.Linear(1280, out_features * 2) self.gn_1 = nn.GroupNorm(32, in_features) self.f_1 = nn.Conv2d(in_features, out_features, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, out_features) self.f_2 = nn.Conv2d(out_features, out_features, kernel_size=3, padding=1) skip_conv = in_features != out_features self.f_s = nn.Conv2d(in_features, out_features, kernel_size=1, padding=0) if skip_conv else nn.Identity() def forward(self, x, t): x_skip = x t = self.f_t(F.silu(t)) t = t.chunk(2, dim=1) t_1 = t[0].unsqueeze(dim=2).unsqueeze(dim=3) + 1 t_2 = t[1].unsqueeze(dim=2).unsqueeze(dim=3) gn_1 = F.silu(self.gn_1(x)) f_1 = self.f_1(gn_1) gn_2 = self.gn_2(f_1) return self.f_s(x_skip) + self.f_2(F.silu(gn_2 * t_1 + t_2)) # Also ConvResblock class Downsample(nn.Module): def __init__(self, in_channels=320) -> None: super().__init__() self.f_t = nn.Linear(1280, in_channels * 2) self.gn_1 = nn.GroupNorm(32, in_channels) self.f_1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, in_channels) self.f_2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) def forward(self, x, t) -> torch.Tensor: x_skip = x t = self.f_t(F.silu(t)) t_1, t_2 = t.chunk(2, dim=1) t_1 = t_1.unsqueeze(2).unsqueeze(3) + 1 t_2 = t_2.unsqueeze(2).unsqueeze(3) gn_1 = F.silu(self.gn_1(x)) avg_pool2d = F.avg_pool2d(gn_1, kernel_size=(2, 2), stride=None) f_1 = self.f_1(avg_pool2d) gn_2 = self.gn_2(f_1) f_2 = self.f_2(F.silu(t_2 + (t_1 * gn_2))) return f_2 + F.avg_pool2d(x_skip, kernel_size=(2, 2), stride=None) # Also ConvResblock class Upsample(nn.Module): def __init__(self, in_channels=1024) -> None: super().__init__() self.f_t = nn.Linear(1280, in_channels * 2) self.gn_1 = nn.GroupNorm(32, in_channels) self.f_1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, in_channels) self.f_2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) def forward(self, x, t) -> torch.Tensor: x_skip = x t = self.f_t(F.silu(t)) t_1, t_2 = t.chunk(2, dim=1) t_1 = t_1.unsqueeze(2).unsqueeze(3) + 1 t_2 = t_2.unsqueeze(2).unsqueeze(3) gn_1 = F.silu(self.gn_1(x)) upsample = F.upsample_nearest(gn_1, scale_factor=2) f_1 = self.f_1(upsample) gn_2 = self.gn_2(f_1) f_2 = self.f_2(F.silu(t_2 + (t_1 * gn_2))) return f_2 + F.upsample_nearest(x_skip, scale_factor=2) class ConvUNetVAE(nn.Module): def __init__(self) -> None: super().__init__() self.embed_image = ImageEmbedding() self.embed_time = TimestepEmbedding_() down_0 = nn.ModuleList( [ ConvResblock(320, 320), ConvResblock(320, 320), ConvResblock(320, 320), Downsample(320), ] ) down_1 = nn.ModuleList( [ ConvResblock(320, 
640), ConvResblock(640, 640), ConvResblock(640, 640), Downsample(640), ] ) down_2 = nn.ModuleList( [ ConvResblock(640, 1024), ConvResblock(1024, 1024), ConvResblock(1024, 1024), Downsample(1024), ] ) down_3 = nn.ModuleList( [ ConvResblock(1024, 1024), ConvResblock(1024, 1024), ConvResblock(1024, 1024), ] ) self.down = nn.ModuleList( [ down_0, down_1, down_2, down_3, ] ) self.mid = nn.ModuleList( [ ConvResblock(1024, 1024), ConvResblock(1024, 1024), ] ) up_3 = nn.ModuleList( [ ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), Upsample(1024), ] ) up_2 = nn.ModuleList( [ ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 + 640, 1024), Upsample(1024), ] ) up_1 = nn.ModuleList( [ ConvResblock(1024 + 640, 640), ConvResblock(640 * 2, 640), ConvResblock(640 * 2, 640), ConvResblock(320 + 640, 640), Upsample(640), ] ) up_0 = nn.ModuleList( [ ConvResblock(320 + 640, 320), ConvResblock(320 * 2, 320), ConvResblock(320 * 2, 320), ConvResblock(320 * 2, 320), ] ) self.up = nn.ModuleList( [ up_0, up_1, up_2, up_3, ] ) self.output = ImageUnembedding() def forward(self, x, t, features) -> torch.Tensor: converted = hasattr(self, "converted") and self.converted x = torch.cat([x, F.upsample_nearest(features, scale_factor=8)], dim=1) if converted: t = self.time_embedding(self.time_proj(t)) else: t = self.embed_time(t) x = self.embed_image(x) skips = [x] for i, down in enumerate(self.down): if converted and i in [0, 1, 2, 3]: x, skips_ = down(x, t) for skip in skips_: skips.append(skip) else: for block in down: x = block(x, t) skips.append(x) print(x.float().abs().sum()) if converted: x = self.mid(x, t) else: for i in range(2): x = self.mid[i](x, t) print(x.float().abs().sum()) for i, up in enumerate(self.up[::-1]): if converted and i in [0, 1, 2, 3]: skip_4 = skips.pop() skip_3 = skips.pop() skip_2 = skips.pop() skip_1 = skips.pop() skips_ = (skip_1, skip_2, skip_3, skip_4) x = up(x, skips_, t) else: for block in up: if isinstance(block, ConvResblock): x = torch.concat([x, skips.pop()], dim=1) x = block(x, t) return self.output(x) def rename_state_dict_key(k): k = k.replace("blocks.", "") for i in range(5): k = k.replace(f"down_{i}_", f"down.{i}.") k = k.replace(f"conv_{i}.", f"{i}.") k = k.replace(f"up_{i}_", f"up.{i}.") k = k.replace(f"mid_{i}", f"mid.{i}") k = k.replace("upsamp.", "4.") k = k.replace("downsamp.", "3.") k = k.replace("f_t.w", "f_t.weight").replace("f_t.b", "f_t.bias") k = k.replace("f_1.w", "f_1.weight").replace("f_1.b", "f_1.bias") k = k.replace("f_2.w", "f_2.weight").replace("f_2.b", "f_2.bias") k = k.replace("f_s.w", "f_s.weight").replace("f_s.b", "f_s.bias") k = k.replace("f.w", "f.weight").replace("f.b", "f.bias") k = k.replace("gn_1.g", "gn_1.weight").replace("gn_1.b", "gn_1.bias") k = k.replace("gn_2.g", "gn_2.weight").replace("gn_2.b", "gn_2.bias") k = k.replace("gn.g", "gn.weight").replace("gn.b", "gn.bias") return k def rename_state_dict(sd, embedding): sd = {rename_state_dict_key(k): v for k, v in sd.items()} sd["embed_time.emb.weight"] = embedding["weight"] return sd # encode with stable diffusion vae pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe.vae.cuda() # construct original decoder with jitted model decoder_consistency = ConsistencyDecoder(device="cuda:0") # construct UNet code, overwrite the decoder with conv_unet_vae model = ConvUNetVAE() model.load_state_dict( rename_state_dict( 
stl("consistency_decoder.safetensors"), stl("embedding.safetensors"), ) ) model = model.cuda() decoder_consistency.ckpt = model image = load_image(args.test_image, size=(256, 256), center_crop=True) latent = pipe.vae.encode(image.half().cuda()).latent_dist.sample() # decode with gan sample_gan = pipe.vae.decode(latent).sample.detach() save_image(sample_gan, "gan.png") # decode with conv_unet_vae sample_consistency_orig = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0)) save_image(sample_consistency_orig, "con_orig.png") ########### conversion print("CONVERSION") print("DOWN BLOCK ONE") block_one_sd_orig = model.down[0].state_dict() block_one_sd_new = {} for i in range(3): block_one_sd_new[f"resnets.{i}.norm1.weight"] = block_one_sd_orig.pop(f"{i}.gn_1.weight") block_one_sd_new[f"resnets.{i}.norm1.bias"] = block_one_sd_orig.pop(f"{i}.gn_1.bias") block_one_sd_new[f"resnets.{i}.conv1.weight"] = block_one_sd_orig.pop(f"{i}.f_1.weight") block_one_sd_new[f"resnets.{i}.conv1.bias"] = block_one_sd_orig.pop(f"{i}.f_1.bias") block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_one_sd_orig.pop(f"{i}.f_t.weight") block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_one_sd_orig.pop(f"{i}.f_t.bias") block_one_sd_new[f"resnets.{i}.norm2.weight"] = block_one_sd_orig.pop(f"{i}.gn_2.weight") block_one_sd_new[f"resnets.{i}.norm2.bias"] = block_one_sd_orig.pop(f"{i}.gn_2.bias") block_one_sd_new[f"resnets.{i}.conv2.weight"] = block_one_sd_orig.pop(f"{i}.f_2.weight") block_one_sd_new[f"resnets.{i}.conv2.bias"] = block_one_sd_orig.pop(f"{i}.f_2.bias") block_one_sd_new["downsamplers.0.norm1.weight"] = block_one_sd_orig.pop("3.gn_1.weight") block_one_sd_new["downsamplers.0.norm1.bias"] = block_one_sd_orig.pop("3.gn_1.bias") block_one_sd_new["downsamplers.0.conv1.weight"] = block_one_sd_orig.pop("3.f_1.weight") block_one_sd_new["downsamplers.0.conv1.bias"] = block_one_sd_orig.pop("3.f_1.bias") block_one_sd_new["downsamplers.0.time_emb_proj.weight"] = block_one_sd_orig.pop("3.f_t.weight") block_one_sd_new["downsamplers.0.time_emb_proj.bias"] = block_one_sd_orig.pop("3.f_t.bias") block_one_sd_new["downsamplers.0.norm2.weight"] = block_one_sd_orig.pop("3.gn_2.weight") block_one_sd_new["downsamplers.0.norm2.bias"] = block_one_sd_orig.pop("3.gn_2.bias") block_one_sd_new["downsamplers.0.conv2.weight"] = block_one_sd_orig.pop("3.f_2.weight") block_one_sd_new["downsamplers.0.conv2.bias"] = block_one_sd_orig.pop("3.f_2.bias") assert len(block_one_sd_orig) == 0 block_one = ResnetDownsampleBlock2D( in_channels=320, out_channels=320, temb_channels=1280, num_layers=3, add_downsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) block_one.load_state_dict(block_one_sd_new) print("DOWN BLOCK TWO") block_two_sd_orig = model.down[1].state_dict() block_two_sd_new = {} for i in range(3): block_two_sd_new[f"resnets.{i}.norm1.weight"] = block_two_sd_orig.pop(f"{i}.gn_1.weight") block_two_sd_new[f"resnets.{i}.norm1.bias"] = block_two_sd_orig.pop(f"{i}.gn_1.bias") block_two_sd_new[f"resnets.{i}.conv1.weight"] = block_two_sd_orig.pop(f"{i}.f_1.weight") block_two_sd_new[f"resnets.{i}.conv1.bias"] = block_two_sd_orig.pop(f"{i}.f_1.bias") block_two_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_two_sd_orig.pop(f"{i}.f_t.weight") block_two_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_two_sd_orig.pop(f"{i}.f_t.bias") block_two_sd_new[f"resnets.{i}.norm2.weight"] = block_two_sd_orig.pop(f"{i}.gn_2.weight") block_two_sd_new[f"resnets.{i}.norm2.bias"] = 
block_two_sd_orig.pop(f"{i}.gn_2.bias") block_two_sd_new[f"resnets.{i}.conv2.weight"] = block_two_sd_orig.pop(f"{i}.f_2.weight") block_two_sd_new[f"resnets.{i}.conv2.bias"] = block_two_sd_orig.pop(f"{i}.f_2.bias") if i == 0: block_two_sd_new[f"resnets.{i}.conv_shortcut.weight"] = block_two_sd_orig.pop(f"{i}.f_s.weight") block_two_sd_new[f"resnets.{i}.conv_shortcut.bias"] = block_two_sd_orig.pop(f"{i}.f_s.bias") block_two_sd_new["downsamplers.0.norm1.weight"] = block_two_sd_orig.pop("3.gn_1.weight") block_two_sd_new["downsamplers.0.norm1.bias"] = block_two_sd_orig.pop("3.gn_1.bias") block_two_sd_new["downsamplers.0.conv1.weight"] = block_two_sd_orig.pop("3.f_1.weight") block_two_sd_new["downsamplers.0.conv1.bias"] = block_two_sd_orig.pop("3.f_1.bias") block_two_sd_new["downsamplers.0.time_emb_proj.weight"] = block_two_sd_orig.pop("3.f_t.weight") block_two_sd_new["downsamplers.0.time_emb_proj.bias"] = block_two_sd_orig.pop("3.f_t.bias") block_two_sd_new["downsamplers.0.norm2.weight"] = block_two_sd_orig.pop("3.gn_2.weight") block_two_sd_new["downsamplers.0.norm2.bias"] = block_two_sd_orig.pop("3.gn_2.bias") block_two_sd_new["downsamplers.0.conv2.weight"] = block_two_sd_orig.pop("3.f_2.weight") block_two_sd_new["downsamplers.0.conv2.bias"] = block_two_sd_orig.pop("3.f_2.bias") assert len(block_two_sd_orig) == 0 block_two = ResnetDownsampleBlock2D( in_channels=320, out_channels=640, temb_channels=1280, num_layers=3, add_downsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) block_two.load_state_dict(block_two_sd_new) print("DOWN BLOCK THREE") block_three_sd_orig = model.down[2].state_dict() block_three_sd_new = {} for i in range(3): block_three_sd_new[f"resnets.{i}.norm1.weight"] = block_three_sd_orig.pop(f"{i}.gn_1.weight") block_three_sd_new[f"resnets.{i}.norm1.bias"] = block_three_sd_orig.pop(f"{i}.gn_1.bias") block_three_sd_new[f"resnets.{i}.conv1.weight"] = block_three_sd_orig.pop(f"{i}.f_1.weight") block_three_sd_new[f"resnets.{i}.conv1.bias"] = block_three_sd_orig.pop(f"{i}.f_1.bias") block_three_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_three_sd_orig.pop(f"{i}.f_t.weight") block_three_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_three_sd_orig.pop(f"{i}.f_t.bias") block_three_sd_new[f"resnets.{i}.norm2.weight"] = block_three_sd_orig.pop(f"{i}.gn_2.weight") block_three_sd_new[f"resnets.{i}.norm2.bias"] = block_three_sd_orig.pop(f"{i}.gn_2.bias") block_three_sd_new[f"resnets.{i}.conv2.weight"] = block_three_sd_orig.pop(f"{i}.f_2.weight") block_three_sd_new[f"resnets.{i}.conv2.bias"] = block_three_sd_orig.pop(f"{i}.f_2.bias") if i == 0: block_three_sd_new[f"resnets.{i}.conv_shortcut.weight"] = block_three_sd_orig.pop(f"{i}.f_s.weight") block_three_sd_new[f"resnets.{i}.conv_shortcut.bias"] = block_three_sd_orig.pop(f"{i}.f_s.bias") block_three_sd_new["downsamplers.0.norm1.weight"] = block_three_sd_orig.pop("3.gn_1.weight") block_three_sd_new["downsamplers.0.norm1.bias"] = block_three_sd_orig.pop("3.gn_1.bias") block_three_sd_new["downsamplers.0.conv1.weight"] = block_three_sd_orig.pop("3.f_1.weight") block_three_sd_new["downsamplers.0.conv1.bias"] = block_three_sd_orig.pop("3.f_1.bias") block_three_sd_new["downsamplers.0.time_emb_proj.weight"] = block_three_sd_orig.pop("3.f_t.weight") block_three_sd_new["downsamplers.0.time_emb_proj.bias"] = block_three_sd_orig.pop("3.f_t.bias") block_three_sd_new["downsamplers.0.norm2.weight"] = block_three_sd_orig.pop("3.gn_2.weight") block_three_sd_new["downsamplers.0.norm2.bias"] = 
block_three_sd_orig.pop("3.gn_2.bias") block_three_sd_new["downsamplers.0.conv2.weight"] = block_three_sd_orig.pop("3.f_2.weight") block_three_sd_new["downsamplers.0.conv2.bias"] = block_three_sd_orig.pop("3.f_2.bias") assert len(block_three_sd_orig) == 0 block_three = ResnetDownsampleBlock2D( in_channels=640, out_channels=1024, temb_channels=1280, num_layers=3, add_downsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) block_three.load_state_dict(block_three_sd_new) print("DOWN BLOCK FOUR") block_four_sd_orig = model.down[3].state_dict() block_four_sd_new = {} for i in range(3): block_four_sd_new[f"resnets.{i}.norm1.weight"] = block_four_sd_orig.pop(f"{i}.gn_1.weight") block_four_sd_new[f"resnets.{i}.norm1.bias"] = block_four_sd_orig.pop(f"{i}.gn_1.bias") block_four_sd_new[f"resnets.{i}.conv1.weight"] = block_four_sd_orig.pop(f"{i}.f_1.weight") block_four_sd_new[f"resnets.{i}.conv1.bias"] = block_four_sd_orig.pop(f"{i}.f_1.bias") block_four_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_four_sd_orig.pop(f"{i}.f_t.weight") block_four_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_four_sd_orig.pop(f"{i}.f_t.bias") block_four_sd_new[f"resnets.{i}.norm2.weight"] = block_four_sd_orig.pop(f"{i}.gn_2.weight") block_four_sd_new[f"resnets.{i}.norm2.bias"] = block_four_sd_orig.pop(f"{i}.gn_2.bias") block_four_sd_new[f"resnets.{i}.conv2.weight"] = block_four_sd_orig.pop(f"{i}.f_2.weight") block_four_sd_new[f"resnets.{i}.conv2.bias"] = block_four_sd_orig.pop(f"{i}.f_2.bias") assert len(block_four_sd_orig) == 0 block_four = ResnetDownsampleBlock2D( in_channels=1024, out_channels=1024, temb_channels=1280, num_layers=3, add_downsample=False, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) block_four.load_state_dict(block_four_sd_new) print("MID BLOCK 1") mid_block_one_sd_orig = model.mid.state_dict() mid_block_one_sd_new = {} for i in range(2): mid_block_one_sd_new[f"resnets.{i}.norm1.weight"] = mid_block_one_sd_orig.pop(f"{i}.gn_1.weight") mid_block_one_sd_new[f"resnets.{i}.norm1.bias"] = mid_block_one_sd_orig.pop(f"{i}.gn_1.bias") mid_block_one_sd_new[f"resnets.{i}.conv1.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_1.weight") mid_block_one_sd_new[f"resnets.{i}.conv1.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_1.bias") mid_block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_t.weight") mid_block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_t.bias") mid_block_one_sd_new[f"resnets.{i}.norm2.weight"] = mid_block_one_sd_orig.pop(f"{i}.gn_2.weight") mid_block_one_sd_new[f"resnets.{i}.norm2.bias"] = mid_block_one_sd_orig.pop(f"{i}.gn_2.bias") mid_block_one_sd_new[f"resnets.{i}.conv2.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_2.weight") mid_block_one_sd_new[f"resnets.{i}.conv2.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_2.bias") assert len(mid_block_one_sd_orig) == 0 mid_block_one = UNetMidBlock2D( in_channels=1024, temb_channels=1280, num_layers=1, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, add_attention=False, ) mid_block_one.load_state_dict(mid_block_one_sd_new) print("UP BLOCK ONE") up_block_one_sd_orig = model.up[-1].state_dict() up_block_one_sd_new = {} for i in range(4): up_block_one_sd_new[f"resnets.{i}.norm1.weight"] = up_block_one_sd_orig.pop(f"{i}.gn_1.weight") up_block_one_sd_new[f"resnets.{i}.norm1.bias"] = up_block_one_sd_orig.pop(f"{i}.gn_1.bias") up_block_one_sd_new[f"resnets.{i}.conv1.weight"] = up_block_one_sd_orig.pop(f"{i}.f_1.weight") 
up_block_one_sd_new[f"resnets.{i}.conv1.bias"] = up_block_one_sd_orig.pop(f"{i}.f_1.bias") up_block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_one_sd_orig.pop(f"{i}.f_t.weight") up_block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_one_sd_orig.pop(f"{i}.f_t.bias") up_block_one_sd_new[f"resnets.{i}.norm2.weight"] = up_block_one_sd_orig.pop(f"{i}.gn_2.weight") up_block_one_sd_new[f"resnets.{i}.norm2.bias"] = up_block_one_sd_orig.pop(f"{i}.gn_2.bias") up_block_one_sd_new[f"resnets.{i}.conv2.weight"] = up_block_one_sd_orig.pop(f"{i}.f_2.weight") up_block_one_sd_new[f"resnets.{i}.conv2.bias"] = up_block_one_sd_orig.pop(f"{i}.f_2.bias") up_block_one_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_one_sd_orig.pop(f"{i}.f_s.weight") up_block_one_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_one_sd_orig.pop(f"{i}.f_s.bias") up_block_one_sd_new["upsamplers.0.norm1.weight"] = up_block_one_sd_orig.pop("4.gn_1.weight") up_block_one_sd_new["upsamplers.0.norm1.bias"] = up_block_one_sd_orig.pop("4.gn_1.bias") up_block_one_sd_new["upsamplers.0.conv1.weight"] = up_block_one_sd_orig.pop("4.f_1.weight") up_block_one_sd_new["upsamplers.0.conv1.bias"] = up_block_one_sd_orig.pop("4.f_1.bias") up_block_one_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_one_sd_orig.pop("4.f_t.weight") up_block_one_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_one_sd_orig.pop("4.f_t.bias") up_block_one_sd_new["upsamplers.0.norm2.weight"] = up_block_one_sd_orig.pop("4.gn_2.weight") up_block_one_sd_new["upsamplers.0.norm2.bias"] = up_block_one_sd_orig.pop("4.gn_2.bias") up_block_one_sd_new["upsamplers.0.conv2.weight"] = up_block_one_sd_orig.pop("4.f_2.weight") up_block_one_sd_new["upsamplers.0.conv2.bias"] = up_block_one_sd_orig.pop("4.f_2.bias") assert len(up_block_one_sd_orig) == 0 up_block_one = ResnetUpsampleBlock2D( in_channels=1024, prev_output_channel=1024, out_channels=1024, temb_channels=1280, num_layers=4, add_upsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) up_block_one.load_state_dict(up_block_one_sd_new) print("UP BLOCK TWO") up_block_two_sd_orig = model.up[-2].state_dict() up_block_two_sd_new = {} for i in range(4): up_block_two_sd_new[f"resnets.{i}.norm1.weight"] = up_block_two_sd_orig.pop(f"{i}.gn_1.weight") up_block_two_sd_new[f"resnets.{i}.norm1.bias"] = up_block_two_sd_orig.pop(f"{i}.gn_1.bias") up_block_two_sd_new[f"resnets.{i}.conv1.weight"] = up_block_two_sd_orig.pop(f"{i}.f_1.weight") up_block_two_sd_new[f"resnets.{i}.conv1.bias"] = up_block_two_sd_orig.pop(f"{i}.f_1.bias") up_block_two_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_two_sd_orig.pop(f"{i}.f_t.weight") up_block_two_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_two_sd_orig.pop(f"{i}.f_t.bias") up_block_two_sd_new[f"resnets.{i}.norm2.weight"] = up_block_two_sd_orig.pop(f"{i}.gn_2.weight") up_block_two_sd_new[f"resnets.{i}.norm2.bias"] = up_block_two_sd_orig.pop(f"{i}.gn_2.bias") up_block_two_sd_new[f"resnets.{i}.conv2.weight"] = up_block_two_sd_orig.pop(f"{i}.f_2.weight") up_block_two_sd_new[f"resnets.{i}.conv2.bias"] = up_block_two_sd_orig.pop(f"{i}.f_2.bias") up_block_two_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_two_sd_orig.pop(f"{i}.f_s.weight") up_block_two_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_two_sd_orig.pop(f"{i}.f_s.bias") up_block_two_sd_new["upsamplers.0.norm1.weight"] = up_block_two_sd_orig.pop("4.gn_1.weight") up_block_two_sd_new["upsamplers.0.norm1.bias"] = up_block_two_sd_orig.pop("4.gn_1.bias") 
up_block_two_sd_new["upsamplers.0.conv1.weight"] = up_block_two_sd_orig.pop("4.f_1.weight") up_block_two_sd_new["upsamplers.0.conv1.bias"] = up_block_two_sd_orig.pop("4.f_1.bias") up_block_two_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_two_sd_orig.pop("4.f_t.weight") up_block_two_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_two_sd_orig.pop("4.f_t.bias") up_block_two_sd_new["upsamplers.0.norm2.weight"] = up_block_two_sd_orig.pop("4.gn_2.weight") up_block_two_sd_new["upsamplers.0.norm2.bias"] = up_block_two_sd_orig.pop("4.gn_2.bias") up_block_two_sd_new["upsamplers.0.conv2.weight"] = up_block_two_sd_orig.pop("4.f_2.weight") up_block_two_sd_new["upsamplers.0.conv2.bias"] = up_block_two_sd_orig.pop("4.f_2.bias") assert len(up_block_two_sd_orig) == 0 up_block_two = ResnetUpsampleBlock2D( in_channels=640, prev_output_channel=1024, out_channels=1024, temb_channels=1280, num_layers=4, add_upsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) up_block_two.load_state_dict(up_block_two_sd_new) print("UP BLOCK THREE") up_block_three_sd_orig = model.up[-3].state_dict() up_block_three_sd_new = {} for i in range(4): up_block_three_sd_new[f"resnets.{i}.norm1.weight"] = up_block_three_sd_orig.pop(f"{i}.gn_1.weight") up_block_three_sd_new[f"resnets.{i}.norm1.bias"] = up_block_three_sd_orig.pop(f"{i}.gn_1.bias") up_block_three_sd_new[f"resnets.{i}.conv1.weight"] = up_block_three_sd_orig.pop(f"{i}.f_1.weight") up_block_three_sd_new[f"resnets.{i}.conv1.bias"] = up_block_three_sd_orig.pop(f"{i}.f_1.bias") up_block_three_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_three_sd_orig.pop(f"{i}.f_t.weight") up_block_three_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_three_sd_orig.pop(f"{i}.f_t.bias") up_block_three_sd_new[f"resnets.{i}.norm2.weight"] = up_block_three_sd_orig.pop(f"{i}.gn_2.weight") up_block_three_sd_new[f"resnets.{i}.norm2.bias"] = up_block_three_sd_orig.pop(f"{i}.gn_2.bias") up_block_three_sd_new[f"resnets.{i}.conv2.weight"] = up_block_three_sd_orig.pop(f"{i}.f_2.weight") up_block_three_sd_new[f"resnets.{i}.conv2.bias"] = up_block_three_sd_orig.pop(f"{i}.f_2.bias") up_block_three_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_three_sd_orig.pop(f"{i}.f_s.weight") up_block_three_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_three_sd_orig.pop(f"{i}.f_s.bias") up_block_three_sd_new["upsamplers.0.norm1.weight"] = up_block_three_sd_orig.pop("4.gn_1.weight") up_block_three_sd_new["upsamplers.0.norm1.bias"] = up_block_three_sd_orig.pop("4.gn_1.bias") up_block_three_sd_new["upsamplers.0.conv1.weight"] = up_block_three_sd_orig.pop("4.f_1.weight") up_block_three_sd_new["upsamplers.0.conv1.bias"] = up_block_three_sd_orig.pop("4.f_1.bias") up_block_three_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_three_sd_orig.pop("4.f_t.weight") up_block_three_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_three_sd_orig.pop("4.f_t.bias") up_block_three_sd_new["upsamplers.0.norm2.weight"] = up_block_three_sd_orig.pop("4.gn_2.weight") up_block_three_sd_new["upsamplers.0.norm2.bias"] = up_block_three_sd_orig.pop("4.gn_2.bias") up_block_three_sd_new["upsamplers.0.conv2.weight"] = up_block_three_sd_orig.pop("4.f_2.weight") up_block_three_sd_new["upsamplers.0.conv2.bias"] = up_block_three_sd_orig.pop("4.f_2.bias") assert len(up_block_three_sd_orig) == 0 up_block_three = ResnetUpsampleBlock2D( in_channels=320, prev_output_channel=1024, out_channels=640, temb_channels=1280, num_layers=4, add_upsample=True, resnet_time_scale_shift="scale_shift", 
resnet_eps=1e-5, ) up_block_three.load_state_dict(up_block_three_sd_new) print("UP BLOCK FOUR") up_block_four_sd_orig = model.up[-4].state_dict() up_block_four_sd_new = {} for i in range(4): up_block_four_sd_new[f"resnets.{i}.norm1.weight"] = up_block_four_sd_orig.pop(f"{i}.gn_1.weight") up_block_four_sd_new[f"resnets.{i}.norm1.bias"] = up_block_four_sd_orig.pop(f"{i}.gn_1.bias") up_block_four_sd_new[f"resnets.{i}.conv1.weight"] = up_block_four_sd_orig.pop(f"{i}.f_1.weight") up_block_four_sd_new[f"resnets.{i}.conv1.bias"] = up_block_four_sd_orig.pop(f"{i}.f_1.bias") up_block_four_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_four_sd_orig.pop(f"{i}.f_t.weight") up_block_four_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_four_sd_orig.pop(f"{i}.f_t.bias") up_block_four_sd_new[f"resnets.{i}.norm2.weight"] = up_block_four_sd_orig.pop(f"{i}.gn_2.weight") up_block_four_sd_new[f"resnets.{i}.norm2.bias"] = up_block_four_sd_orig.pop(f"{i}.gn_2.bias") up_block_four_sd_new[f"resnets.{i}.conv2.weight"] = up_block_four_sd_orig.pop(f"{i}.f_2.weight") up_block_four_sd_new[f"resnets.{i}.conv2.bias"] = up_block_four_sd_orig.pop(f"{i}.f_2.bias") up_block_four_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_four_sd_orig.pop(f"{i}.f_s.weight") up_block_four_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_four_sd_orig.pop(f"{i}.f_s.bias") assert len(up_block_four_sd_orig) == 0 up_block_four = ResnetUpsampleBlock2D( in_channels=320, prev_output_channel=640, out_channels=320, temb_channels=1280, num_layers=4, add_upsample=False, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) up_block_four.load_state_dict(up_block_four_sd_new) print("initial projection (conv_in)") conv_in_sd_orig = model.embed_image.state_dict() conv_in_sd_new = {} conv_in_sd_new["weight"] = conv_in_sd_orig.pop("f.weight") conv_in_sd_new["bias"] = conv_in_sd_orig.pop("f.bias") assert len(conv_in_sd_orig) == 0 block_out_channels = [320, 640, 1024, 1024] in_channels = 7 conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) conv_in.load_state_dict(conv_in_sd_new) print("out projection (conv_out) (conv_norm_out)") out_channels = 6 norm_num_groups = 32 norm_eps = 1e-5 act_fn = "silu" conv_out_kernel = 3 conv_out_padding = (conv_out_kernel - 1) // 2 conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) # uses torch.functional in orig # conv_act = get_activation(act_fn) conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) conv_norm_out.load_state_dict(model.output.gn.state_dict()) conv_out.load_state_dict(model.output.f.state_dict()) print("timestep projection (time_proj) (time_embedding)") f1_sd = model.embed_time.f_1.state_dict() f2_sd = model.embed_time.f_2.state_dict() time_embedding_sd = { "linear_1.weight": f1_sd.pop("weight"), "linear_1.bias": f1_sd.pop("bias"), "linear_2.weight": f2_sd.pop("weight"), "linear_2.bias": f2_sd.pop("bias"), } assert len(f1_sd) == 0 assert len(f2_sd) == 0 time_embedding_type = "learned" num_train_timesteps = 1024 time_embedding_dim = 1280 time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0]) timestep_input_dim = block_out_channels[0] time_embedding = TimestepEmbedding(timestep_input_dim, time_embedding_dim) time_proj.load_state_dict(model.embed_time.emb.state_dict()) time_embedding.load_state_dict(time_embedding_sd) print("CONVERT") 
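# Added commentary (not original code): in the steps that follow, the converted
# diffusers blocks built above are moved to CUDA and swapped into the original
# ConvUNetVAE in place of its hand-written blocks, with `model.converted = True`
# switching the forward pass to the converted layout. The latent is then decoded
# again and compared element-for-element against `sample_consistency_orig` to
# verify that the conversion is lossless, before the same weights are repackaged
# into a `UNet2DModel` and finally into a `ConsistencyDecoderVAE`.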
time_embedding.to("cuda") time_proj.to("cuda") conv_in.to("cuda") block_one.to("cuda") block_two.to("cuda") block_three.to("cuda") block_four.to("cuda") mid_block_one.to("cuda") up_block_one.to("cuda") up_block_two.to("cuda") up_block_three.to("cuda") up_block_four.to("cuda") conv_norm_out.to("cuda") conv_out.to("cuda") model.time_proj = time_proj model.time_embedding = time_embedding model.embed_image = conv_in model.down[0] = block_one model.down[1] = block_two model.down[2] = block_three model.down[3] = block_four model.mid = mid_block_one model.up[-1] = up_block_one model.up[-2] = up_block_two model.up[-3] = up_block_three model.up[-4] = up_block_four model.output.gn = conv_norm_out model.output.f = conv_out model.converted = True sample_consistency_new = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0)) save_image(sample_consistency_new, "con_new.png") assert (sample_consistency_orig == sample_consistency_new).all() print("making unet") unet = UNet2DModel( in_channels=in_channels, out_channels=out_channels, down_block_types=( "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", ), up_block_types=( "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ), block_out_channels=block_out_channels, layers_per_block=3, norm_num_groups=norm_num_groups, norm_eps=norm_eps, resnet_time_scale_shift="scale_shift", time_embedding_type="learned", num_train_timesteps=num_train_timesteps, add_attention=False, ) unet_state_dict = {} def add_state_dict(prefix, mod): for k, v in mod.state_dict().items(): unet_state_dict[f"{prefix}.{k}"] = v add_state_dict("conv_in", conv_in) add_state_dict("time_proj", time_proj) add_state_dict("time_embedding", time_embedding) add_state_dict("down_blocks.0", block_one) add_state_dict("down_blocks.1", block_two) add_state_dict("down_blocks.2", block_three) add_state_dict("down_blocks.3", block_four) add_state_dict("mid_block", mid_block_one) add_state_dict("up_blocks.0", up_block_one) add_state_dict("up_blocks.1", up_block_two) add_state_dict("up_blocks.2", up_block_three) add_state_dict("up_blocks.3", up_block_four) add_state_dict("conv_norm_out", conv_norm_out) add_state_dict("conv_out", conv_out) unet.load_state_dict(unet_state_dict) print("running with diffusers unet") unet.to("cuda") decoder_consistency.ckpt = unet sample_consistency_new_2 = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0)) save_image(sample_consistency_new_2, "con_new_2.png") assert (sample_consistency_orig == sample_consistency_new_2).all() print("running with diffusers model") Encoder.old_constructor = Encoder.__init__ def new_constructor(self, **kwargs): self.old_constructor(**kwargs) self.constructor_arguments = kwargs Encoder.__init__ = new_constructor vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae") consistency_vae = ConsistencyDecoderVAE( encoder_args=vae.encoder.constructor_arguments, decoder_args=unet.config, scaling_factor=vae.config.scaling_factor, block_out_channels=vae.config.block_out_channels, latent_channels=vae.config.latent_channels, ) consistency_vae.encoder.load_state_dict(vae.encoder.state_dict()) consistency_vae.quant_conv.load_state_dict(vae.quant_conv.state_dict()) consistency_vae.decoder_unet.load_state_dict(unet.state_dict()) consistency_vae.to(dtype=torch.float16, device="cuda") sample_consistency_new_3 = consistency_vae.decode( 0.18215 * latent, 
generator=torch.Generator("cpu").manual_seed(0) ).sample print("max difference") print((sample_consistency_orig - sample_consistency_new_3).abs().max()) print("total difference") print((sample_consistency_orig - sample_consistency_new_3).abs().sum()) # assert (sample_consistency_orig == sample_consistency_new_3).all() print("running with diffusers pipeline") pipe = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", vae=consistency_vae, torch_dtype=torch.float16 ) pipe.to("cuda") pipe("horse", generator=torch.Generator("cpu").manual_seed(0)).images[0].save("horse.png") if args.save_pretrained is not None: consistency_vae.save_pretrained(args.save_pretrained)
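# ---------------------------------------------------------------------------
# Illustrative usage (added sketch, not part of the original script; the image
# file name and output directory are placeholders). The script expects a CUDA
# device plus `consistency_decoder.safetensors` and `embedding.safetensors` in
# the working directory, as loaded via `stl(...)` above:
#
#   python convert_consistency_decoder.py \
#       --test_image sample.png \
#       --save_pretrained ./consistency-decoder-vae
# ---------------------------------------------------------------------------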
diffusers/scripts/convert_consistency_decoder.py/0
{ "file_path": "diffusers/scripts/convert_consistency_decoder.py", "repo_id": "diffusers", "token_count": 21910 }
139
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the AudioLDM2 checkpoints.""" import argparse import re from typing import List, Union import torch import yaml from transformers import ( AutoFeatureExtractor, AutoTokenizer, ClapConfig, ClapModel, GPT2Config, GPT2Model, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import is_safetensors_available from diffusers.utils.import_utils import BACKENDS_MAPPING # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.shave_segments def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_resnet_paths def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_resnet_paths def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_attention_paths def renew_attention_paths(old_list): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, 
n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "to_q.weight") new_item = new_item.replace("q.bias", "to_q.bias") new_item = new_item.replace("k.weight", "to_k.weight") new_item = new_item.replace("k.bias", "to_k.bias") new_item = new_item.replace("v.weight", "to_v.weight") new_item = new_item.replace("v.bias", "to_v.bias") new_item = new_item.replace("proj_out.weight", "to_out.0.weight") new_item = new_item.replace("proj_out.bias", "to_out.0.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["to_q.weight", "to_k.weight", "to_v.weight"] proj_key = "to_out.0.weight" for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys or ".".join(key.split(".")[-3:]) == proj_key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key].squeeze() def create_unet_diffusers_config(original_config, image_size: int): """ Creates a UNet config for diffusers based on the config of the original AudioLDM2 model. 
""" unet_params = original_config["model"]["params"]["unet_config"]["params"] vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) cross_attention_dim = list(unet_params["context_dim"]) if "context_dim" in unet_params else block_out_channels if len(cross_attention_dim) > 1: # require two or more cross-attention layers per-block, each of different dimension cross_attention_dim = [cross_attention_dim for _ in range(len(block_out_channels))] config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params["in_channels"], "out_channels": unet_params["out_channels"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params["num_res_blocks"], "transformer_layers_per_block": unet_params["transformer_depth"], "cross_attention_dim": tuple(cross_attention_dim), } return config # Adapted from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_vae_diffusers_config def create_vae_diffusers_config(original_config, checkpoint, image_size: int): """ Creates a VAE config for diffusers based on the config of the original AudioLDM2 model. Compared to the original Stable Diffusion conversion, this function passes a *learnt* VAE scaling factor to the diffusers VAE. """ vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] _ = original_config["model"]["params"]["first_stage_config"]["params"]["embed_dim"] block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) scaling_factor = checkpoint["scale_factor"] if "scale_by_std" in original_config["model"]["params"] else 0.18215 config = { "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], "scaling_factor": float(scaling_factor), } return config # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_diffusers_schedular def create_diffusers_schedular(original_config): schedular = DDIMScheduler( num_train_timesteps=original_config["model"]["params"]["timesteps"], beta_start=original_config["model"]["params"]["linear_start"], beta_end=original_config["model"]["params"]["linear_end"], beta_schedule="scaled_linear", ) return schedular def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): """ Takes a state dict and a config, and returns a converted UNet checkpoint. 
""" # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: print(f"Checkpoint {path} has both EMA and non-EMA weights.") print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." ) # strip the unet prefix from the weight names for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}." in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." 
in key] for layer_id in range(num_output_blocks) } # Check how many Transformer blocks we have per layer if isinstance(config.get("cross_attention_dim"), (list, tuple)): if isinstance(config["cross_attention_dim"][0], (list, tuple)): # in this case we have multiple cross-attention layers per-block num_attention_layers = len(config.get("cross_attention_dim")[0]) else: num_attention_layers = 1 if config.get("extra_self_attn_layer"): num_attention_layers += 1 for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.0" not in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = [ { "old": f"input_blocks.{i}.{1 + layer_id}", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id * num_attention_layers + layer_id}", } for layer_id in range(num_attention_layers) ] assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=meta_path, config=config ) resnet_0 = middle_blocks[0] resnet_1 = middle_blocks[num_middle_blocks - 1] resnet_0_paths = renew_resnet_paths(resnet_0) meta_path = {"old": "middle_block.0", "new": "mid_block.resnets.0"} assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_1_paths = renew_resnet_paths(resnet_1) meta_path = {"old": f"middle_block.{len(middle_blocks) - 1}", "new": "mid_block.resnets.1"} assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(1, num_middle_blocks - 1): attentions = middle_blocks[i] attentions_paths = renew_attention_paths(attentions) meta_path = {"old": f"middle_block.{i}", "new": f"mid_block.attentions.{i - 1}"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.0" not in key] paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, 
additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] attentions.remove(f"output_blocks.{i}.{index}.conv.bias") attentions.remove(f"output_blocks.{i}.{index}.conv.weight") # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = [ { "old": f"output_blocks.{i}.{1 + layer_id}", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id * num_attention_layers + layer_id}", } for layer_id in range(num_attention_layers) ] assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=meta_path, config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} vae_key = "first_stage_model." keys = list(checkpoint.keys()) for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { 
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint CLAP_KEYS_TO_MODIFY_MAPPING = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } CLAP_KEYS_TO_IGNORE = [ "text_transform", "audio_transform", "stft", 
"logmel_extractor", "tscam_conv", "head", "attn_mask", ] CLAP_EXPECTED_MISSING_KEYS = ["text_model.embeddings.token_type_ids"] def convert_open_clap_checkpoint(checkpoint): """ Takes a state dict and returns a converted CLAP checkpoint. """ # extract state dict for CLAP text embedding model, discarding the audio component model_state_dict = {} model_key = "clap.model." keys = list(checkpoint.keys()) for key in keys: if key.startswith(model_key): model_state_dict[key.replace(model_key, "")] = checkpoint.get(key) new_checkpoint = {} sequential_layers_pattern = r".*sequential.(\d+).*" text_projection_pattern = r".*_projection.(\d+).*" for key, value in model_state_dict.items(): # check if key should be ignored in mapping - if so map it to a key name that we'll filter out at the end for key_to_ignore in CLAP_KEYS_TO_IGNORE: if key_to_ignore in key: key = "spectrogram" # check if any key needs to be modified for key_to_modify, new_key in CLAP_KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if re.match(sequential_layers_pattern, key): # replace sequential layers with list sequential_layer = re.match(sequential_layers_pattern, key).group(1) key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.") elif re.match(text_projection_pattern, key): projecton_layer = int(re.match(text_projection_pattern, key).group(1)) # Because in CLAP they use `nn.Sequential`... transformers_projection_layer = 1 if projecton_layer == 0 else 2 key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.") if "audio" and "qkv" in key: # split qkv into query key and value mixed_qkv = value qkv_dim = mixed_qkv.size(0) // 3 query_layer = mixed_qkv[:qkv_dim] key_layer = mixed_qkv[qkv_dim : qkv_dim * 2] value_layer = mixed_qkv[qkv_dim * 2 :] new_checkpoint[key.replace("qkv", "query")] = query_layer new_checkpoint[key.replace("qkv", "key")] = key_layer new_checkpoint[key.replace("qkv", "value")] = value_layer elif key != "spectrogram": new_checkpoint[key] = value return new_checkpoint def create_transformers_vocoder_config(original_config): """ Creates a config for transformers SpeechT5HifiGan based on the config of the vocoder model. """ vocoder_params = original_config["model"]["params"]["vocoder_config"]["params"] config = { "model_in_dim": vocoder_params["num_mels"], "sampling_rate": vocoder_params["sampling_rate"], "upsample_initial_channel": vocoder_params["upsample_initial_channel"], "upsample_rates": list(vocoder_params["upsample_rates"]), "upsample_kernel_sizes": list(vocoder_params["upsample_kernel_sizes"]), "resblock_kernel_sizes": list(vocoder_params["resblock_kernel_sizes"]), "resblock_dilation_sizes": [ list(resblock_dilation) for resblock_dilation in vocoder_params["resblock_dilation_sizes"] ], "normalize_before": False, } return config def extract_sub_model(checkpoint, key_prefix): """ Takes a state dict and returns the state dict for a particular sub-model. """ sub_model_state_dict = {} keys = list(checkpoint.keys()) for key in keys: if key.startswith(key_prefix): sub_model_state_dict[key.replace(key_prefix, "")] = checkpoint.get(key) return sub_model_state_dict def convert_hifigan_checkpoint(checkpoint, config): """ Takes a state dict and config, and returns a converted HiFiGAN vocoder checkpoint. 
""" # extract state dict for vocoder vocoder_state_dict = extract_sub_model(checkpoint, key_prefix="first_stage_model.vocoder.") # fix upsampler keys, everything else is correct already for i in range(len(config.upsample_rates)): vocoder_state_dict[f"upsampler.{i}.weight"] = vocoder_state_dict.pop(f"ups.{i}.weight") vocoder_state_dict[f"upsampler.{i}.bias"] = vocoder_state_dict.pop(f"ups.{i}.bias") if not config.normalize_before: # if we don't set normalize_before then these variables are unused, so we set them to their initialised values vocoder_state_dict["mean"] = torch.zeros(config.model_in_dim) vocoder_state_dict["scale"] = torch.ones(config.model_in_dim) return vocoder_state_dict def convert_projection_checkpoint(checkpoint): projection_state_dict = {} conditioner_state_dict = extract_sub_model(checkpoint, key_prefix="cond_stage_models.0.") projection_state_dict["sos_embed"] = conditioner_state_dict["start_of_sequence_tokens.weight"][0] projection_state_dict["sos_embed_1"] = conditioner_state_dict["start_of_sequence_tokens.weight"][1] projection_state_dict["eos_embed"] = conditioner_state_dict["end_of_sequence_tokens.weight"][0] projection_state_dict["eos_embed_1"] = conditioner_state_dict["end_of_sequence_tokens.weight"][1] projection_state_dict["projection.weight"] = conditioner_state_dict["input_sequence_embed_linear.0.weight"] projection_state_dict["projection.bias"] = conditioner_state_dict["input_sequence_embed_linear.0.bias"] projection_state_dict["projection_1.weight"] = conditioner_state_dict["input_sequence_embed_linear.1.weight"] projection_state_dict["projection_1.bias"] = conditioner_state_dict["input_sequence_embed_linear.1.bias"] return projection_state_dict # Adapted from https://github.com/haoheliu/AudioLDM2/blob/81ad2c6ce015c1310387695e2dae975a7d2ed6fd/audioldm2/utils.py#L143 DEFAULT_CONFIG = { "model": { "params": { "linear_start": 0.0015, "linear_end": 0.0195, "timesteps": 1000, "channels": 8, "scale_by_std": True, "unet_config": { "target": "audioldm2.latent_diffusion.openaimodel.UNetModel", "params": { "context_dim": [None, 768, 1024], "in_channels": 8, "out_channels": 8, "model_channels": 128, "attention_resolutions": [8, 4, 2], "num_res_blocks": 2, "channel_mult": [1, 2, 3, 5], "num_head_channels": 32, "transformer_depth": 1, }, }, "first_stage_config": { "target": "audioldm2.variational_autoencoder.autoencoder.AutoencoderKL", "params": { "embed_dim": 8, "ddconfig": { "z_channels": 8, "resolution": 256, "in_channels": 1, "out_ch": 1, "ch": 128, "ch_mult": [1, 2, 4], "num_res_blocks": 2, }, }, }, "cond_stage_config": { "crossattn_audiomae_generated": { "target": "audioldm2.latent_diffusion.modules.encoders.modules.SequenceGenAudioMAECond", "params": { "sequence_gen_length": 8, "sequence_input_embed_dim": [512, 1024], }, } }, "vocoder_config": { "target": "audioldm2.first_stage_model.vocoder", "params": { "upsample_rates": [5, 4, 2, 2, 2], "upsample_kernel_sizes": [16, 16, 8, 4, 4], "upsample_initial_channel": 1024, "resblock_kernel_sizes": [3, 7, 11], "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], "num_mels": 64, "sampling_rate": 16000, }, }, }, }, } def load_pipeline_from_original_AudioLDM2_ckpt( checkpoint_path: str, original_config_file: str = None, image_size: int = 1024, prediction_type: str = None, extract_ema: bool = False, scheduler_type: str = "ddim", cross_attention_dim: Union[List, List[List]] = None, transformer_layers_per_block: int = None, device: str = None, from_safetensors: bool = False, ) -> AudioLDM2Pipeline: """ Load an 
AudioLDM2 pipeline object from a `.ckpt`/`.safetensors` file and (ideally) a `.yaml` config file. Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is recommended that you override the default values and/or supply an `original_config_file` wherever possible. Args: checkpoint_path (`str`): Path to `.ckpt` file. original_config_file (`str`): Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically set to the AudioLDM2 base config. image_size (`int`, *optional*, defaults to 1024): The image size that the model was trained on. prediction_type (`str`, *optional*): The prediction type that the model was trained on. If `None`, will be automatically inferred by looking for a key in the config. For the default config, the prediction type is `'epsilon'`. scheduler_type (`str`, *optional*, defaults to 'ddim'): Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", "ddim"]`. cross_attention_dim (`list`, *optional*, defaults to `None`): The dimension of the cross-attention layers. If `None`, the cross-attention dimension will be automatically inferred. Set to `[768, 1024]` for the base model, or `[768, 1024, None]` for the large model. transformer_layers_per_block (`int`, *optional*, defaults to `None`): The number of transformer layers in each transformer block. If `None`, the number of layers will be automatically inferred. Set to `1` for the base model, or `2` for the large model. extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning. device (`str`, *optional*, defaults to `None`): The device to use. Pass `None` to determine automatically. from_safetensors (`bool`, *optional*, defaults to `False`): If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch. return: An AudioLDM2Pipeline object representing the passed-in `.ckpt`/`.safetensors` file.
""" if from_safetensors: if not is_safetensors_available(): raise ValueError(BACKENDS_MAPPING["safetensors"][1]) from safetensors import safe_open checkpoint = {} with safe_open(checkpoint_path, framework="pt", device="cpu") as f: for key in f.keys(): checkpoint[key] = f.get_tensor(key) else: if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" checkpoint = torch.load(checkpoint_path, map_location=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) if "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] if original_config_file is None: original_config = DEFAULT_CONFIG else: original_config = yaml.safe_load(original_config_file) if image_size is not None: original_config["model"]["params"]["unet_config"]["params"]["image_size"] = image_size if cross_attention_dim is not None: original_config["model"]["params"]["unet_config"]["params"]["context_dim"] = cross_attention_dim if transformer_layers_per_block is not None: original_config["model"]["params"]["unet_config"]["params"]["transformer_depth"] = transformer_layers_per_block if ( "parameterization" in original_config["model"]["params"] and original_config["model"]["params"]["parameterization"] == "v" ): if prediction_type is None: prediction_type = "v_prediction" else: if prediction_type is None: prediction_type = "epsilon" num_train_timesteps = original_config["model"]["params"]["timesteps"] beta_start = original_config["model"]["params"]["linear_start"] beta_end = original_config["model"]["params"]["linear_end"] scheduler = DDIMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, steps_offset=1, clip_sample=False, set_alpha_to_one=False, prediction_type=prediction_type, ) # make sure scheduler works correctly with DDIM scheduler.register_to_config(clip_sample=False) if scheduler_type == "pndm": config = dict(scheduler.config) config["skip_prk_steps"] = True scheduler = PNDMScheduler.from_config(config) elif scheduler_type == "lms": scheduler = LMSDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "heun": scheduler = HeunDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler": scheduler = EulerDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) elif scheduler_type == "ddim": scheduler = scheduler else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") # Convert the UNet2DModel unet_config = create_unet_diffusers_config(original_config, image_size=image_size) unet = AudioLDM2UNet2DConditionModel(**unet_config) converted_unet_checkpoint = convert_ldm_unet_checkpoint( checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema ) unet.load_state_dict(converted_unet_checkpoint) # Convert the VAE model vae_config = create_vae_diffusers_config(original_config, checkpoint=checkpoint, image_size=image_size) converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) # Convert the joint audio-text encoding model clap_config = ClapConfig.from_pretrained("laion/clap-htsat-unfused") clap_config.audio_config.update( { "patch_embeds_hidden_size": 128, "hidden_size": 1024, "depths": [2, 2, 12, 2], } ) # AudioLDM2 uses the same tokenizer and feature 
extractor as the original CLAP model clap_tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") clap_feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused") converted_clap_model = convert_open_clap_checkpoint(checkpoint) clap_model = ClapModel(clap_config) missing_keys, unexpected_keys = clap_model.load_state_dict(converted_clap_model, strict=False) # we expect not to have token_type_ids in our original state dict so let's ignore them missing_keys = list(set(missing_keys) - set(CLAP_EXPECTED_MISSING_KEYS)) if len(unexpected_keys) > 0: raise ValueError(f"Unexpected keys when loading CLAP model: {unexpected_keys}") if len(missing_keys) > 0: raise ValueError(f"Missing keys when loading CLAP model: {missing_keys}") # Convert the vocoder model vocoder_config = create_transformers_vocoder_config(original_config) vocoder_config = SpeechT5HifiGanConfig(**vocoder_config) converted_vocoder_checkpoint = convert_hifigan_checkpoint(checkpoint, vocoder_config) vocoder = SpeechT5HifiGan(vocoder_config) vocoder.load_state_dict(converted_vocoder_checkpoint) # Convert the Flan-T5 encoder model: AudioLDM2 uses the same configuration and tokenizer as the original Flan-T5 large model t5_config = T5Config.from_pretrained("google/flan-t5-large") converted_t5_checkpoint = extract_sub_model(checkpoint, key_prefix="cond_stage_models.1.model.") t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large") # hard-coded in the original implementation (i.e. not retrievable from the config) t5_tokenizer.model_max_length = 128 t5_model = T5EncoderModel(t5_config) t5_model.load_state_dict(converted_t5_checkpoint) # Convert the GPT2 encoder model: AudioLDM2 uses the same configuration as the original GPT2 base model gpt2_config = GPT2Config.from_pretrained("gpt2") gpt2_model = GPT2Model(gpt2_config) gpt2_model.config.max_new_tokens = original_config["model"]["params"]["cond_stage_config"][ "crossattn_audiomae_generated" ]["params"]["sequence_gen_length"] converted_gpt2_checkpoint = extract_sub_model(checkpoint, key_prefix="cond_stage_models.0.model.") gpt2_model.load_state_dict(converted_gpt2_checkpoint) # Convert the extra embedding / projection layers projection_model = AudioLDM2ProjectionModel(clap_config.projection_dim, t5_config.d_model, gpt2_config.n_embd) converted_projection_checkpoint = convert_projection_checkpoint(checkpoint) projection_model.load_state_dict(converted_projection_checkpoint) # Instantiate the diffusers pipeline pipe = AudioLDM2Pipeline( vae=vae, text_encoder=clap_model, text_encoder_2=t5_model, projection_model=projection_model, language_model=gpt2_model, tokenizer=clap_tokenizer, tokenizer_2=t5_tokenizer, feature_extractor=clap_feature_extractor, unet=unet, scheduler=scheduler, vocoder=vocoder, ) return pipe if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--cross_attention_dim", default=None, type=int, nargs="+", help="The dimension of the cross-attention layers. If `None`, the cross-attention dimension will be " "automatically inferred. 
Set to `768+1024` for the base model, or `768+1024+640` for the large model", ) parser.add_argument( "--transformer_layers_per_block", default=None, type=int, help="The number of transformer layers in each transformer block. If `None`, number of layers will be " "automatically inferred. Set to `1` for the base model, or `2` for the large model.", ) parser.add_argument( "--scheduler_type", default="ddim", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--image_size", default=1024, type=int, help="The image size that the model was trained on.", ) parser.add_argument( "--prediction_type", default=None, type=str, help=("The prediction type that the model was trained on."), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") args = parser.parse_args() pipe = load_pipeline_from_original_AudioLDM2_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, cross_attention_dim=args.cross_attention_dim, transformer_layers_per_block=args.transformer_layers_per_block, from_safetensors=args.from_safetensors, device=args.device, ) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
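# A minimal usage sketch (not part of the original script): the checkpoint and output
# paths below are placeholders, but every flag corresponds to an argparse option defined above.
#
#   python convert_original_audioldm2_to_diffusers.py \
#       --checkpoint_path ./audioldm2-full.ckpt \
#       --dump_path ./audioldm2-diffusers \
#       --extract_ema \
#       --to_safetensors
#
# The converted folder can then be reloaded with
# `AudioLDM2Pipeline.from_pretrained("./audioldm2-diffusers")`.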
diffusers/scripts/convert_original_audioldm2_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_original_audioldm2_to_diffusers.py", "repo_id": "diffusers", "token_count": 21165 }
140
import random import torch from huggingface_hub import HfApi from diffusers import UNet2DModel api = HfApi() results = {} # fmt: off results["google_ddpm_cifar10_32"] = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) results["google_ddpm_ema_bedroom_256"] = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) results["CompVis_ldm_celebahq_256"] = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) results["google_ncsnpp_ffhq_1024"] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) results["google_ncsnpp_bedroom_256"] = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) results["google_ncsnpp_celebahq_256"] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) results["google_ncsnpp_church_256"] = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) results["google_ncsnpp_ffhq_256"] = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) results["google_ddpm_cat_256"] = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) results["google_ddpm_celebahq_256"] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) results["google_ddpm_ema_celebahq_256"] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) 
results["google_ddpm_church_256"] = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) results["google_ddpm_bedroom_256"] = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) results["google_ddpm_ema_church_256"] = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) results["google_ddpm_ema_cat_256"] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on models = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.id == "CompVis/ldm-celebahq-256": local_checkpoint = "/home/patrick/google_checkpoints/" + mod.id.split("/")[-1] print(f"Started running {mod.id}!!!") if mod.id.startswith("CompVis"): model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet") else: model = UNet2DModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) time_step = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): logits = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.id.split("/")).split("-"))], atol=1e-3 ) print(f"{mod.id} has passed successfully!!!")
diffusers/scripts/generate_logits.py/0
{ "file_path": "diffusers/scripts/generate_logits.py", "repo_id": "diffusers", "token_count": 3530 }
141
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union from ..utils import is_torch_available if is_torch_available(): from .adaptive_projected_guidance import AdaptiveProjectedGuidance from .auto_guidance import AutoGuidance from .classifier_free_guidance import ClassifierFreeGuidance from .classifier_free_zero_star_guidance import ClassifierFreeZeroStarGuidance from .frequency_decoupled_guidance import FrequencyDecoupledGuidance from .perturbed_attention_guidance import PerturbedAttentionGuidance from .skip_layer_guidance import SkipLayerGuidance from .smoothed_energy_guidance import SmoothedEnergyGuidance from .tangential_classifier_free_guidance import TangentialClassifierFreeGuidance GuiderType = Union[ AdaptiveProjectedGuidance, AutoGuidance, ClassifierFreeGuidance, ClassifierFreeZeroStarGuidance, FrequencyDecoupledGuidance, PerturbedAttentionGuidance, SkipLayerGuidance, SmoothedEnergyGuidance, TangentialClassifierFreeGuidance, ]
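# A minimal sketch of how `GuiderType` is meant to be consumed (the helper below is
# illustrative only and not part of this module): downstream code can accept any of the
# guidance implementations re-exported above behind a single annotation.
#
#   def describe_guider(guider: "GuiderType") -> str:
#       return guider.__class__.__name__
#
# Any instance of the classes listed in the `Union` (e.g. `ClassifierFreeGuidance`)
# satisfies the annotation.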
diffusers/src/diffusers/guiders/__init__.py/0
{ "file_path": "diffusers/src/diffusers/guiders/__init__.py", "repo_id": "diffusers", "token_count": 543 }
142
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import os from contextlib import contextmanager, nullcontext from dataclasses import dataclass from enum import Enum from typing import Dict, List, Optional, Set, Tuple, Union import safetensors.torch import torch from ..utils import get_logger, is_accelerate_available from ._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS from .hooks import HookRegistry, ModelHook if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload from accelerate.utils import send_to_device logger = get_logger(__name__) # pylint: disable=invalid-name # fmt: off _GROUP_OFFLOADING = "group_offloading" _LAYER_EXECUTION_TRACKER = "layer_execution_tracker" _LAZY_PREFETCH_GROUP_OFFLOADING = "lazy_prefetch_group_offloading" _GROUP_ID_LAZY_LEAF = "lazy_leafs" # fmt: on class GroupOffloadingType(str, Enum): BLOCK_LEVEL = "block_level" LEAF_LEVEL = "leaf_level" @dataclass class GroupOffloadingConfig: onload_device: torch.device offload_device: torch.device offload_type: GroupOffloadingType non_blocking: bool record_stream: bool low_cpu_mem_usage: bool num_blocks_per_group: Optional[int] = None offload_to_disk_path: Optional[str] = None stream: Optional[Union[torch.cuda.Stream, torch.Stream]] = None class ModuleGroup: def __init__( self, modules: List[torch.nn.Module], offload_device: torch.device, onload_device: torch.device, offload_leader: torch.nn.Module, onload_leader: Optional[torch.nn.Module] = None, parameters: Optional[List[torch.nn.Parameter]] = None, buffers: Optional[List[torch.Tensor]] = None, non_blocking: bool = False, stream: Union[torch.cuda.Stream, torch.Stream, None] = None, record_stream: Optional[bool] = False, low_cpu_mem_usage: bool = False, onload_self: bool = True, offload_to_disk_path: Optional[str] = None, group_id: Optional[int] = None, ) -> None: self.modules = modules self.offload_device = offload_device self.onload_device = onload_device self.offload_leader = offload_leader self.onload_leader = onload_leader self.parameters = parameters or [] self.buffers = buffers or [] self.non_blocking = non_blocking or stream is not None self.stream = stream self.record_stream = record_stream self.onload_self = onload_self self.low_cpu_mem_usage = low_cpu_mem_usage self.offload_to_disk_path = offload_to_disk_path self._is_offloaded_to_disk = False if self.offload_to_disk_path is not None: # Instead of `group_id or str(id(self))` we do this because `group_id` can be "" as well. 
self.group_id = group_id if group_id is not None else str(id(self)) short_hash = _compute_group_hash(self.group_id) self.safetensors_file_path = os.path.join(self.offload_to_disk_path, f"group_{short_hash}.safetensors") all_tensors = [] for module in self.modules: all_tensors.extend(list(module.parameters())) all_tensors.extend(list(module.buffers())) all_tensors.extend(self.parameters) all_tensors.extend(self.buffers) all_tensors = list(dict.fromkeys(all_tensors)) # Remove duplicates self.tensor_to_key = {tensor: f"tensor_{i}" for i, tensor in enumerate(all_tensors)} self.key_to_tensor = {v: k for k, v in self.tensor_to_key.items()} self.cpu_param_dict = {} else: self.cpu_param_dict = self._init_cpu_param_dict() self._torch_accelerator_module = ( getattr(torch, torch.accelerator.current_accelerator().type) if hasattr(torch, "accelerator") else torch.cuda ) def _init_cpu_param_dict(self): cpu_param_dict = {} if self.stream is None: return cpu_param_dict for module in self.modules: for param in module.parameters(): cpu_param_dict[param] = param.data.cpu() if self.low_cpu_mem_usage else param.data.cpu().pin_memory() for buffer in module.buffers(): cpu_param_dict[buffer] = ( buffer.data.cpu() if self.low_cpu_mem_usage else buffer.data.cpu().pin_memory() ) for param in self.parameters: cpu_param_dict[param] = param.data.cpu() if self.low_cpu_mem_usage else param.data.cpu().pin_memory() for buffer in self.buffers: cpu_param_dict[buffer] = buffer.data.cpu() if self.low_cpu_mem_usage else buffer.data.cpu().pin_memory() return cpu_param_dict @contextmanager def _pinned_memory_tensors(self): try: pinned_dict = { param: tensor.pin_memory() if not tensor.is_pinned() else tensor for param, tensor in self.cpu_param_dict.items() } yield pinned_dict finally: pinned_dict = None def _transfer_tensor_to_device(self, tensor, source_tensor): tensor.data = source_tensor.to(self.onload_device, non_blocking=self.non_blocking) if self.record_stream: tensor.data.record_stream(self._torch_accelerator_module.current_stream()) def _process_tensors_from_modules(self, pinned_memory=None): for group_module in self.modules: for param in group_module.parameters(): source = pinned_memory[param] if pinned_memory else param.data self._transfer_tensor_to_device(param, source) for buffer in group_module.buffers(): source = pinned_memory[buffer] if pinned_memory else buffer.data self._transfer_tensor_to_device(buffer, source) for param in self.parameters: source = pinned_memory[param] if pinned_memory else param.data self._transfer_tensor_to_device(param, source) for buffer in self.buffers: source = pinned_memory[buffer] if pinned_memory else buffer.data self._transfer_tensor_to_device(buffer, source) def _onload_from_disk(self): if self.stream is not None: # Wait for previous Host->Device transfer to complete self.stream.synchronize() context = nullcontext() if self.stream is None else self._torch_accelerator_module.stream(self.stream) current_stream = self._torch_accelerator_module.current_stream() if self.record_stream else None with context: # Load to CPU (if using streams) or directly to target device, pin, and async copy to device device = str(self.onload_device) if self.stream is None else "cpu" loaded_tensors = safetensors.torch.load_file(self.safetensors_file_path, device=device) if self.stream is not None: for key, tensor_obj in self.key_to_tensor.items(): pinned_tensor = loaded_tensors[key].pin_memory() tensor_obj.data = pinned_tensor.to(self.onload_device, non_blocking=self.non_blocking) if self.record_stream: 
tensor_obj.data.record_stream(current_stream) else: onload_device = ( self.onload_device.type if isinstance(self.onload_device, torch.device) else self.onload_device ) loaded_tensors = safetensors.torch.load_file(self.safetensors_file_path, device=onload_device) for key, tensor_obj in self.key_to_tensor.items(): tensor_obj.data = loaded_tensors[key] def _onload_from_memory(self): if self.stream is not None: # Wait for previous Host->Device transfer to complete self.stream.synchronize() context = nullcontext() if self.stream is None else self._torch_accelerator_module.stream(self.stream) with context: if self.stream is not None: with self._pinned_memory_tensors() as pinned_memory: self._process_tensors_from_modules(pinned_memory) else: self._process_tensors_from_modules(None) def _offload_to_disk(self): # TODO: we can potentially optimize this code path by checking if the _all_ the desired # safetensor files exist on the disk and if so, skip this step entirely, reducing IO # overhead. Currently, we just check if the given `safetensors_file_path` exists and if not # we perform a write. # Check if the file has been saved in this session or if it already exists on disk. if not self._is_offloaded_to_disk and not os.path.exists(self.safetensors_file_path): os.makedirs(os.path.dirname(self.safetensors_file_path), exist_ok=True) tensors_to_save = {key: tensor.data.to(self.offload_device) for tensor, key in self.tensor_to_key.items()} safetensors.torch.save_file(tensors_to_save, self.safetensors_file_path) # The group is now considered offloaded to disk for the rest of the session. self._is_offloaded_to_disk = True # We do this to free up the RAM which is still holding the up tensor data. for tensor_obj in self.tensor_to_key.keys(): tensor_obj.data = torch.empty_like(tensor_obj.data, device=self.offload_device) def _offload_to_memory(self): if self.stream is not None: if not self.record_stream: self._torch_accelerator_module.current_stream().synchronize() for group_module in self.modules: for param in group_module.parameters(): param.data = self.cpu_param_dict[param] for param in self.parameters: param.data = self.cpu_param_dict[param] for buffer in self.buffers: buffer.data = self.cpu_param_dict[buffer] else: for group_module in self.modules: group_module.to(self.offload_device, non_blocking=False) for param in self.parameters: param.data = param.data.to(self.offload_device, non_blocking=False) for buffer in self.buffers: buffer.data = buffer.data.to(self.offload_device, non_blocking=False) @torch.compiler.disable() def onload_(self): r"""Onloads the group of parameters to the onload_device.""" if self.offload_to_disk_path is not None: self._onload_from_disk() else: self._onload_from_memory() @torch.compiler.disable() def offload_(self): r"""Offloads the group of parameters to the offload_device.""" if self.offload_to_disk_path: self._offload_to_disk() else: self._offload_to_memory() class GroupOffloadingHook(ModelHook): r""" A hook that offloads groups of torch.nn.Module to the CPU for storage and onloads to accelerator device for computation. Each group has one "onload leader" module that is responsible for onloading, and an "offload leader" module that is responsible for offloading. If prefetching is enabled, the onload leader of the previous module group is responsible for onloading the current module group. 
""" _is_stateful = False def __init__(self, group: ModuleGroup, *, config: GroupOffloadingConfig) -> None: self.group = group self.next_group: Optional[ModuleGroup] = None self.config = config def initialize_hook(self, module: torch.nn.Module) -> torch.nn.Module: if self.group.offload_leader == module: self.group.offload_() return module def pre_forward(self, module: torch.nn.Module, *args, **kwargs): # If there wasn't an onload_leader assigned, we assume that the submodule that first called its forward # method is the onload_leader of the group. if self.group.onload_leader is None: self.group.onload_leader = module # If the current module is the onload_leader of the group, we onload the group if it is supposed # to onload itself. In the case of using prefetching with streams, we onload the next group if # it is not supposed to onload itself. if self.group.onload_leader == module: if self.group.onload_self: self.group.onload_() should_onload_next_group = self.next_group is not None and not self.next_group.onload_self if should_onload_next_group: self.next_group.onload_() should_synchronize = ( not self.group.onload_self and self.group.stream is not None and not should_onload_next_group ) if should_synchronize: # If this group didn't onload itself, it means it was asynchronously onloaded by the # previous group. We need to synchronize the side stream to ensure parameters # are completely loaded to proceed with forward pass. Without this, uninitialized # weights will be used in the computation, leading to incorrect results # Also, we should only do this synchronization if we don't already do it from the sync call in # self.next_group.onload_, hence the `not should_onload_next_group` check. self.group.stream.synchronize() args = send_to_device(args, self.group.onload_device, non_blocking=self.group.non_blocking) kwargs = send_to_device(kwargs, self.group.onload_device, non_blocking=self.group.non_blocking) return args, kwargs def post_forward(self, module: torch.nn.Module, output): if self.group.offload_leader == module: self.group.offload_() return output class LazyPrefetchGroupOffloadingHook(ModelHook): r""" A hook, used in conjunction with GroupOffloadingHook, that applies lazy prefetching to groups of torch.nn.Module. This hook is used to determine the order in which the layers are executed during the forward pass. Once the layer invocation order is known, assignments of the next_group attribute for prefetching can be made, which allows prefetching groups in the correct order. """ _is_stateful = False def __init__(self): self.execution_order: List[Tuple[str, torch.nn.Module]] = [] self._layer_execution_tracker_module_names = set() def initialize_hook(self, module): def make_execution_order_update_callback(current_name, current_submodule): def callback(): if not torch.compiler.is_compiling(): logger.debug(f"Adding {current_name} to the execution order") self.execution_order.append((current_name, current_submodule)) return callback # To every submodule that contains a group offloading hook (at this point, no prefetching is enabled for any # of the groups), we add a layer execution tracker hook that will be used to determine the order in which the # layers are executed during the forward pass. 
for name, submodule in module.named_modules(): if name == "" or not hasattr(submodule, "_diffusers_hook"): continue registry = HookRegistry.check_if_exists_or_initialize(submodule) group_offloading_hook = registry.get_hook(_GROUP_OFFLOADING) if group_offloading_hook is not None: # For the first forward pass, we have to load in a blocking manner group_offloading_hook.group.non_blocking = False layer_tracker_hook = LayerExecutionTrackerHook(make_execution_order_update_callback(name, submodule)) registry.register_hook(layer_tracker_hook, _LAYER_EXECUTION_TRACKER) self._layer_execution_tracker_module_names.add(name) return module def post_forward(self, module, output): # At this point, for the current modules' submodules, we know the execution order of the layers. We can now # remove the layer execution tracker hooks and apply prefetching by setting the next_group attribute for each # group offloading hook. num_executed = len(self.execution_order) execution_order_module_names = {name for name, _ in self.execution_order} # It may be possible that some layers were not executed during the forward pass. This can happen if the layer # is not used in the forward pass, or if the layer is not executed due to some other reason. In such cases, we # may not be able to apply prefetching in the correct order, which can lead to device-mismatch related errors # if the missing layers end up being executed in the future. if execution_order_module_names != self._layer_execution_tracker_module_names: unexecuted_layers = list(self._layer_execution_tracker_module_names - execution_order_module_names) if not torch.compiler.is_compiling(): logger.warning( "It seems like some layers were not executed during the forward pass. This may lead to problems when " "applying lazy prefetching with automatic tracing and lead to device-mismatch related errors. Please " "make sure that all layers are executed during the forward pass. The following layers were not executed:\n" f"{unexecuted_layers=}" ) # Remove the layer execution tracker hooks from the submodules base_module_registry = module._diffusers_hook registries = [submodule._diffusers_hook for _, submodule in self.execution_order] group_offloading_hooks = [registry.get_hook(_GROUP_OFFLOADING) for registry in registries] for i in range(num_executed): registries[i].remove_hook(_LAYER_EXECUTION_TRACKER, recurse=False) # Remove the current lazy prefetch group offloading hook so that it doesn't interfere with the next forward pass base_module_registry.remove_hook(_LAZY_PREFETCH_GROUP_OFFLOADING, recurse=False) # LazyPrefetchGroupOffloadingHook is only used with streams, so we know that non_blocking should be True. # We disable non_blocking for the first forward pass, but need to enable it for the subsequent passes to # see the benefits of prefetching. 
for hook in group_offloading_hooks: hook.group.non_blocking = True # Set required attributes for prefetching if num_executed > 0: base_module_group_offloading_hook = base_module_registry.get_hook(_GROUP_OFFLOADING) base_module_group_offloading_hook.next_group = group_offloading_hooks[0].group base_module_group_offloading_hook.next_group.onload_self = False for i in range(num_executed - 1): name1, _ = self.execution_order[i] name2, _ = self.execution_order[i + 1] if not torch.compiler.is_compiling(): logger.debug(f"Applying lazy prefetch group offloading from {name1} to {name2}") group_offloading_hooks[i].next_group = group_offloading_hooks[i + 1].group group_offloading_hooks[i].next_group.onload_self = False return output class LayerExecutionTrackerHook(ModelHook): r""" A hook that tracks the order in which the layers are executed during the forward pass by calling back to the LazyPrefetchGroupOffloadingHook to update the execution order. """ _is_stateful = False def __init__(self, execution_order_update_callback): self.execution_order_update_callback = execution_order_update_callback def pre_forward(self, module, *args, **kwargs): self.execution_order_update_callback() return args, kwargs def apply_group_offloading( module: torch.nn.Module, onload_device: Union[str, torch.device], offload_device: Union[str, torch.device] = torch.device("cpu"), offload_type: Union[str, GroupOffloadingType] = "block_level", num_blocks_per_group: Optional[int] = None, non_blocking: bool = False, use_stream: bool = False, record_stream: bool = False, low_cpu_mem_usage: bool = False, offload_to_disk_path: Optional[str] = None, ) -> None: r""" Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, and where it is beneficial, we need to first provide some context on how other supported offloading methods work. Typically, offloading is done at two levels: - Module-level: In Diffusers, this can be enabled using the `ModelMixin::enable_model_cpu_offload()` method. It works by offloading each component of a pipeline to the CPU for storage, and onloading to the accelerator device when needed for computation. This method is more memory-efficient than keeping all components on the accelerator, but the memory requirements are still quite high. For this method to work, one needs memory equivalent to size of the model in runtime dtype + size of largest intermediate activation tensors to be able to complete the forward pass. - Leaf-level: In Diffusers, this can be enabled using the `ModelMixin::enable_sequential_cpu_offload()` method. It works by offloading the lowest leaf-level parameters of the computation graph to the CPU for storage, and onloading only the leafs to the accelerator device for computation. This uses the lowest amount of accelerator memory, but can be slower due to the excessive number of device synchronizations. Group offloading is a middle ground between the two methods. It works by offloading groups of internal layers, (either `torch.nn.ModuleList` or `torch.nn.Sequential`). This method uses lower memory than module-level offloading. It is also faster than leaf-level/sequential offloading, as the number of device synchronizations is reduced. Another supported feature (for CUDA devices with support for asynchronous data transfer streams) is the ability to overlap data transfer and computation to reduce the overall execution time compared to sequential offloading. 
This is enabled using layer prefetching with streams, i.e., the layer that is to be executed next starts onloading to the accelerator device while the current layer is being executed - this increases the memory requirements slightly. Note that this implementation also supports leaf-level offloading but can be made much faster when using streams. Args: module (`torch.nn.Module`): The module to which group offloading is applied. onload_device (`torch.device`): The device to which the group of modules are onloaded. offload_device (`torch.device`, defaults to `torch.device("cpu")`): The device to which the group of modules are offloaded. This should typically be the CPU. Default is CPU. offload_type (`str` or `GroupOffloadingType`, defaults to "block_level"): The type of offloading to be applied. Can be one of "block_level" or "leaf_level". Default is "block_level". offload_to_disk_path (`str`, *optional*, defaults to `None`): The path to the directory where parameters will be offloaded. Setting this option can be useful in limited RAM environment settings where a reasonable speed-memory trade-off is desired. num_blocks_per_group (`int`, *optional*): The number of blocks per group when using offload_type="block_level". This is required when using offload_type="block_level". non_blocking (`bool`, defaults to `False`): If True, offloading and onloading is done with non-blocking data transfer. use_stream (`bool`, defaults to `False`): If True, offloading and onloading is done asynchronously using a CUDA stream. This can be useful for overlapping computation and data transfer. record_stream (`bool`, defaults to `False`): When enabled with `use_stream`, it marks the current tensor as having been used by this stream. It is faster at the expense of slightly more memory usage. Refer to the [PyTorch official docs](https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html) more details. low_cpu_mem_usage (`bool`, defaults to `False`): If True, the CPU memory usage is minimized by pinning tensors on-the-fly instead of pre-pinning them. This option only matters when using streamed CPU offloading (i.e. `use_stream=True`). This can be useful when the CPU memory is a bottleneck but may counteract the benefits of using streams. Example: ```python >>> from diffusers import CogVideoXTransformer3DModel >>> from diffusers.hooks import apply_group_offloading >>> transformer = CogVideoXTransformer3DModel.from_pretrained( ... "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16 ... ) >>> apply_group_offloading( ... transformer, ... onload_device=torch.device("cuda"), ... offload_device=torch.device("cpu"), ... offload_type="block_level", ... num_blocks_per_group=2, ... use_stream=True, ... 
) ``` """ onload_device = torch.device(onload_device) if isinstance(onload_device, str) else onload_device offload_device = torch.device(offload_device) if isinstance(offload_device, str) else offload_device offload_type = GroupOffloadingType(offload_type) stream = None if use_stream: if torch.cuda.is_available(): stream = torch.cuda.Stream() elif hasattr(torch, "xpu") and torch.xpu.is_available(): stream = torch.Stream() else: raise ValueError("Using streams for data transfer requires a CUDA device, or an Intel XPU device.") if not use_stream and record_stream: raise ValueError("`record_stream` cannot be True when `use_stream=False`.") if offload_type == GroupOffloadingType.BLOCK_LEVEL and num_blocks_per_group is None: raise ValueError("`num_blocks_per_group` must be provided when using `offload_type='block_level'.") _raise_error_if_accelerate_model_or_sequential_hook_present(module) config = GroupOffloadingConfig( onload_device=onload_device, offload_device=offload_device, offload_type=offload_type, num_blocks_per_group=num_blocks_per_group, non_blocking=non_blocking, stream=stream, record_stream=record_stream, low_cpu_mem_usage=low_cpu_mem_usage, offload_to_disk_path=offload_to_disk_path, ) _apply_group_offloading(module, config) def _apply_group_offloading(module: torch.nn.Module, config: GroupOffloadingConfig) -> None: if config.offload_type == GroupOffloadingType.BLOCK_LEVEL: _apply_group_offloading_block_level(module, config) elif config.offload_type == GroupOffloadingType.LEAF_LEVEL: _apply_group_offloading_leaf_level(module, config) else: assert False def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None: r""" This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks. In comparison to the "leaf_level" offloading, which is more fine-grained, this offloading is done at the top-level blocks. """ if config.stream is not None and config.num_blocks_per_group != 1: logger.warning( f"Using streams is only supported for num_blocks_per_group=1. Got {config.num_blocks_per_group=}. Setting it to 1." 
) config.num_blocks_per_group = 1 # Create module groups for ModuleList and Sequential blocks modules_with_group_offloading = set() unmatched_modules = [] matched_module_groups = [] for name, submodule in module.named_children(): if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)): unmatched_modules.append((name, submodule)) modules_with_group_offloading.add(name) continue for i in range(0, len(submodule), config.num_blocks_per_group): current_modules = submodule[i : i + config.num_blocks_per_group] group_id = f"{name}_{i}_{i + len(current_modules) - 1}" group = ModuleGroup( modules=current_modules, offload_device=config.offload_device, onload_device=config.onload_device, offload_to_disk_path=config.offload_to_disk_path, offload_leader=current_modules[-1], onload_leader=current_modules[0], non_blocking=config.non_blocking, stream=config.stream, record_stream=config.record_stream, low_cpu_mem_usage=config.low_cpu_mem_usage, onload_self=True, group_id=group_id, ) matched_module_groups.append(group) for j in range(i, i + len(current_modules)): modules_with_group_offloading.add(f"{name}.{j}") # Apply group offloading hooks to the module groups for i, group in enumerate(matched_module_groups): for group_module in group.modules: _apply_group_offloading_hook(group_module, group, config=config) # Parameters and Buffers of the top-level module need to be offloaded/onloaded separately # when the forward pass of this module is called. This is because the top-level module is not # part of any group (as doing so would lead to no VRAM savings). parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) parameters = [param for _, param in parameters] buffers = [buffer for _, buffer in buffers] # Create a group for the unmatched submodules of the top-level module so that they are on the correct # device when the forward pass is called. unmatched_modules = [unmatched_module for _, unmatched_module in unmatched_modules] unmatched_group = ModuleGroup( modules=unmatched_modules, offload_device=config.offload_device, onload_device=config.onload_device, offload_to_disk_path=config.offload_to_disk_path, offload_leader=module, onload_leader=module, parameters=parameters, buffers=buffers, non_blocking=False, stream=None, record_stream=False, onload_self=True, group_id=f"{module.__class__.__name__}_unmatched_group", ) if config.stream is None: _apply_group_offloading_hook(module, unmatched_group, config=config) else: _apply_lazy_group_offloading_hook(module, unmatched_group, config=config) def _apply_group_offloading_leaf_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None: r""" This function applies offloading to groups of leaf modules in a torch.nn.Module. This method has minimal memory requirements. However, it can be slower compared to other offloading methods due to the excessive number of device synchronizations. When using devices that support streams to overlap data transfer and computation, this method can reduce memory usage without any performance degradation. 
""" # Create module groups for leaf modules and apply group offloading hooks modules_with_group_offloading = set() for name, submodule in module.named_modules(): if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): continue group = ModuleGroup( modules=[submodule], offload_device=config.offload_device, onload_device=config.onload_device, offload_to_disk_path=config.offload_to_disk_path, offload_leader=submodule, onload_leader=submodule, non_blocking=config.non_blocking, stream=config.stream, record_stream=config.record_stream, low_cpu_mem_usage=config.low_cpu_mem_usage, onload_self=True, group_id=name, ) _apply_group_offloading_hook(submodule, group, config=config) modules_with_group_offloading.add(name) # Parameters and Buffers at all non-leaf levels need to be offloaded/onloaded separately when the forward pass # of the module is called module_dict = dict(module.named_modules()) parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) # Find closest module parent for each parameter and buffer, and attach group hooks parent_to_parameters = {} for name, param in parameters: parent_name = _find_parent_module_in_module_dict(name, module_dict) if parent_name in parent_to_parameters: parent_to_parameters[parent_name].append(param) else: parent_to_parameters[parent_name] = [param] parent_to_buffers = {} for name, buffer in buffers: parent_name = _find_parent_module_in_module_dict(name, module_dict) if parent_name in parent_to_buffers: parent_to_buffers[parent_name].append(buffer) else: parent_to_buffers[parent_name] = [buffer] parent_names = set(parent_to_parameters.keys()) | set(parent_to_buffers.keys()) for name in parent_names: parameters = parent_to_parameters.get(name, []) buffers = parent_to_buffers.get(name, []) parent_module = module_dict[name] group = ModuleGroup( modules=[], offload_device=config.offload_device, onload_device=config.onload_device, offload_leader=parent_module, onload_leader=parent_module, offload_to_disk_path=config.offload_to_disk_path, parameters=parameters, buffers=buffers, non_blocking=config.non_blocking, stream=config.stream, record_stream=config.record_stream, low_cpu_mem_usage=config.low_cpu_mem_usage, onload_self=True, group_id=name, ) _apply_group_offloading_hook(parent_module, group, config=config) if config.stream is not None: # When using streams, we need to know the layer execution order for applying prefetching (to overlap data transfer # and computation). Since we don't know the order beforehand, we apply a lazy prefetching hook that will find the # execution order and apply prefetching in the correct order. unmatched_group = ModuleGroup( modules=[], offload_device=config.offload_device, onload_device=config.onload_device, offload_to_disk_path=config.offload_to_disk_path, offload_leader=module, onload_leader=module, parameters=None, buffers=None, non_blocking=False, stream=None, record_stream=False, low_cpu_mem_usage=config.low_cpu_mem_usage, onload_self=True, group_id=_GROUP_ID_LAZY_LEAF, ) _apply_lazy_group_offloading_hook(module, unmatched_group, config=config) def _apply_group_offloading_hook( module: torch.nn.Module, group: ModuleGroup, *, config: GroupOffloadingConfig, ) -> None: registry = HookRegistry.check_if_exists_or_initialize(module) # We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent # is the current module. 
In such cases, we don't want to overwrite the existing group offloading hook. if registry.get_hook(_GROUP_OFFLOADING) is None: hook = GroupOffloadingHook(group, config=config) registry.register_hook(hook, _GROUP_OFFLOADING) def _apply_lazy_group_offloading_hook( module: torch.nn.Module, group: ModuleGroup, *, config: GroupOffloadingConfig, ) -> None: registry = HookRegistry.check_if_exists_or_initialize(module) # We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent # is the current module. In such cases, we don't want to overwrite the existing group offloading hook. if registry.get_hook(_GROUP_OFFLOADING) is None: hook = GroupOffloadingHook(group, config=config) registry.register_hook(hook, _GROUP_OFFLOADING) lazy_prefetch_hook = LazyPrefetchGroupOffloadingHook() registry.register_hook(lazy_prefetch_hook, _LAZY_PREFETCH_GROUP_OFFLOADING) def _gather_parameters_with_no_group_offloading_parent( module: torch.nn.Module, modules_with_group_offloading: Set[str] ) -> List[torch.nn.Parameter]: parameters = [] for name, parameter in module.named_parameters(): has_parent_with_group_offloading = False atoms = name.split(".") while len(atoms) > 0: parent_name = ".".join(atoms) if parent_name in modules_with_group_offloading: has_parent_with_group_offloading = True break atoms.pop() if not has_parent_with_group_offloading: parameters.append((name, parameter)) return parameters def _gather_buffers_with_no_group_offloading_parent( module: torch.nn.Module, modules_with_group_offloading: Set[str] ) -> List[torch.Tensor]: buffers = [] for name, buffer in module.named_buffers(): has_parent_with_group_offloading = False atoms = name.split(".") while len(atoms) > 0: parent_name = ".".join(atoms) if parent_name in modules_with_group_offloading: has_parent_with_group_offloading = True break atoms.pop() if not has_parent_with_group_offloading: buffers.append((name, buffer)) return buffers def _find_parent_module_in_module_dict(name: str, module_dict: Dict[str, torch.nn.Module]) -> str: atoms = name.split(".") while len(atoms) > 0: parent_name = ".".join(atoms) if parent_name in module_dict: return parent_name atoms.pop() return "" def _raise_error_if_accelerate_model_or_sequential_hook_present(module: torch.nn.Module) -> None: if not is_accelerate_available(): return for name, submodule in module.named_modules(): if not hasattr(submodule, "_hf_hook"): continue if isinstance(submodule._hf_hook, (AlignDevicesHook, CpuOffload)): raise ValueError( f"Cannot apply group offloading to a module that is already applying an alternative " f"offloading strategy from Accelerate. If you want to apply group offloading, please " f"disable the existing offloading strategy first. 
Offending module: {name} ({type(submodule)})" ) def _get_top_level_group_offload_hook(module: torch.nn.Module) -> Optional[GroupOffloadingHook]: for submodule in module.modules(): if hasattr(submodule, "_diffusers_hook"): group_offloading_hook = submodule._diffusers_hook.get_hook(_GROUP_OFFLOADING) if group_offloading_hook is not None: return group_offloading_hook return None def _is_group_offload_enabled(module: torch.nn.Module) -> bool: top_level_group_offload_hook = _get_top_level_group_offload_hook(module) return top_level_group_offload_hook is not None def _get_group_onload_device(module: torch.nn.Module) -> torch.device: top_level_group_offload_hook = _get_top_level_group_offload_hook(module) if top_level_group_offload_hook is not None: return top_level_group_offload_hook.config.onload_device raise ValueError("Group offloading is not enabled for the provided module.") def _compute_group_hash(group_id): hashed_id = hashlib.sha256(group_id.encode("utf-8")).hexdigest() # first 16 characters for a reasonably short but unique name return hashed_id[:16] def _maybe_remove_and_reapply_group_offloading(module: torch.nn.Module) -> None: r""" Removes the group offloading hook from the module and re-applies it. This is useful when the module has been modified in-place and the group offloading hook references-to-tensors needs to be updated. The in-place modification can happen in a number of ways, for example, fusing QKV or unloading/loading LoRAs on-the-fly. In this implementation, we make an assumption that group offloading has only been applied at the top-level module, and therefore all submodules have the same onload and offload devices. If this assumption is not true, say in the case where user has applied group offloading at multiple levels, this function will not work as expected. There is some performance penalty associated with doing this when non-default streams are used, because we need to retrace the execution order of the layers with `LazyPrefetchGroupOffloadingHook`. """ top_level_group_offload_hook = _get_top_level_group_offload_hook(module) if top_level_group_offload_hook is None: return registry = HookRegistry.check_if_exists_or_initialize(module) registry.remove_hook(_GROUP_OFFLOADING, recurse=True) registry.remove_hook(_LAYER_EXECUTION_TRACKER, recurse=True) registry.remove_hook(_LAZY_PREFETCH_GROUP_OFFLOADING, recurse=True) _apply_group_offloading(module, top_level_group_offload_hook.config)
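
# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the module's API).
# A minimal, hedged example of applying leaf-level group offloading to a toy
# module and rebuilding the hooks after an in-place modification. The toy
# Sequential model, tensor sizes, and CUDA availability below are assumptions
# made purely for illustration.
if __name__ == "__main__":
    import torch.nn as nn

    # Small stand-in for a real diffusers model; any torch.nn.Module works.
    toy = nn.Sequential(nn.Linear(16, 32), nn.GELU(), nn.Linear(32, 16))

    if torch.cuda.is_available():
        # Leaf-level offloading does not require `num_blocks_per_group`.
        apply_group_offloading(
            toy,
            onload_device=torch.device("cuda"),
            offload_device=torch.device("cpu"),
            offload_type="leaf_level",
            use_stream=False,
        )

        # Each leaf layer is onloaded just before its forward and offloaded after.
        with torch.no_grad():
            _ = toy(torch.randn(1, 16))

        # After an in-place change (e.g. fusing weights or loading a LoRA), the
        # hooks must be rebuilt so they reference the new tensors.
        _maybe_remove_and_reapply_group_offloading(toy)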
diffusers/src/diffusers/hooks/group_offloading.py/0
{ "file_path": "diffusers/src/diffusers/hooks/group_offloading.py", "repo_id": "diffusers", "token_count": 16728 }
143
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the Stable Diffusion checkpoints.""" import copy import os import re from contextlib import nullcontext from io import BytesIO from urllib.parse import urlparse import requests import torch import yaml from ..models.modeling_utils import load_state_dict from ..schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EDMDPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ..utils import ( SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, deprecate, is_accelerate_available, is_transformers_available, logging, ) from ..utils.constants import DIFFUSERS_REQUEST_TIMEOUT from ..utils.hub_utils import _get_model_file from ..utils.torch_utils import empty_device_cache if is_transformers_available(): from transformers import AutoImageProcessor if is_accelerate_available(): from accelerate import init_empty_weights from ..models.model_loading_utils import load_model_dict_into_meta logger = logging.get_logger(__name__) # pylint: disable=invalid-name CHECKPOINT_KEY_NAMES = { "v1": "model.diffusion_model.output_blocks.11.0.skip_connection.weight", "v2": "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", "xl_base": "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias", "xl_refiner": "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias", "upscale": "model.diffusion_model.input_blocks.10.0.skip_connection.bias", "controlnet": [ "control_model.time_embed.0.weight", "controlnet_cond_embedding.conv_in.weight", ], # TODO: find non-Diffusers keys for controlnet_xl "controlnet_xl": "add_embedding.linear_1.weight", "controlnet_xl_large": "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight", "controlnet_xl_mid": "down_blocks.1.attentions.0.norm.weight", "playground-v2-5": "edm_mean", "inpainting": "model.diffusion_model.input_blocks.0.0.weight", "clip": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight", "clip_sdxl": "conditioner.embedders.0.transformer.text_model.embeddings.position_embedding.weight", "clip_sd3": "text_encoders.clip_l.transformer.text_model.embeddings.position_embedding.weight", "open_clip": "cond_stage_model.model.token_embedding.weight", "open_clip_sdxl": "conditioner.embedders.1.model.positional_embedding", "open_clip_sdxl_refiner": "conditioner.embedders.0.model.text_projection", "open_clip_sd3": "text_encoders.clip_g.transformer.text_model.embeddings.position_embedding.weight", "stable_cascade_stage_b": "down_blocks.1.0.channelwise.0.weight", "stable_cascade_stage_c": "clip_txt_mapper.weight", "sd3": [ "joint_blocks.0.context_block.adaLN_modulation.1.bias", "model.diffusion_model.joint_blocks.0.context_block.adaLN_modulation.1.bias", ], "sd35_large": [ "joint_blocks.37.x_block.mlp.fc1.weight", "model.diffusion_model.joint_blocks.37.x_block.mlp.fc1.weight", ], "animatediff": 
"down_blocks.0.motion_modules.0.temporal_transformer.transformer_blocks.0.attention_blocks.0.pos_encoder.pe", "animatediff_v2": "mid_block.motion_modules.0.temporal_transformer.norm.bias", "animatediff_sdxl_beta": "up_blocks.2.motion_modules.0.temporal_transformer.norm.weight", "animatediff_scribble": "controlnet_cond_embedding.conv_in.weight", "animatediff_rgb": "controlnet_cond_embedding.weight", "auraflow": [ "double_layers.0.attn.w2q.weight", "double_layers.0.attn.w1q.weight", "cond_seq_linear.weight", "t_embedder.mlp.0.weight", ], "flux": [ "double_blocks.0.img_attn.norm.key_norm.scale", "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale", ], "ltx-video": [ "model.diffusion_model.patchify_proj.weight", "model.diffusion_model.transformer_blocks.27.scale_shift_table", "patchify_proj.weight", "transformer_blocks.27.scale_shift_table", "vae.per_channel_statistics.mean-of-means", ], "autoencoder-dc": "decoder.stages.1.op_list.0.main.conv.conv.bias", "autoencoder-dc-sana": "encoder.project_in.conv.bias", "mochi-1-preview": ["model.diffusion_model.blocks.0.attn.qkv_x.weight", "blocks.0.attn.qkv_x.weight"], "hunyuan-video": "txt_in.individual_token_refiner.blocks.0.adaLN_modulation.1.bias", "instruct-pix2pix": "model.diffusion_model.input_blocks.0.0.weight", "lumina2": ["model.diffusion_model.cap_embedder.0.weight", "cap_embedder.0.weight"], "sana": [ "blocks.0.cross_attn.q_linear.weight", "blocks.0.cross_attn.q_linear.bias", "blocks.0.cross_attn.kv_linear.weight", "blocks.0.cross_attn.kv_linear.bias", ], "wan": ["model.diffusion_model.head.modulation", "head.modulation"], "wan_vae": "decoder.middle.0.residual.0.gamma", "wan_vace": "vace_blocks.0.after_proj.bias", "hidream": "double_stream_blocks.0.block.adaLN_modulation.1.bias", "cosmos-1.0": [ "net.x_embedder.proj.1.weight", "net.blocks.block1.blocks.0.block.attn.to_q.0.weight", "net.extra_pos_embedder.pos_emb_h", ], "cosmos-2.0": [ "net.x_embedder.proj.1.weight", "net.blocks.0.self_attn.q_proj.weight", "net.pos_embedder.dim_spatial_range", ], } DIFFUSERS_DEFAULT_PIPELINE_PATHS = { "xl_base": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0"}, "xl_refiner": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-refiner-1.0"}, "xl_inpaint": {"pretrained_model_name_or_path": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"}, "playground-v2-5": {"pretrained_model_name_or_path": "playgroundai/playground-v2.5-1024px-aesthetic"}, "upscale": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-x4-upscaler"}, "inpainting": {"pretrained_model_name_or_path": "stable-diffusion-v1-5/stable-diffusion-inpainting"}, "inpainting_v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-inpainting"}, "controlnet": {"pretrained_model_name_or_path": "lllyasviel/control_v11p_sd15_canny"}, "controlnet_xl_large": {"pretrained_model_name_or_path": "diffusers/controlnet-canny-sdxl-1.0"}, "controlnet_xl_mid": {"pretrained_model_name_or_path": "diffusers/controlnet-canny-sdxl-1.0-mid"}, "controlnet_xl_small": {"pretrained_model_name_or_path": "diffusers/controlnet-canny-sdxl-1.0-small"}, "v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-1"}, "v1": {"pretrained_model_name_or_path": "stable-diffusion-v1-5/stable-diffusion-v1-5"}, "stable_cascade_stage_b": {"pretrained_model_name_or_path": "stabilityai/stable-cascade", "subfolder": "decoder"}, "stable_cascade_stage_b_lite": { "pretrained_model_name_or_path": "stabilityai/stable-cascade", "subfolder": "decoder_lite", 
}, "stable_cascade_stage_c": { "pretrained_model_name_or_path": "stabilityai/stable-cascade-prior", "subfolder": "prior", }, "stable_cascade_stage_c_lite": { "pretrained_model_name_or_path": "stabilityai/stable-cascade-prior", "subfolder": "prior_lite", }, "sd3": { "pretrained_model_name_or_path": "stabilityai/stable-diffusion-3-medium-diffusers", }, "sd35_large": { "pretrained_model_name_or_path": "stabilityai/stable-diffusion-3.5-large", }, "sd35_medium": { "pretrained_model_name_or_path": "stabilityai/stable-diffusion-3.5-medium", }, "animatediff_v1": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5"}, "animatediff_v2": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5-2"}, "animatediff_v3": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5-3"}, "animatediff_sdxl_beta": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-sdxl-beta"}, "animatediff_scribble": {"pretrained_model_name_or_path": "guoyww/animatediff-sparsectrl-scribble"}, "animatediff_rgb": {"pretrained_model_name_or_path": "guoyww/animatediff-sparsectrl-rgb"}, "auraflow": {"pretrained_model_name_or_path": "fal/AuraFlow-v0.3"}, "flux-dev": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-dev"}, "flux-fill": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-Fill-dev"}, "flux-depth": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-Depth-dev"}, "flux-schnell": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-schnell"}, "ltx-video": {"pretrained_model_name_or_path": "diffusers/LTX-Video-0.9.0"}, "ltx-video-0.9.1": {"pretrained_model_name_or_path": "diffusers/LTX-Video-0.9.1"}, "ltx-video-0.9.5": {"pretrained_model_name_or_path": "Lightricks/LTX-Video-0.9.5"}, "ltx-video-0.9.7": {"pretrained_model_name_or_path": "Lightricks/LTX-Video-0.9.7-dev"}, "autoencoder-dc-f128c512": {"pretrained_model_name_or_path": "mit-han-lab/dc-ae-f128c512-mix-1.0-diffusers"}, "autoencoder-dc-f64c128": {"pretrained_model_name_or_path": "mit-han-lab/dc-ae-f64c128-mix-1.0-diffusers"}, "autoencoder-dc-f32c32": {"pretrained_model_name_or_path": "mit-han-lab/dc-ae-f32c32-mix-1.0-diffusers"}, "autoencoder-dc-f32c32-sana": {"pretrained_model_name_or_path": "mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers"}, "mochi-1-preview": {"pretrained_model_name_or_path": "genmo/mochi-1-preview"}, "hunyuan-video": {"pretrained_model_name_or_path": "hunyuanvideo-community/HunyuanVideo"}, "instruct-pix2pix": {"pretrained_model_name_or_path": "timbrooks/instruct-pix2pix"}, "lumina2": {"pretrained_model_name_or_path": "Alpha-VLLM/Lumina-Image-2.0"}, "sana": {"pretrained_model_name_or_path": "Efficient-Large-Model/Sana_1600M_1024px_diffusers"}, "wan-t2v-1.3B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"}, "wan-t2v-14B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-T2V-14B-Diffusers"}, "wan-i2v-14B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"}, "wan-vace-1.3B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-VACE-1.3B-diffusers"}, "wan-vace-14B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-VACE-14B-diffusers"}, "hidream": {"pretrained_model_name_or_path": "HiDream-ai/HiDream-I1-Dev"}, "cosmos-1.0-t2w-7B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World"}, "cosmos-1.0-t2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-14B-Text2World"}, "cosmos-1.0-v2w-7B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World"}, 
"cosmos-1.0-v2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-14B-Video2World"}, "cosmos-2.0-t2i-2B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-2B-Text2Image"}, "cosmos-2.0-t2i-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Text2Image"}, "cosmos-2.0-v2w-2B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-2B-Video2World"}, "cosmos-2.0-v2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Video2World"}, } # Use to configure model sample size when original config is provided DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP = { "xl_base": 1024, "xl_refiner": 1024, "xl_inpaint": 1024, "playground-v2-5": 1024, "upscale": 512, "inpainting": 512, "inpainting_v2": 512, "controlnet": 512, "instruct-pix2pix": 512, "v2": 768, "v1": 512, } DIFFUSERS_TO_LDM_MAPPING = { "unet": { "layers": { "time_embedding.linear_1.weight": "time_embed.0.weight", "time_embedding.linear_1.bias": "time_embed.0.bias", "time_embedding.linear_2.weight": "time_embed.2.weight", "time_embedding.linear_2.bias": "time_embed.2.bias", "conv_in.weight": "input_blocks.0.0.weight", "conv_in.bias": "input_blocks.0.0.bias", "conv_norm_out.weight": "out.0.weight", "conv_norm_out.bias": "out.0.bias", "conv_out.weight": "out.2.weight", "conv_out.bias": "out.2.bias", }, "class_embed_type": { "class_embedding.linear_1.weight": "label_emb.0.0.weight", "class_embedding.linear_1.bias": "label_emb.0.0.bias", "class_embedding.linear_2.weight": "label_emb.0.2.weight", "class_embedding.linear_2.bias": "label_emb.0.2.bias", }, "addition_embed_type": { "add_embedding.linear_1.weight": "label_emb.0.0.weight", "add_embedding.linear_1.bias": "label_emb.0.0.bias", "add_embedding.linear_2.weight": "label_emb.0.2.weight", "add_embedding.linear_2.bias": "label_emb.0.2.bias", }, }, "controlnet": { "layers": { "time_embedding.linear_1.weight": "time_embed.0.weight", "time_embedding.linear_1.bias": "time_embed.0.bias", "time_embedding.linear_2.weight": "time_embed.2.weight", "time_embedding.linear_2.bias": "time_embed.2.bias", "conv_in.weight": "input_blocks.0.0.weight", "conv_in.bias": "input_blocks.0.0.bias", "controlnet_cond_embedding.conv_in.weight": "input_hint_block.0.weight", "controlnet_cond_embedding.conv_in.bias": "input_hint_block.0.bias", "controlnet_cond_embedding.conv_out.weight": "input_hint_block.14.weight", "controlnet_cond_embedding.conv_out.bias": "input_hint_block.14.bias", }, "class_embed_type": { "class_embedding.linear_1.weight": "label_emb.0.0.weight", "class_embedding.linear_1.bias": "label_emb.0.0.bias", "class_embedding.linear_2.weight": "label_emb.0.2.weight", "class_embedding.linear_2.bias": "label_emb.0.2.bias", }, "addition_embed_type": { "add_embedding.linear_1.weight": "label_emb.0.0.weight", "add_embedding.linear_1.bias": "label_emb.0.0.bias", "add_embedding.linear_2.weight": "label_emb.0.2.weight", "add_embedding.linear_2.bias": "label_emb.0.2.bias", }, }, "vae": { "encoder.conv_in.weight": "encoder.conv_in.weight", "encoder.conv_in.bias": "encoder.conv_in.bias", "encoder.conv_out.weight": "encoder.conv_out.weight", "encoder.conv_out.bias": "encoder.conv_out.bias", "encoder.conv_norm_out.weight": "encoder.norm_out.weight", "encoder.conv_norm_out.bias": "encoder.norm_out.bias", "decoder.conv_in.weight": "decoder.conv_in.weight", "decoder.conv_in.bias": "decoder.conv_in.bias", "decoder.conv_out.weight": "decoder.conv_out.weight", "decoder.conv_out.bias": "decoder.conv_out.bias", "decoder.conv_norm_out.weight": "decoder.norm_out.weight", 
"decoder.conv_norm_out.bias": "decoder.norm_out.bias", "quant_conv.weight": "quant_conv.weight", "quant_conv.bias": "quant_conv.bias", "post_quant_conv.weight": "post_quant_conv.weight", "post_quant_conv.bias": "post_quant_conv.bias", }, "openclip": { "layers": { "text_model.embeddings.position_embedding.weight": "positional_embedding", "text_model.embeddings.token_embedding.weight": "token_embedding.weight", "text_model.final_layer_norm.weight": "ln_final.weight", "text_model.final_layer_norm.bias": "ln_final.bias", "text_projection.weight": "text_projection", }, "transformer": { "text_model.encoder.layers.": "resblocks.", "layer_norm1": "ln_1", "layer_norm2": "ln_2", ".fc1.": ".c_fc.", ".fc2.": ".c_proj.", ".self_attn": ".attn", "transformer.text_model.final_layer_norm.": "ln_final.", "transformer.text_model.embeddings.token_embedding.weight": "token_embedding.weight", "transformer.text_model.embeddings.position_embedding.weight": "positional_embedding", }, }, } SD_2_TEXT_ENCODER_KEYS_TO_IGNORE = [ "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_bias", "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_weight", "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.bias", "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.weight", "cond_stage_model.model.transformer.resblocks.23.ln_1.bias", "cond_stage_model.model.transformer.resblocks.23.ln_1.weight", "cond_stage_model.model.transformer.resblocks.23.ln_2.bias", "cond_stage_model.model.transformer.resblocks.23.ln_2.weight", "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.bias", "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.weight", "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.bias", "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.weight", "cond_stage_model.model.text_projection", ] # To support legacy scheduler_type argument SCHEDULER_DEFAULT_CONFIG = { "beta_schedule": "scaled_linear", "beta_start": 0.00085, "beta_end": 0.012, "interpolation_type": "linear", "num_train_timesteps": 1000, "prediction_type": "epsilon", "sample_max_value": 1.0, "set_alpha_to_one": False, "skip_prk_steps": True, "steps_offset": 1, "timestep_spacing": "leading", } LDM_VAE_KEYS = ["first_stage_model.", "vae."] LDM_VAE_DEFAULT_SCALING_FACTOR = 0.18215 PLAYGROUND_VAE_SCALING_FACTOR = 0.5 LDM_UNET_KEY = "model.diffusion_model." LDM_CONTROLNET_KEY = "control_model." LDM_CLIP_PREFIX_TO_REMOVE = [ "cond_stage_model.transformer.", "conditioner.embedders.0.transformer.", ] LDM_OPEN_CLIP_TEXT_PROJECTION_DIM = 1024 SCHEDULER_LEGACY_KWARGS = ["prediction_type", "scheduler_type"] VALID_URL_PREFIXES = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"] class SingleFileComponentError(Exception): def __init__(self, message=None): self.message = message super().__init__(self.message) def is_valid_url(url): result = urlparse(url) if result.scheme and result.netloc: return True return False def _extract_repo_id_and_weights_name(pretrained_model_name_or_path): if not is_valid_url(pretrained_model_name_or_path): raise ValueError("Invalid `pretrained_model_name_or_path` provided. 
Please set it to a valid URL.") pattern = r"([^/]+)/([^/]+)/(?:blob/main/)?(.+)" weights_name = None repo_id = (None,) for prefix in VALID_URL_PREFIXES: pretrained_model_name_or_path = pretrained_model_name_or_path.replace(prefix, "") match = re.match(pattern, pretrained_model_name_or_path) if not match: logger.warning("Unable to identify the repo_id and weights_name from the provided URL.") return repo_id, weights_name repo_id = f"{match.group(1)}/{match.group(2)}" weights_name = match.group(3) return repo_id, weights_name def _is_model_weights_in_cached_folder(cached_folder, name): pretrained_model_name_or_path = os.path.join(cached_folder, name) weights_exist = False for weights_name in [WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME]: if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): weights_exist = True return weights_exist def _is_legacy_scheduler_kwargs(kwargs): return any(k in SCHEDULER_LEGACY_KWARGS for k in kwargs.keys()) def load_single_file_checkpoint( pretrained_model_link_or_path, force_download=False, proxies=None, token=None, cache_dir=None, local_files_only=None, revision=None, disable_mmap=False, user_agent=None, ): if user_agent is None: user_agent = {"file_type": "single_file", "framework": "pytorch"} if os.path.isfile(pretrained_model_link_or_path): pretrained_model_link_or_path = pretrained_model_link_or_path else: repo_id, weights_name = _extract_repo_id_and_weights_name(pretrained_model_link_or_path) pretrained_model_link_or_path = _get_model_file( repo_id, weights_name=weights_name, force_download=force_download, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, user_agent=user_agent, ) checkpoint = load_state_dict(pretrained_model_link_or_path, disable_mmap=disable_mmap) # some checkpoints contain the model state dict under a "state_dict" key while "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] return checkpoint def fetch_original_config(original_config_file, local_files_only=False): if os.path.isfile(original_config_file): with open(original_config_file, "r") as fp: original_config_file = fp.read() elif is_valid_url(original_config_file): if local_files_only: raise ValueError( "`local_files_only` is set to True, but a URL was provided as `original_config_file`. " "Please provide a valid local file path." ) original_config_file = BytesIO(requests.get(original_config_file, timeout=DIFFUSERS_REQUEST_TIMEOUT).content) else: raise ValueError("Invalid `original_config_file` provided. 
Please set it to a valid file path or URL.") original_config = yaml.safe_load(original_config_file) return original_config def is_clip_model(checkpoint): if CHECKPOINT_KEY_NAMES["clip"] in checkpoint: return True return False def is_clip_sdxl_model(checkpoint): if CHECKPOINT_KEY_NAMES["clip_sdxl"] in checkpoint: return True return False def is_clip_sd3_model(checkpoint): if CHECKPOINT_KEY_NAMES["clip_sd3"] in checkpoint: return True return False def is_open_clip_model(checkpoint): if CHECKPOINT_KEY_NAMES["open_clip"] in checkpoint: return True return False def is_open_clip_sdxl_model(checkpoint): if CHECKPOINT_KEY_NAMES["open_clip_sdxl"] in checkpoint: return True return False def is_open_clip_sd3_model(checkpoint): if CHECKPOINT_KEY_NAMES["open_clip_sd3"] in checkpoint: return True return False def is_open_clip_sdxl_refiner_model(checkpoint): if CHECKPOINT_KEY_NAMES["open_clip_sdxl_refiner"] in checkpoint: return True return False def is_clip_model_in_single_file(class_obj, checkpoint): is_clip_in_checkpoint = any( [ is_clip_model(checkpoint), is_clip_sd3_model(checkpoint), is_open_clip_model(checkpoint), is_open_clip_sdxl_model(checkpoint), is_open_clip_sdxl_refiner_model(checkpoint), is_open_clip_sd3_model(checkpoint), ] ) if ( class_obj.__name__ == "CLIPTextModel" or class_obj.__name__ == "CLIPTextModelWithProjection" ) and is_clip_in_checkpoint: return True return False def infer_diffusers_model_type(checkpoint): if ( CHECKPOINT_KEY_NAMES["inpainting"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["inpainting"]].shape[1] == 9 ): if CHECKPOINT_KEY_NAMES["v2"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["v2"]].shape[-1] == 1024: model_type = "inpainting_v2" elif CHECKPOINT_KEY_NAMES["xl_base"] in checkpoint: model_type = "xl_inpaint" else: model_type = "inpainting" elif CHECKPOINT_KEY_NAMES["v2"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["v2"]].shape[-1] == 1024: model_type = "v2" elif CHECKPOINT_KEY_NAMES["playground-v2-5"] in checkpoint: model_type = "playground-v2-5" elif CHECKPOINT_KEY_NAMES["xl_base"] in checkpoint: model_type = "xl_base" elif CHECKPOINT_KEY_NAMES["xl_refiner"] in checkpoint: model_type = "xl_refiner" elif CHECKPOINT_KEY_NAMES["upscale"] in checkpoint: model_type = "upscale" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["controlnet"]): if CHECKPOINT_KEY_NAMES["controlnet_xl"] in checkpoint: if CHECKPOINT_KEY_NAMES["controlnet_xl_large"] in checkpoint: model_type = "controlnet_xl_large" elif CHECKPOINT_KEY_NAMES["controlnet_xl_mid"] in checkpoint: model_type = "controlnet_xl_mid" else: model_type = "controlnet_xl_small" else: model_type = "controlnet" elif ( CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"]].shape[0] == 1536 ): model_type = "stable_cascade_stage_c_lite" elif ( CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"]].shape[0] == 2048 ): model_type = "stable_cascade_stage_c" elif ( CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"]].shape[-1] == 576 ): model_type = "stable_cascade_stage_b_lite" elif ( CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"]].shape[-1] == 640 ): model_type = "stable_cascade_stage_b" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["sd3"]) and any( checkpoint[key].shape[-1] == 9216 if key in checkpoint else 
False for key in CHECKPOINT_KEY_NAMES["sd3"] ): if "model.diffusion_model.pos_embed" in checkpoint: key = "model.diffusion_model.pos_embed" else: key = "pos_embed" if checkpoint[key].shape[1] == 36864: model_type = "sd3" elif checkpoint[key].shape[1] == 147456: model_type = "sd35_medium" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["sd35_large"]): model_type = "sd35_large" elif CHECKPOINT_KEY_NAMES["animatediff"] in checkpoint: if CHECKPOINT_KEY_NAMES["animatediff_scribble"] in checkpoint: model_type = "animatediff_scribble" elif CHECKPOINT_KEY_NAMES["animatediff_rgb"] in checkpoint: model_type = "animatediff_rgb" elif CHECKPOINT_KEY_NAMES["animatediff_v2"] in checkpoint: model_type = "animatediff_v2" elif checkpoint[CHECKPOINT_KEY_NAMES["animatediff_sdxl_beta"]].shape[-1] == 320: model_type = "animatediff_sdxl_beta" elif checkpoint[CHECKPOINT_KEY_NAMES["animatediff"]].shape[1] == 24: model_type = "animatediff_v1" else: model_type = "animatediff_v3" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["flux"]): if any( g in checkpoint for g in ["guidance_in.in_layer.bias", "model.diffusion_model.guidance_in.in_layer.bias"] ): if "model.diffusion_model.img_in.weight" in checkpoint: key = "model.diffusion_model.img_in.weight" else: key = "img_in.weight" if checkpoint[key].shape[1] == 384: model_type = "flux-fill" elif checkpoint[key].shape[1] == 128: model_type = "flux-depth" else: model_type = "flux-dev" else: model_type = "flux-schnell" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["ltx-video"]): has_vae = "vae.encoder.conv_in.conv.bias" in checkpoint if any(key.endswith("transformer_blocks.47.scale_shift_table") for key in checkpoint): model_type = "ltx-video-0.9.7" elif has_vae and checkpoint["vae.encoder.conv_out.conv.weight"].shape[1] == 2048: model_type = "ltx-video-0.9.5" elif "vae.decoder.last_time_embedder.timestep_embedder.linear_1.weight" in checkpoint: model_type = "ltx-video-0.9.1" else: model_type = "ltx-video" elif CHECKPOINT_KEY_NAMES["autoencoder-dc"] in checkpoint: encoder_key = "encoder.project_in.conv.conv.bias" decoder_key = "decoder.project_in.main.conv.weight" if CHECKPOINT_KEY_NAMES["autoencoder-dc-sana"] in checkpoint: model_type = "autoencoder-dc-f32c32-sana" elif checkpoint[encoder_key].shape[-1] == 64 and checkpoint[decoder_key].shape[1] == 32: model_type = "autoencoder-dc-f32c32" elif checkpoint[encoder_key].shape[-1] == 64 and checkpoint[decoder_key].shape[1] == 128: model_type = "autoencoder-dc-f64c128" else: model_type = "autoencoder-dc-f128c512" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["mochi-1-preview"]): model_type = "mochi-1-preview" elif CHECKPOINT_KEY_NAMES["hunyuan-video"] in checkpoint: model_type = "hunyuan-video" elif all(key in checkpoint for key in CHECKPOINT_KEY_NAMES["auraflow"]): model_type = "auraflow" elif ( CHECKPOINT_KEY_NAMES["instruct-pix2pix"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["instruct-pix2pix"]].shape[1] == 8 ): model_type = "instruct-pix2pix" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["lumina2"]): model_type = "lumina2" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["sana"]): model_type = "sana" elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["wan"]): if "model.diffusion_model.patch_embedding.weight" in checkpoint: target_key = "model.diffusion_model.patch_embedding.weight" else: target_key = "patch_embedding.weight" if CHECKPOINT_KEY_NAMES["wan_vace"] in checkpoint: if checkpoint[target_key].shape[0] == 1536: model_type = 
"wan-vace-1.3B" elif checkpoint[target_key].shape[0] == 5120: model_type = "wan-vace-14B" elif checkpoint[target_key].shape[0] == 1536: model_type = "wan-t2v-1.3B" elif checkpoint[target_key].shape[0] == 5120 and checkpoint[target_key].shape[1] == 16: model_type = "wan-t2v-14B" else: model_type = "wan-i2v-14B" elif CHECKPOINT_KEY_NAMES["wan_vae"] in checkpoint: # All Wan models use the same VAE so we can use the same default model repo to fetch the config model_type = "wan-t2v-14B" elif CHECKPOINT_KEY_NAMES["hidream"] in checkpoint: model_type = "hidream" elif all(key in checkpoint for key in CHECKPOINT_KEY_NAMES["cosmos-1.0"]): x_embedder_shape = checkpoint[CHECKPOINT_KEY_NAMES["cosmos-1.0"][0]].shape if x_embedder_shape[1] == 68: model_type = "cosmos-1.0-t2w-7B" if x_embedder_shape[0] == 4096 else "cosmos-1.0-t2w-14B" elif x_embedder_shape[1] == 72: model_type = "cosmos-1.0-v2w-7B" if x_embedder_shape[0] == 4096 else "cosmos-1.0-v2w-14B" else: raise ValueError(f"Unexpected x_embedder shape: {x_embedder_shape} when loading Cosmos 1.0 model.") elif all(key in checkpoint for key in CHECKPOINT_KEY_NAMES["cosmos-2.0"]): x_embedder_shape = checkpoint[CHECKPOINT_KEY_NAMES["cosmos-2.0"][0]].shape if x_embedder_shape[1] == 68: model_type = "cosmos-2.0-t2i-2B" if x_embedder_shape[0] == 2048 else "cosmos-2.0-t2i-14B" elif x_embedder_shape[1] == 72: model_type = "cosmos-2.0-v2w-2B" if x_embedder_shape[0] == 2048 else "cosmos-2.0-v2w-14B" else: raise ValueError(f"Unexpected x_embedder shape: {x_embedder_shape} when loading Cosmos 2.0 model.") else: model_type = "v1" return model_type def fetch_diffusers_config(checkpoint): model_type = infer_diffusers_model_type(checkpoint) model_path = DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type] model_path = copy.deepcopy(model_path) return model_path def set_image_size(checkpoint, image_size=None): if image_size: return image_size model_type = infer_diffusers_model_type(checkpoint) image_size = DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP[model_type] return image_size # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["query.weight", "key.weight", "value.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_unet_diffusers_config_from_ldm( original_config, checkpoint, image_size=None, upcast_attention=None, num_in_channels=None ): """ Creates a config for the diffusers based on the config of the LDM model. """ if image_size is not None: deprecation_message = ( "Configuring UNet2DConditionModel with the `image_size` argument to `from_single_file`" "is deprecated and will be ignored in future versions." ) deprecate("image_size", "1.0.0", deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) if ( "unet_config" in original_config["model"]["params"] and original_config["model"]["params"]["unet_config"] is not None ): unet_params = original_config["model"]["params"]["unet_config"]["params"] else: unet_params = original_config["model"]["params"]["network_config"]["params"] if num_in_channels is not None: deprecation_message = ( "Configuring UNet2DConditionModel with the `num_in_channels` argument to `from_single_file`" "is deprecated and will be ignored in future versions." 
) deprecate("image_size", "1.0.0", deprecation_message) in_channels = num_in_channels else: in_channels = unet_params["in_channels"] vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 if unet_params["transformer_depth"] is not None: transformer_layers_per_block = ( unet_params["transformer_depth"] if isinstance(unet_params["transformer_depth"], int) else list(unet_params["transformer_depth"]) ) else: transformer_layers_per_block = 1 vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None use_linear_projection = ( unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False ) if use_linear_projection: # stable diffusion 2-base-512 and 2-768 if head_dim is None: head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] class_embed_type = None addition_embed_type = None addition_time_embed_dim = None projection_class_embeddings_input_dim = None context_dim = None if unet_params["context_dim"] is not None: context_dim = ( unet_params["context_dim"] if isinstance(unet_params["context_dim"], int) else unet_params["context_dim"][0] ) if "num_classes" in unet_params: if unet_params["num_classes"] == "sequential": if context_dim in [2048, 1280]: # SDXL addition_embed_type = "text_time" addition_time_embed_dim = 256 else: class_embed_type = "projection" assert "adm_in_channels" in unet_params projection_class_embeddings_input_dim = unet_params["adm_in_channels"] config = { "sample_size": image_size // vae_scale_factor, "in_channels": in_channels, "down_block_types": down_block_types, "block_out_channels": block_out_channels, "layers_per_block": unet_params["num_res_blocks"], "cross_attention_dim": context_dim, "attention_head_dim": head_dim, "use_linear_projection": use_linear_projection, "class_embed_type": class_embed_type, "addition_embed_type": addition_embed_type, "addition_time_embed_dim": addition_time_embed_dim, "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, "transformer_layers_per_block": transformer_layers_per_block, } if upcast_attention is not None: deprecation_message = ( "Configuring UNet2DConditionModel with the `upcast_attention` argument to `from_single_file`" "is deprecated and will be ignored in future versions." 
) deprecate("image_size", "1.0.0", deprecation_message) config["upcast_attention"] = upcast_attention if "disable_self_attentions" in unet_params: config["only_cross_attention"] = unet_params["disable_self_attentions"] if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): config["num_class_embeds"] = unet_params["num_classes"] config["out_channels"] = unet_params["out_channels"] config["up_block_types"] = up_block_types return config def create_controlnet_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, **kwargs): if image_size is not None: deprecation_message = ( "Configuring ControlNetModel with the `image_size` argument" "is deprecated and will be ignored in future versions." ) deprecate("image_size", "1.0.0", deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) unet_params = original_config["model"]["params"]["control_stage_config"]["params"] diffusers_unet_config = create_unet_diffusers_config_from_ldm(original_config, image_size=image_size) controlnet_config = { "conditioning_channels": unet_params["hint_channels"], "in_channels": diffusers_unet_config["in_channels"], "down_block_types": diffusers_unet_config["down_block_types"], "block_out_channels": diffusers_unet_config["block_out_channels"], "layers_per_block": diffusers_unet_config["layers_per_block"], "cross_attention_dim": diffusers_unet_config["cross_attention_dim"], "attention_head_dim": diffusers_unet_config["attention_head_dim"], "use_linear_projection": diffusers_unet_config["use_linear_projection"], "class_embed_type": diffusers_unet_config["class_embed_type"], "addition_embed_type": diffusers_unet_config["addition_embed_type"], "addition_time_embed_dim": diffusers_unet_config["addition_time_embed_dim"], "projection_class_embeddings_input_dim": diffusers_unet_config["projection_class_embeddings_input_dim"], "transformer_layers_per_block": diffusers_unet_config["transformer_layers_per_block"], } return controlnet_config def create_vae_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, scaling_factor=None): """ Creates a config for the diffusers based on the config of the LDM model. """ if image_size is not None: deprecation_message = ( "Configuring AutoencoderKL with the `image_size` argument" "is deprecated and will be ignored in future versions." 
) deprecate("image_size", "1.0.0", deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) if "edm_mean" in checkpoint and "edm_std" in checkpoint: latents_mean = checkpoint["edm_mean"] latents_std = checkpoint["edm_std"] else: latents_mean = None latents_std = None vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] if (scaling_factor is None) and (latents_mean is not None) and (latents_std is not None): scaling_factor = PLAYGROUND_VAE_SCALING_FACTOR elif (scaling_factor is None) and ("scale_factor" in original_config["model"]["params"]): scaling_factor = original_config["model"]["params"]["scale_factor"] elif scaling_factor is None: scaling_factor = LDM_VAE_DEFAULT_SCALING_FACTOR block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) config = { "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], "down_block_types": down_block_types, "up_block_types": up_block_types, "block_out_channels": block_out_channels, "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], "scaling_factor": scaling_factor, } if latents_mean is not None and latents_std is not None: config.update({"latents_mean": latents_mean, "latents_std": latents_std}) return config def update_unet_resnet_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping=None): for ldm_key in ldm_keys: diffusers_key = ( ldm_key.replace("in_layers.0", "norm1") .replace("in_layers.2", "conv1") .replace("out_layers.0", "norm2") .replace("out_layers.3", "conv2") .replace("emb_layers.1", "time_emb_proj") .replace("skip_connection", "conv_shortcut") ) if mapping: diffusers_key = diffusers_key.replace(mapping["old"], mapping["new"]) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_unet_attention_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping): for ldm_key in ldm_keys: diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_vae_resnet_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]).replace("nin_shortcut", "conv_shortcut") new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_vae_attentions_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ( ldm_key.replace(mapping["old"], mapping["new"]) .replace("norm.weight", "group_norm.weight") .replace("norm.bias", "group_norm.bias") .replace("q.weight", "to_q.weight") .replace("q.bias", "to_q.bias") .replace("k.weight", "to_k.weight") .replace("k.bias", "to_k.bias") .replace("v.weight", "to_v.weight") .replace("v.bias", "to_v.bias") .replace("proj_out.weight", "to_out.0.weight") .replace("proj_out.bias", "to_out.0.bias") ) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) # proj_attn.weight has to be converted from conv 1D to linear shape = new_checkpoint[diffusers_key].shape if len(shape) == 3: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0] elif len(shape) == 4: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0, 0] def convert_stable_cascade_unet_single_file_to_diffusers(checkpoint, **kwargs): is_stage_c = "clip_txt_mapper.weight" in checkpoint if is_stage_c: state_dict = {} 
for key in checkpoint.keys(): if key.endswith("in_proj_weight"): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = checkpoint[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = checkpoint[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights else: state_dict[key] = checkpoint[key] else: state_dict = {} for key in checkpoint.keys(): if key.endswith("in_proj_weight"): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = checkpoint[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = checkpoint[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights # rename clip_mapper to clip_txt_pooled_mapper elif key.endswith("clip_mapper.weight"): weights = checkpoint[key] state_dict[key.replace("clip_mapper.weight", "clip_txt_pooled_mapper.weight")] = weights elif key.endswith("clip_mapper.bias"): weights = checkpoint[key] state_dict[key.replace("clip_mapper.bias", "clip_txt_pooled_mapper.bias")] = weights else: state_dict[key] = checkpoint[key] return state_dict def convert_ldm_unet_checkpoint(checkpoint, config, extract_ema=False, **kwargs): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = LDM_UNET_KEY # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: logger.warning("Checkpoint has both EMA and non-EMA weights.") logger.warning( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.get(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: logger.warning( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.get(key) new_checkpoint = {} ldm_unet_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["layers"] for diffusers_key, ldm_key in ldm_unet_keys.items(): if ldm_key not in unet_state_dict: continue new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] if ("class_embed_type" in config) and (config["class_embed_type"] in ["timestep", "projection"]): class_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["class_embed_type"] for diffusers_key, ldm_key in class_embed_keys.items(): new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] if ("addition_embed_type" in config) and (config["addition_embed_type"] == "text_time"): addition_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["addition_embed_type"] for diffusers_key, ldm_key in addition_embed_keys.items(): new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] # Relevant to StableDiffusionUpscalePipeline if "num_class_embeds" in config: if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } # Down blocks for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] update_unet_resnet_ldm_to_diffusers( resnets, new_checkpoint, unet_state_dict, {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, ) if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.get( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.get( f"input_blocks.{i}.0.op.bias" ) attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if attentions: update_unet_attention_ldm_to_diffusers( attentions, new_checkpoint, unet_state_dict, {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, ) # Mid blocks for key in middle_blocks.keys(): diffusers_key = max(key - 1, 0) if key % 2 == 0: update_unet_resnet_ldm_to_diffusers( middle_blocks[key], new_checkpoint, unet_state_dict, mapping={"old": f"middle_block.{key}", "new": f"mid_block.resnets.{diffusers_key}"}, ) else: update_unet_attention_ldm_to_diffusers( middle_blocks[key], new_checkpoint, unet_state_dict, mapping={"old": f"middle_block.{key}", "new": 
f"mid_block.attentions.{diffusers_key}"}, ) # Up Blocks for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) resnets = [ key for key in output_blocks[i] if f"output_blocks.{i}.0" in key and f"output_blocks.{i}.0.op" not in key ] update_unet_resnet_ldm_to_diffusers( resnets, new_checkpoint, unet_state_dict, {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}, ) attentions = [ key for key in output_blocks[i] if f"output_blocks.{i}.1" in key and f"output_blocks.{i}.1.conv" not in key ] if attentions: update_unet_attention_ldm_to_diffusers( attentions, new_checkpoint, unet_state_dict, {"old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}"}, ) if f"output_blocks.{i}.1.conv.weight" in unet_state_dict: new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.1.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.1.conv.bias" ] if f"output_blocks.{i}.2.conv.weight" in unet_state_dict: new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.2.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.2.conv.bias" ] return new_checkpoint def convert_controlnet_checkpoint( checkpoint, config, **kwargs, ): # Return checkpoint if it's already been converted if "time_embedding.linear_1.weight" in checkpoint: return checkpoint # Some controlnet ckpt files are distributed independently from the rest of the # model components i.e. https://huggingface.co/thibaud/controlnet-sd21/ if "time_embed.0.weight" in checkpoint: controlnet_state_dict = checkpoint else: controlnet_state_dict = {} keys = list(checkpoint.keys()) controlnet_key = LDM_CONTROLNET_KEY for key in keys: if key.startswith(controlnet_key): controlnet_state_dict[key.replace(controlnet_key, "")] = checkpoint.get(key) new_checkpoint = {} ldm_controlnet_keys = DIFFUSERS_TO_LDM_MAPPING["controlnet"]["layers"] for diffusers_key, ldm_key in ldm_controlnet_keys.items(): if ldm_key not in controlnet_state_dict: continue new_checkpoint[diffusers_key] = controlnet_state_dict[ldm_key] # Retrieves the keys for the input blocks only num_input_blocks = len( {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "input_blocks" in layer} ) input_blocks = { layer_id: [key for key in controlnet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Down blocks for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] update_unet_resnet_ldm_to_diffusers( resnets, new_checkpoint, controlnet_state_dict, {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, ) if f"input_blocks.{i}.0.op.weight" in controlnet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = controlnet_state_dict.get( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = controlnet_state_dict.get( f"input_blocks.{i}.0.op.bias" ) attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if attentions: 
update_unet_attention_ldm_to_diffusers( attentions, new_checkpoint, controlnet_state_dict, {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, ) # controlnet down blocks for i in range(num_input_blocks): new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = controlnet_state_dict.get(f"zero_convs.{i}.0.weight") new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = controlnet_state_dict.get(f"zero_convs.{i}.0.bias") # Retrieves the keys for the middle blocks only num_middle_blocks = len( {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "middle_block" in layer} ) middle_blocks = { layer_id: [key for key in controlnet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Mid blocks for key in middle_blocks.keys(): diffusers_key = max(key - 1, 0) if key % 2 == 0: update_unet_resnet_ldm_to_diffusers( middle_blocks[key], new_checkpoint, controlnet_state_dict, mapping={"old": f"middle_block.{key}", "new": f"mid_block.resnets.{diffusers_key}"}, ) else: update_unet_attention_ldm_to_diffusers( middle_blocks[key], new_checkpoint, controlnet_state_dict, mapping={"old": f"middle_block.{key}", "new": f"mid_block.attentions.{diffusers_key}"}, ) # mid block new_checkpoint["controlnet_mid_block.weight"] = controlnet_state_dict.get("middle_block_out.0.weight") new_checkpoint["controlnet_mid_block.bias"] = controlnet_state_dict.get("middle_block_out.0.bias") # controlnet cond embedding blocks cond_embedding_blocks = { ".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "input_hint_block" in layer and ("input_hint_block.0" not in layer) and ("input_hint_block.14" not in layer) } num_cond_embedding_blocks = len(cond_embedding_blocks) for idx in range(1, num_cond_embedding_blocks + 1): diffusers_idx = idx - 1 cond_block_id = 2 * idx new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.weight"] = controlnet_state_dict.get( f"input_hint_block.{cond_block_id}.weight" ) new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.bias"] = controlnet_state_dict.get( f"input_hint_block.{cond_block_id}.bias" ) return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE # remove the LDM_VAE_KEY prefix from the ldm checkpoint keys so that it is easier to map them to diffusers keys vae_state_dict = {} keys = list(checkpoint.keys()) vae_key = "" for ldm_vae_key in LDM_VAE_KEYS: if any(k.startswith(ldm_vae_key) for k in keys): vae_key = ldm_vae_key for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} vae_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["vae"] for diffusers_key, ldm_key in vae_diffusers_ldm_map.items(): if ldm_key not in vae_state_dict: continue new_checkpoint[diffusers_key] = vae_state_dict[ldm_key] # Retrieves the keys for the encoder down blocks only num_down_blocks = len(config["down_block_types"]) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}, ) if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = 
vae_state_dict.get( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.get( f"encoder.down.{i}.downsample.conv.bias" ) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, ) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] update_vae_attentions_ldm_to_diffusers( mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} ) # Retrieves the keys for the decoder up blocks only num_up_blocks = len(config["up_block_types"]) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}, ) if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, ) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] update_vae_attentions_ldm_to_diffusers( mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} ) conv_attn_to_linear(new_checkpoint) return new_checkpoint def convert_ldm_clip_checkpoint(checkpoint, remove_prefix=None): keys = list(checkpoint.keys()) text_model_dict = {} remove_prefixes = [] remove_prefixes.extend(LDM_CLIP_PREFIX_TO_REMOVE) if remove_prefix: remove_prefixes.append(remove_prefix) for key in keys: for prefix in remove_prefixes: if key.startswith(prefix): diffusers_key = key.replace(prefix, "") text_model_dict[diffusers_key] = checkpoint.get(key) return text_model_dict def convert_open_clip_checkpoint( text_model, checkpoint, prefix="cond_stage_model.model.", ): text_model_dict = {} text_proj_key = prefix + "text_projection" if text_proj_key in checkpoint: text_proj_dim = int(checkpoint[text_proj_key].shape[0]) elif hasattr(text_model.config, "hidden_size"): text_proj_dim = text_model.config.hidden_size else: text_proj_dim = LDM_OPEN_CLIP_TEXT_PROJECTION_DIM keys = list(checkpoint.keys()) keys_to_ignore = SD_2_TEXT_ENCODER_KEYS_TO_IGNORE openclip_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["layers"] for diffusers_key, ldm_key in openclip_diffusers_ldm_map.items(): ldm_key = prefix + ldm_key if ldm_key not in checkpoint: continue if ldm_key in keys_to_ignore: continue if ldm_key.endswith("text_projection"): 
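            # the checkpoint stores text_projection in (hidden, proj) layout for `x @ W`, while the
            # Linear weight layout is (proj, hidden), hence the transpose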
            text_model_dict[diffusers_key] = checkpoint[ldm_key].T.contiguous()
        else:
            text_model_dict[diffusers_key] = checkpoint[ldm_key]

    for key in keys:
        if key in keys_to_ignore:
            continue

        if not key.startswith(prefix + "transformer."):
            continue

        diffusers_key = key.replace(prefix + "transformer.", "")
        transformer_diffusers_to_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["transformer"]
        for new_key, old_key in transformer_diffusers_to_ldm_map.items():
            diffusers_key = (
                diffusers_key.replace(old_key, new_key).replace(".in_proj_weight", "").replace(".in_proj_bias", "")
            )

        if key.endswith(".in_proj_weight"):
            weight_value = checkpoint.get(key)

            text_model_dict[diffusers_key + ".q_proj.weight"] = weight_value[:text_proj_dim, :].clone().detach()
            text_model_dict[diffusers_key + ".k_proj.weight"] = (
                weight_value[text_proj_dim : text_proj_dim * 2, :].clone().detach()
            )
            text_model_dict[diffusers_key + ".v_proj.weight"] = weight_value[text_proj_dim * 2 :, :].clone().detach()

        elif key.endswith(".in_proj_bias"):
            weight_value = checkpoint.get(key)
            text_model_dict[diffusers_key + ".q_proj.bias"] = weight_value[:text_proj_dim].clone().detach()
            text_model_dict[diffusers_key + ".k_proj.bias"] = (
                weight_value[text_proj_dim : text_proj_dim * 2].clone().detach()
            )
            text_model_dict[diffusers_key + ".v_proj.bias"] = weight_value[text_proj_dim * 2 :].clone().detach()

        else:
            text_model_dict[diffusers_key] = checkpoint.get(key)

    return text_model_dict


def create_diffusers_clip_model_from_ldm(
    cls,
    checkpoint,
    subfolder="",
    config=None,
    torch_dtype=None,
    local_files_only=None,
    is_legacy_loading=False,
):
    if config:
        config = {"pretrained_model_name_or_path": config}
    else:
        config = fetch_diffusers_config(checkpoint)

    # For backwards compatibility
    # Older versions of `from_single_file` expected CLIP configs to be placed in their original transformers model repo
    # in the cache_dir, rather than in a subfolder of the Diffusers model
    if is_legacy_loading:
        logger.warning(
            (
                "Detected legacy CLIP loading behavior. Please run `from_single_file` with `local_files_only=False` once to update "
                "the local cache directory with the necessary CLIP model config files. "
                "Attempting to load CLIP model from legacy cache directory."
) ) if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): clip_config = "openai/clip-vit-large-patch14" config["pretrained_model_name_or_path"] = clip_config subfolder = "" elif is_open_clip_model(checkpoint): clip_config = "stabilityai/stable-diffusion-2" config["pretrained_model_name_or_path"] = clip_config subfolder = "text_encoder" else: clip_config = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" config["pretrained_model_name_or_path"] = clip_config subfolder = "" model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): model = cls(model_config) position_embedding_dim = model.text_model.embeddings.position_embedding.weight.shape[-1] if is_clip_model(checkpoint): diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) elif ( is_clip_sdxl_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES["clip_sdxl"]].shape[-1] == position_embedding_dim ): diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) elif ( is_clip_sd3_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES["clip_sd3"]].shape[-1] == position_embedding_dim ): diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_l.transformer.") diffusers_format_checkpoint["text_projection.weight"] = torch.eye(position_embedding_dim) elif is_open_clip_model(checkpoint): prefix = "cond_stage_model.model." diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) elif ( is_open_clip_sdxl_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES["open_clip_sdxl"]].shape[-1] == position_embedding_dim ): prefix = "conditioner.embedders.1.model." diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) elif is_open_clip_sdxl_refiner_model(checkpoint): prefix = "conditioner.embedders.0.model." 
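        # the SDXL refiner ships a single OpenCLIP text encoder, stored under `conditioner.embedders.0`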
        diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix)

    elif (
        is_open_clip_sd3_model(checkpoint)
        and checkpoint[CHECKPOINT_KEY_NAMES["open_clip_sd3"]].shape[-1] == position_embedding_dim
    ):
        diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_g.transformer.")

    else:
        raise ValueError("The provided checkpoint does not seem to contain a valid CLIP model.")

    if is_accelerate_available():
        load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype)
        empty_device_cache()
    else:
        model.load_state_dict(diffusers_format_checkpoint, strict=False)

    if torch_dtype is not None:
        model.to(torch_dtype)

    model.eval()

    return model


def _legacy_load_scheduler(
    cls,
    checkpoint,
    component_name,
    original_config=None,
    **kwargs,
):
    scheduler_type = kwargs.get("scheduler_type", None)
    prediction_type = kwargs.get("prediction_type", None)

    if scheduler_type is not None:
        deprecation_message = (
            "Please pass an instance of a Scheduler object directly to the `scheduler` argument in `from_single_file`\n\n"
            "Example:\n\n"
            "from diffusers import StableDiffusionPipeline, DDIMScheduler\n\n"
            "scheduler = DDIMScheduler()\n"
            "pipe = StableDiffusionPipeline.from_single_file(<checkpoint path>, scheduler=scheduler)\n"
        )
        deprecate("scheduler_type", "1.0.0", deprecation_message)

    if prediction_type is not None:
        deprecation_message = (
            "Please configure an instance of a Scheduler with the appropriate `prediction_type` and "
            "pass the object directly to the `scheduler` argument in `from_single_file`.\n\n"
            "Example:\n\n"
            "from diffusers import StableDiffusionPipeline, DDIMScheduler\n\n"
            'scheduler = DDIMScheduler(prediction_type="v_prediction")\n'
            "pipe = StableDiffusionPipeline.from_single_file(<checkpoint path>, scheduler=scheduler)\n"
        )
        deprecate("prediction_type", "1.0.0", deprecation_message)

    scheduler_config = SCHEDULER_DEFAULT_CONFIG
    model_type = infer_diffusers_model_type(checkpoint=checkpoint)

    global_step = checkpoint["global_step"] if "global_step" in checkpoint else None

    if original_config:
        # the original config is a plain dict, so read the value with `.get` rather than `getattr`
        num_train_timesteps = original_config["model"]["params"].get("timesteps", 1000)
    else:
        num_train_timesteps = 1000

    scheduler_config["num_train_timesteps"] = num_train_timesteps

    if model_type == "v2":
        if prediction_type is None:
            # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type="epsilon"`
            # as it relies on a brittle global step parameter here
            prediction_type = "epsilon" if global_step == 875000 else "v_prediction"

    else:
        prediction_type = prediction_type or "epsilon"

    scheduler_config["prediction_type"] = prediction_type

    if model_type in ["xl_base", "xl_refiner"]:
        scheduler_type = "euler"
    elif model_type == "playground":
        scheduler_type = "edm_dpm_solver_multistep"
    else:
        if original_config:
            beta_start = original_config["model"]["params"].get("linear_start")
            beta_end = original_config["model"]["params"].get("linear_end")
        else:
            beta_start = 0.02
            beta_end = 0.085

        scheduler_config["beta_start"] = beta_start
        scheduler_config["beta_end"] = beta_end
        scheduler_config["beta_schedule"] = "scaled_linear"
        scheduler_config["clip_sample"] = False
        scheduler_config["set_alpha_to_one"] = False

    # to deal with an edge case: the StableDiffusionUpscale pipeline has two schedulers
    if component_name == "low_res_scheduler":
        return cls.from_config(
            {
                "beta_end": 0.02,
                "beta_schedule": "scaled_linear",
                "beta_start": 0.0001,
                "clip_sample": True,
                "num_train_timesteps": 1000,
                "prediction_type": "epsilon",
                "trained_betas": None,
                "variance_type": "fixed_small",
} ) if scheduler_type is None: return cls.from_config(scheduler_config) elif scheduler_type == "pndm": scheduler_config["skip_prk_steps"] = True scheduler = PNDMScheduler.from_config(scheduler_config) elif scheduler_type == "lms": scheduler = LMSDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == "heun": scheduler = HeunDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == "euler": scheduler = EulerDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) elif scheduler_type == "ddim": scheduler = DDIMScheduler.from_config(scheduler_config) elif scheduler_type == "edm_dpm_solver_multistep": scheduler_config = { "algorithm_type": "dpmsolver++", "dynamic_thresholding_ratio": 0.995, "euler_at_final": False, "final_sigmas_type": "zero", "lower_order_final": True, "num_train_timesteps": 1000, "prediction_type": "epsilon", "rho": 7.0, "sample_max_value": 1.0, "sigma_data": 0.5, "sigma_max": 80.0, "sigma_min": 0.002, "solver_order": 2, "solver_type": "midpoint", "thresholding": False, } scheduler = EDMDPMSolverMultistepScheduler(**scheduler_config) else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") return scheduler def _legacy_load_clip_tokenizer(cls, checkpoint, config=None, local_files_only=False): if config: config = {"pretrained_model_name_or_path": config} else: config = fetch_diffusers_config(checkpoint) if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): clip_config = "openai/clip-vit-large-patch14" config["pretrained_model_name_or_path"] = clip_config subfolder = "" elif is_open_clip_model(checkpoint): clip_config = "stabilityai/stable-diffusion-2" config["pretrained_model_name_or_path"] = clip_config subfolder = "tokenizer" else: clip_config = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" config["pretrained_model_name_or_path"] = clip_config subfolder = "" tokenizer = cls.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) return tokenizer def _legacy_load_safety_checker(local_files_only, torch_dtype): # Support for loading safety checker components using the deprecated # `load_safety_checker` argument. from ..pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker feature_extractor = AutoImageProcessor.from_pretrained( "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only, torch_dtype=torch_dtype ) safety_checker = StableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only, torch_dtype=torch_dtype ) return {"safety_checker": safety_checker, "feature_extractor": feature_extractor} # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight, dim): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def swap_proj_gate(weight): proj, gate = weight.chunk(2, dim=0) new_weight = torch.cat([gate, proj], dim=0) return new_weight def get_attn2_layers(state_dict): attn2_layers = [] for key in state_dict.keys(): if "attn2." 
in key: # Extract the layer number from the key layer_num = int(key.split(".")[1]) attn2_layers.append(layer_num) return tuple(sorted(set(attn2_layers))) def get_caption_projection_dim(state_dict): caption_projection_dim = state_dict["context_embedder.weight"].shape[0] return caption_projection_dim def convert_sd3_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "joint_blocks" in k))[-1] + 1 # noqa: C401 dual_attention_layers = get_attn2_layers(checkpoint) caption_projection_dim = get_caption_projection_dim(checkpoint) has_qk_norm = any("ln_q" in key for key in checkpoint.keys()) # Positional and patch embeddings. converted_state_dict["pos_embed.pos_embed"] = checkpoint.pop("pos_embed") converted_state_dict["pos_embed.proj.weight"] = checkpoint.pop("x_embedder.proj.weight") converted_state_dict["pos_embed.proj.bias"] = checkpoint.pop("x_embedder.proj.bias") # Timestep embeddings. converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = checkpoint.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = checkpoint.pop("t_embedder.mlp.0.bias") converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = checkpoint.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias") # Context projections. converted_state_dict["context_embedder.weight"] = checkpoint.pop("context_embedder.weight") converted_state_dict["context_embedder.bias"] = checkpoint.pop("context_embedder.bias") # Pooled context projection. converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = checkpoint.pop("y_embedder.mlp.0.weight") converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = checkpoint.pop("y_embedder.mlp.0.bias") converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = checkpoint.pop("y_embedder.mlp.2.weight") converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = checkpoint.pop("y_embedder.mlp.2.bias") # Transformer blocks 🎸. 
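    # Each joint block holds an x_block (latent stream) and a context_block (text stream); their fused
    # qkv tensors are split below into separate to_q/to_k/to_v and add_{q,k,v}_proj entries.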
for i in range(num_layers): # Q, K, V sample_q, sample_k, sample_v = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.x_block.attn.qkv.weight"), 3, dim=0 ) context_q, context_k, context_v = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.context_block.attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.x_block.attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.context_block.attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # qk norm if has_qk_norm: converted_state_dict[f"transformer_blocks.{i}.attn.norm_q.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_k.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn.ln_k.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_added_q.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.attn.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_added_k.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.attn.ln_k.weight" ) # output projections. 
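        # the final joint block is context-pre-only, so it has no context-side output projection (to_add_out)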
converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.bias"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn.proj.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.bias"] = checkpoint.pop( f"joint_blocks.{i}.context_block.attn.proj.bias" ) if i in dual_attention_layers: # Q, K, V sample_q2, sample_k2, sample_v2 = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.x_block.attn2.qkv.weight"), 3, dim=0 ) sample_q2_bias, sample_k2_bias, sample_v2_bias = torch.chunk( checkpoint.pop(f"joint_blocks.{i}.x_block.attn2.qkv.bias"), 3, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.weight"] = torch.cat([sample_q2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.bias"] = torch.cat([sample_q2_bias]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.weight"] = torch.cat([sample_k2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.bias"] = torch.cat([sample_k2_bias]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.weight"] = torch.cat([sample_v2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.bias"] = torch.cat([sample_v2_bias]) # qk norm if has_qk_norm: converted_state_dict[f"transformer_blocks.{i}.attn2.norm_q.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn2.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.norm_k.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn2.ln_k.weight" ) # output projections. converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn2.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.bias"] = checkpoint.pop( f"joint_blocks.{i}.x_block.attn2.proj.bias" ) # norms. converted_state_dict[f"transformer_blocks.{i}.norm1.linear.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1.linear.bias"] = checkpoint.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = checkpoint.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias" ) else: converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = swap_scale_shift( checkpoint.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight"), dim=caption_projection_dim, ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = swap_scale_shift( checkpoint.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias"), dim=caption_projection_dim, ) # ffs. 
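        # x_block MLP -> ff; the context_block MLP (ff_context) exists for every block except the last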
converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.bias"] = checkpoint.pop( f"joint_blocks.{i}.x_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.weight"] = checkpoint.pop( f"joint_blocks.{i}.x_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.bias"] = checkpoint.pop( f"joint_blocks.{i}.x_block.mlp.fc2.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.bias"] = checkpoint.pop( f"joint_blocks.{i}.context_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.weight"] = checkpoint.pop( f"joint_blocks.{i}.context_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.bias"] = checkpoint.pop( f"joint_blocks.{i}.context_block.mlp.fc2.bias" ) # Final blocks. converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( checkpoint.pop("final_layer.adaLN_modulation.1.weight"), dim=caption_projection_dim ) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( checkpoint.pop("final_layer.adaLN_modulation.1.bias"), dim=caption_projection_dim ) return converted_state_dict def is_t5_in_single_file(checkpoint): if "text_encoders.t5xxl.transformer.shared.weight" in checkpoint: return True return False def convert_sd3_t5_checkpoint_to_diffusers(checkpoint): keys = list(checkpoint.keys()) text_model_dict = {} remove_prefixes = ["text_encoders.t5xxl.transformer."] for key in keys: for prefix in remove_prefixes: if key.startswith(prefix): diffusers_key = key.replace(prefix, "") text_model_dict[diffusers_key] = checkpoint.get(key) return text_model_dict def create_diffusers_t5_model_from_checkpoint( cls, checkpoint, subfolder="", config=None, torch_dtype=None, local_files_only=None, ): if config: config = {"pretrained_model_name_or_path": config} else: config = fetch_diffusers_config(checkpoint) model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): model = cls(model_config) diffusers_format_checkpoint = convert_sd3_t5_checkpoint_to_diffusers(checkpoint) if is_accelerate_available(): load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) empty_device_cache() else: model.load_state_dict(diffusers_format_checkpoint) use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and (torch_dtype == torch.float16) if use_keep_in_fp32_modules: keep_in_fp32_modules = model._keep_in_fp32_modules else: keep_in_fp32_modules = [] if keep_in_fp32_modules is not None: for name, param in model.named_parameters(): if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules): # param = param.to(torch.float32) does not work here as only in the local scope. 
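                # mutating `.data` in place keeps the keep-in-fp32 modules in float32 even when the
                # rest of the model was loaded in half precision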
param.data = param.data.to(torch.float32) return model def convert_animatediff_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} for k, v in checkpoint.items(): if "pos_encoder" in k: continue else: converted_state_dict[ k.replace(".norms.0", ".norm1") .replace(".norms.1", ".norm2") .replace(".ff_norm", ".norm3") .replace(".attention_blocks.0", ".attn1") .replace(".attention_blocks.1", ".attn2") .replace(".temporal_transformer", "") ] = v return converted_state_dict def convert_flux_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "double_blocks." in k))[-1] + 1 # noqa: C401 num_single_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "single_blocks." in k))[-1] + 1 # noqa: C401 mlp_ratio = 4.0 inner_dim = 3072 # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight ## time_text_embed.timestep_embedder <- time_in converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = checkpoint.pop( "time_in.in_layer.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = checkpoint.pop("time_in.in_layer.bias") converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = checkpoint.pop( "time_in.out_layer.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = checkpoint.pop("time_in.out_layer.bias") ## time_text_embed.text_embedder <- vector_in converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = checkpoint.pop("vector_in.in_layer.weight") converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = checkpoint.pop("vector_in.in_layer.bias") converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = checkpoint.pop( "vector_in.out_layer.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = checkpoint.pop("vector_in.out_layer.bias") # guidance has_guidance = any("guidance" in k for k in checkpoint) if has_guidance: converted_state_dict["time_text_embed.guidance_embedder.linear_1.weight"] = checkpoint.pop( "guidance_in.in_layer.weight" ) converted_state_dict["time_text_embed.guidance_embedder.linear_1.bias"] = checkpoint.pop( "guidance_in.in_layer.bias" ) converted_state_dict["time_text_embed.guidance_embedder.linear_2.weight"] = checkpoint.pop( "guidance_in.out_layer.weight" ) converted_state_dict["time_text_embed.guidance_embedder.linear_2.bias"] = checkpoint.pop( "guidance_in.out_layer.bias" ) # context_embedder converted_state_dict["context_embedder.weight"] = checkpoint.pop("txt_in.weight") converted_state_dict["context_embedder.bias"] = checkpoint.pop("txt_in.bias") # x_embedder converted_state_dict["x_embedder.weight"] = checkpoint.pop("img_in.weight") converted_state_dict["x_embedder.bias"] = checkpoint.pop("img_in.bias") # double transformer blocks for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." # norms. 
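        # img_mod / txt_mod hold the AdaLN modulation linears for the image and text streams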
## norm1 converted_state_dict[f"{block_prefix}norm1.linear.weight"] = checkpoint.pop( f"double_blocks.{i}.img_mod.lin.weight" ) converted_state_dict[f"{block_prefix}norm1.linear.bias"] = checkpoint.pop( f"double_blocks.{i}.img_mod.lin.bias" ) ## norm1_context converted_state_dict[f"{block_prefix}norm1_context.linear.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_mod.lin.weight" ) converted_state_dict[f"{block_prefix}norm1_context.linear.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_mod.lin.bias" ) # Q, K, V sample_q, sample_k, sample_v = torch.chunk(checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0) context_q, context_k, context_v = torch.chunk( checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # qk_norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.norm.key_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.norm.key_norm.scale" ) # ff img_mlp converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = checkpoint.pop( f"double_blocks.{i}.img_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.0.bias") converted_state_dict[f"{block_prefix}ff.net.2.weight"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.weight") converted_state_dict[f"{block_prefix}ff.net.2.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.bias") converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.0.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.2.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.2.bias" ) # output projections. 
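        # img_attn.proj -> attn.to_out.0 (latent stream); txt_attn.proj -> attn.to_add_out (text stream)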
converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = checkpoint.pop( f"double_blocks.{i}.img_attn.proj.bias" ) converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.proj.bias" ) # single transformer blocks for i in range(num_single_layers): block_prefix = f"single_transformer_blocks.{i}." # norm.linear <- single_blocks.0.modulation.lin converted_state_dict[f"{block_prefix}norm.linear.weight"] = checkpoint.pop( f"single_blocks.{i}.modulation.lin.weight" ) converted_state_dict[f"{block_prefix}norm.linear.bias"] = checkpoint.pop( f"single_blocks.{i}.modulation.lin.bias" ) # Q, K, V, mlp mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) q, k, v, mlp = torch.split(checkpoint.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0) q_bias, k_bias, v_bias, mlp_bias = torch.split( checkpoint.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([v_bias]) converted_state_dict[f"{block_prefix}proj_mlp.weight"] = torch.cat([mlp]) converted_state_dict[f"{block_prefix}proj_mlp.bias"] = torch.cat([mlp_bias]) # qk norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( f"single_blocks.{i}.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( f"single_blocks.{i}.norm.key_norm.scale" ) # output projections. 
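        # linear2 projects the concatenated attention and MLP activations back to the hidden size (proj_out)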
converted_state_dict[f"{block_prefix}proj_out.weight"] = checkpoint.pop(f"single_blocks.{i}.linear2.weight") converted_state_dict[f"{block_prefix}proj_out.bias"] = checkpoint.pop(f"single_blocks.{i}.linear2.bias") converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( checkpoint.pop("final_layer.adaLN_modulation.1.weight") ) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( checkpoint.pop("final_layer.adaLN_modulation.1.bias") ) return converted_state_dict def convert_ltx_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys()) if "vae" not in key} TRANSFORMER_KEYS_RENAME_DICT = { "model.diffusion_model.": "", "patchify_proj": "proj_in", "adaln_single": "time_embed", "q_norm": "norm_q", "k_norm": "norm_k", } TRANSFORMER_SPECIAL_KEYS_REMAP = {} for key in list(converted_state_dict.keys()): new_key = key for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = converted_state_dict.pop(key) for key in list(converted_state_dict.keys()): for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, converted_state_dict) return converted_state_dict def convert_ltx_vae_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys()) if "vae." in key} def remove_keys_(key: str, state_dict): state_dict.pop(key) VAE_KEYS_RENAME_DICT = { # common "vae.": "", # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0", "up_blocks.2": "up_blocks.1.upsamplers.0", "up_blocks.3": "up_blocks.1", "up_blocks.4": "up_blocks.2.conv_in", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.conv_in", "up_blocks.8": "up_blocks.3.upsamplers.0", "up_blocks.9": "up_blocks.3", # encoder "down_blocks.0": "down_blocks.0", "down_blocks.1": "down_blocks.0.downsamplers.0", "down_blocks.2": "down_blocks.0.conv_out", "down_blocks.3": "down_blocks.1", "down_blocks.4": "down_blocks.1.downsamplers.0", "down_blocks.5": "down_blocks.1.conv_out", "down_blocks.6": "down_blocks.2", "down_blocks.7": "down_blocks.2.downsamplers.0", "down_blocks.8": "down_blocks.3", "down_blocks.9": "mid_block", # common "conv_shortcut": "conv_shortcut.conv", "res_blocks": "resnets", "norm3.norm": "norm3", "per_channel_statistics.mean-of-means": "latents_mean", "per_channel_statistics.std-of-means": "latents_std", } VAE_091_RENAME_DICT = { # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0.upsamplers.0", "up_blocks.2": "up_blocks.0", "up_blocks.3": "up_blocks.1.upsamplers.0", "up_blocks.4": "up_blocks.1", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.upsamplers.0", "up_blocks.8": "up_blocks.3", # common "last_time_embedder": "time_embedder", "last_scale_shift_table": "scale_shift_table", } VAE_095_RENAME_DICT = { # decoder "up_blocks.0": "mid_block", "up_blocks.1": "up_blocks.0.upsamplers.0", "up_blocks.2": "up_blocks.0", "up_blocks.3": "up_blocks.1.upsamplers.0", "up_blocks.4": "up_blocks.1", "up_blocks.5": "up_blocks.2.upsamplers.0", "up_blocks.6": "up_blocks.2", "up_blocks.7": "up_blocks.3.upsamplers.0", 
"up_blocks.8": "up_blocks.3", # encoder "down_blocks.0": "down_blocks.0", "down_blocks.1": "down_blocks.0.downsamplers.0", "down_blocks.2": "down_blocks.1", "down_blocks.3": "down_blocks.1.downsamplers.0", "down_blocks.4": "down_blocks.2", "down_blocks.5": "down_blocks.2.downsamplers.0", "down_blocks.6": "down_blocks.3", "down_blocks.7": "down_blocks.3.downsamplers.0", "down_blocks.8": "mid_block", # common "last_time_embedder": "time_embedder", "last_scale_shift_table": "scale_shift_table", } VAE_SPECIAL_KEYS_REMAP = { "per_channel_statistics.channel": remove_keys_, "per_channel_statistics.mean-of-means": remove_keys_, "per_channel_statistics.mean-of-stds": remove_keys_, } if converted_state_dict["vae.encoder.conv_out.conv.weight"].shape[1] == 2048: VAE_KEYS_RENAME_DICT.update(VAE_095_RENAME_DICT) elif "vae.decoder.last_time_embedder.timestep_embedder.linear_1.weight" in converted_state_dict: VAE_KEYS_RENAME_DICT.update(VAE_091_RENAME_DICT) for key in list(converted_state_dict.keys()): new_key = key for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = converted_state_dict.pop(key) for key in list(converted_state_dict.keys()): for special_key, handler_fn_inplace in VAE_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, converted_state_dict) return converted_state_dict def convert_autoencoder_dc_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys())} def remap_qkv_(key: str, state_dict): qkv = state_dict.pop(key) q, k, v = torch.chunk(qkv, 3, dim=0) parent_module, _, _ = key.rpartition(".qkv.conv.weight") state_dict[f"{parent_module}.to_q.weight"] = q.squeeze() state_dict[f"{parent_module}.to_k.weight"] = k.squeeze() state_dict[f"{parent_module}.to_v.weight"] = v.squeeze() def remap_proj_conv_(key: str, state_dict): parent_module, _, _ = key.rpartition(".proj.conv.weight") state_dict[f"{parent_module}.to_out.weight"] = state_dict.pop(key).squeeze() AE_KEYS_RENAME_DICT = { # common "main.": "", "op_list.": "", "context_module": "attn", "local_module": "conv_out", # NOTE: The below two lines work because scales in the available configs only have a tuple length of 1 # If there were more scales, there would be more layers, so a loop would be better to handle this "aggreg.0.0": "to_qkv_multiscale.0.proj_in", "aggreg.0.1": "to_qkv_multiscale.0.proj_out", "depth_conv.conv": "conv_depth", "inverted_conv.conv": "conv_inverted", "point_conv.conv": "conv_point", "point_conv.norm": "norm", "conv.conv.": "conv.", "conv1.conv": "conv1", "conv2.conv": "conv2", "conv2.norm": "norm", "proj.norm": "norm_out", # encoder "encoder.project_in.conv": "encoder.conv_in", "encoder.project_out.0.conv": "encoder.conv_out", "encoder.stages": "encoder.down_blocks", # decoder "decoder.project_in.conv": "decoder.conv_in", "decoder.project_out.0": "decoder.norm_out", "decoder.project_out.2.conv": "decoder.conv_out", "decoder.stages": "decoder.up_blocks", } AE_F32C32_F64C128_F128C512_KEYS = { "encoder.project_in.conv": "encoder.conv_in.conv", "decoder.project_out.2.conv": "decoder.conv_out.conv", } AE_SPECIAL_KEYS_REMAP = { "qkv.conv.weight": remap_qkv_, "proj.conv.weight": remap_proj_conv_, } if "encoder.project_in.conv.bias" not in converted_state_dict: AE_KEYS_RENAME_DICT.update(AE_F32C32_F64C128_F128C512_KEYS) for key in list(converted_state_dict.keys()): new_key = key[:] for replace_key, rename_key in 
AE_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = converted_state_dict.pop(key) for key in list(converted_state_dict.keys()): for special_key, handler_fn_inplace in AE_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, converted_state_dict) return converted_state_dict def convert_mochi_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} # Comfy checkpoints add this prefix keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) # Convert patch_embed converted_state_dict["patch_embed.proj.weight"] = checkpoint.pop("x_embedder.proj.weight") converted_state_dict["patch_embed.proj.bias"] = checkpoint.pop("x_embedder.proj.bias") # Convert time_embed converted_state_dict["time_embed.timestep_embedder.linear_1.weight"] = checkpoint.pop("t_embedder.mlp.0.weight") converted_state_dict["time_embed.timestep_embedder.linear_1.bias"] = checkpoint.pop("t_embedder.mlp.0.bias") converted_state_dict["time_embed.timestep_embedder.linear_2.weight"] = checkpoint.pop("t_embedder.mlp.2.weight") converted_state_dict["time_embed.timestep_embedder.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias") converted_state_dict["time_embed.pooler.to_kv.weight"] = checkpoint.pop("t5_y_embedder.to_kv.weight") converted_state_dict["time_embed.pooler.to_kv.bias"] = checkpoint.pop("t5_y_embedder.to_kv.bias") converted_state_dict["time_embed.pooler.to_q.weight"] = checkpoint.pop("t5_y_embedder.to_q.weight") converted_state_dict["time_embed.pooler.to_q.bias"] = checkpoint.pop("t5_y_embedder.to_q.bias") converted_state_dict["time_embed.pooler.to_out.weight"] = checkpoint.pop("t5_y_embedder.to_out.weight") converted_state_dict["time_embed.pooler.to_out.bias"] = checkpoint.pop("t5_y_embedder.to_out.bias") converted_state_dict["time_embed.caption_proj.weight"] = checkpoint.pop("t5_yproj.weight") converted_state_dict["time_embed.caption_proj.bias"] = checkpoint.pop("t5_yproj.bias") # Convert transformer blocks num_layers = 48 for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." old_prefix = f"blocks.{i}." 
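        # mod_x / mod_y map to norm1 / norm1_context; the final block uses a different context norm
        # module, hence the `linear_1` key handled below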
# norm1 converted_state_dict[block_prefix + "norm1.linear.weight"] = checkpoint.pop(old_prefix + "mod_x.weight") converted_state_dict[block_prefix + "norm1.linear.bias"] = checkpoint.pop(old_prefix + "mod_x.bias") if i < num_layers - 1: converted_state_dict[block_prefix + "norm1_context.linear.weight"] = checkpoint.pop( old_prefix + "mod_y.weight" ) converted_state_dict[block_prefix + "norm1_context.linear.bias"] = checkpoint.pop( old_prefix + "mod_y.bias" ) else: converted_state_dict[block_prefix + "norm1_context.linear_1.weight"] = checkpoint.pop( old_prefix + "mod_y.weight" ) converted_state_dict[block_prefix + "norm1_context.linear_1.bias"] = checkpoint.pop( old_prefix + "mod_y.bias" ) # Visual attention qkv_weight = checkpoint.pop(old_prefix + "attn.qkv_x.weight") q, k, v = qkv_weight.chunk(3, dim=0) converted_state_dict[block_prefix + "attn1.to_q.weight"] = q converted_state_dict[block_prefix + "attn1.to_k.weight"] = k converted_state_dict[block_prefix + "attn1.to_v.weight"] = v converted_state_dict[block_prefix + "attn1.norm_q.weight"] = checkpoint.pop( old_prefix + "attn.q_norm_x.weight" ) converted_state_dict[block_prefix + "attn1.norm_k.weight"] = checkpoint.pop( old_prefix + "attn.k_norm_x.weight" ) converted_state_dict[block_prefix + "attn1.to_out.0.weight"] = checkpoint.pop( old_prefix + "attn.proj_x.weight" ) converted_state_dict[block_prefix + "attn1.to_out.0.bias"] = checkpoint.pop(old_prefix + "attn.proj_x.bias") # Context attention qkv_weight = checkpoint.pop(old_prefix + "attn.qkv_y.weight") q, k, v = qkv_weight.chunk(3, dim=0) converted_state_dict[block_prefix + "attn1.add_q_proj.weight"] = q converted_state_dict[block_prefix + "attn1.add_k_proj.weight"] = k converted_state_dict[block_prefix + "attn1.add_v_proj.weight"] = v converted_state_dict[block_prefix + "attn1.norm_added_q.weight"] = checkpoint.pop( old_prefix + "attn.q_norm_y.weight" ) converted_state_dict[block_prefix + "attn1.norm_added_k.weight"] = checkpoint.pop( old_prefix + "attn.k_norm_y.weight" ) if i < num_layers - 1: converted_state_dict[block_prefix + "attn1.to_add_out.weight"] = checkpoint.pop( old_prefix + "attn.proj_y.weight" ) converted_state_dict[block_prefix + "attn1.to_add_out.bias"] = checkpoint.pop( old_prefix + "attn.proj_y.bias" ) # MLP converted_state_dict[block_prefix + "ff.net.0.proj.weight"] = swap_proj_gate( checkpoint.pop(old_prefix + "mlp_x.w1.weight") ) converted_state_dict[block_prefix + "ff.net.2.weight"] = checkpoint.pop(old_prefix + "mlp_x.w2.weight") if i < num_layers - 1: converted_state_dict[block_prefix + "ff_context.net.0.proj.weight"] = swap_proj_gate( checkpoint.pop(old_prefix + "mlp_y.w1.weight") ) converted_state_dict[block_prefix + "ff_context.net.2.weight"] = checkpoint.pop( old_prefix + "mlp_y.w2.weight" ) # Output layers converted_state_dict["norm_out.linear.weight"] = swap_scale_shift(checkpoint.pop("final_layer.mod.weight"), dim=0) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift(checkpoint.pop("final_layer.mod.bias"), dim=0) converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") converted_state_dict["pos_frequencies"] = checkpoint.pop("pos_frequencies") return converted_state_dict def convert_hunyuan_video_transformer_to_diffusers(checkpoint, **kwargs): def remap_norm_scale_shift_(key, state_dict): weight = state_dict.pop(key) shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) 
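        # diffusers' norm_out.linear expects (scale, shift), so store the swapped weight under the renamed key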
state_dict[key.replace("final_layer.adaLN_modulation.1", "norm_out.linear")] = new_weight def remap_txt_in_(key, state_dict): def rename_key(key): new_key = key.replace("individual_token_refiner.blocks", "token_refiner.refiner_blocks") new_key = new_key.replace("adaLN_modulation.1", "norm_out.linear") new_key = new_key.replace("txt_in", "context_embedder") new_key = new_key.replace("t_embedder.mlp.0", "time_text_embed.timestep_embedder.linear_1") new_key = new_key.replace("t_embedder.mlp.2", "time_text_embed.timestep_embedder.linear_2") new_key = new_key.replace("c_embedder", "time_text_embed.text_embedder") new_key = new_key.replace("mlp", "ff") return new_key if "self_attn_qkv" in key: weight = state_dict.pop(key) to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_q"))] = to_q state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_k"))] = to_k state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_v"))] = to_v else: state_dict[rename_key(key)] = state_dict.pop(key) def remap_img_attn_qkv_(key, state_dict): weight = state_dict.pop(key) to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[key.replace("img_attn_qkv", "attn.to_q")] = to_q state_dict[key.replace("img_attn_qkv", "attn.to_k")] = to_k state_dict[key.replace("img_attn_qkv", "attn.to_v")] = to_v def remap_txt_attn_qkv_(key, state_dict): weight = state_dict.pop(key) to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[key.replace("txt_attn_qkv", "attn.add_q_proj")] = to_q state_dict[key.replace("txt_attn_qkv", "attn.add_k_proj")] = to_k state_dict[key.replace("txt_attn_qkv", "attn.add_v_proj")] = to_v def remap_single_transformer_blocks_(key, state_dict): hidden_size = 3072 if "linear1.weight" in key: linear1_weight = state_dict.pop(key) split_size = (hidden_size, hidden_size, hidden_size, linear1_weight.size(0) - 3 * hidden_size) q, k, v, mlp = torch.split(linear1_weight, split_size, dim=0) new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix(".linear1.weight") state_dict[f"{new_key}.attn.to_q.weight"] = q state_dict[f"{new_key}.attn.to_k.weight"] = k state_dict[f"{new_key}.attn.to_v.weight"] = v state_dict[f"{new_key}.proj_mlp.weight"] = mlp elif "linear1.bias" in key: linear1_bias = state_dict.pop(key) split_size = (hidden_size, hidden_size, hidden_size, linear1_bias.size(0) - 3 * hidden_size) q_bias, k_bias, v_bias, mlp_bias = torch.split(linear1_bias, split_size, dim=0) new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix(".linear1.bias") state_dict[f"{new_key}.attn.to_q.bias"] = q_bias state_dict[f"{new_key}.attn.to_k.bias"] = k_bias state_dict[f"{new_key}.attn.to_v.bias"] = v_bias state_dict[f"{new_key}.proj_mlp.bias"] = mlp_bias else: new_key = key.replace("single_blocks", "single_transformer_blocks") new_key = new_key.replace("linear2", "proj_out") new_key = new_key.replace("q_norm", "attn.norm_q") new_key = new_key.replace("k_norm", "attn.norm_k") state_dict[new_key] = state_dict.pop(key) TRANSFORMER_KEYS_RENAME_DICT = { "img_in": "x_embedder", "time_in.mlp.0": "time_text_embed.timestep_embedder.linear_1", "time_in.mlp.2": "time_text_embed.timestep_embedder.linear_2", "guidance_in.mlp.0": "time_text_embed.guidance_embedder.linear_1", "guidance_in.mlp.2": "time_text_embed.guidance_embedder.linear_2", "vector_in.in_layer": "time_text_embed.text_embedder.linear_1", "vector_in.out_layer": "time_text_embed.text_embedder.linear_2", "double_blocks": "transformer_blocks", "img_attn_q_norm": "attn.norm_q", 
"img_attn_k_norm": "attn.norm_k", "img_attn_proj": "attn.to_out.0", "txt_attn_q_norm": "attn.norm_added_q", "txt_attn_k_norm": "attn.norm_added_k", "txt_attn_proj": "attn.to_add_out", "img_mod.linear": "norm1.linear", "img_norm1": "norm1.norm", "img_norm2": "norm2", "img_mlp": "ff", "txt_mod.linear": "norm1_context.linear", "txt_norm1": "norm1.norm", "txt_norm2": "norm2_context", "txt_mlp": "ff_context", "self_attn_proj": "attn.to_out.0", "modulation.linear": "norm.linear", "pre_norm": "norm.norm", "final_layer.norm_final": "norm_out.norm", "final_layer.linear": "proj_out", "fc1": "net.0.proj", "fc2": "net.2", "input_embedder": "proj_in", } TRANSFORMER_SPECIAL_KEYS_REMAP = { "txt_in": remap_txt_in_, "img_attn_qkv": remap_img_attn_qkv_, "txt_attn_qkv": remap_txt_attn_qkv_, "single_blocks": remap_single_transformer_blocks_, "final_layer.adaLN_modulation.1": remap_norm_scale_shift_, } def update_state_dict_(state_dict, old_key, new_key): state_dict[new_key] = state_dict.pop(old_key) for key in list(checkpoint.keys()): new_key = key[:] for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_(checkpoint, key, new_key) for key in list(checkpoint.keys()): for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, checkpoint) return checkpoint def convert_auraflow_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} state_dict_keys = list(checkpoint.keys()) # Handle register tokens and positional embeddings converted_state_dict["register_tokens"] = checkpoint.pop("register_tokens", None) # Handle time step projection converted_state_dict["time_step_proj.linear_1.weight"] = checkpoint.pop("t_embedder.mlp.0.weight", None) converted_state_dict["time_step_proj.linear_1.bias"] = checkpoint.pop("t_embedder.mlp.0.bias", None) converted_state_dict["time_step_proj.linear_2.weight"] = checkpoint.pop("t_embedder.mlp.2.weight", None) converted_state_dict["time_step_proj.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias", None) # Handle context embedder converted_state_dict["context_embedder.weight"] = checkpoint.pop("cond_seq_linear.weight", None) # Calculate the number of layers def calculate_layers(keys, key_prefix): layers = set() for k in keys: if key_prefix in k: layer_num = int(k.split(".")[1]) # get the layer number layers.add(layer_num) return len(layers) mmdit_layers = calculate_layers(state_dict_keys, key_prefix="double_layers") single_dit_layers = calculate_layers(state_dict_keys, key_prefix="single_layers") # MMDiT blocks for i in range(mmdit_layers): # Feed-forward path_mapping = {"mlpX": "ff", "mlpC": "ff_context"} weight_mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"} for orig_k, diffuser_k in path_mapping.items(): for k, v in weight_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.{v}.weight"] = checkpoint.pop( f"double_layers.{i}.{orig_k}.{k}.weight", None ) # Norms path_mapping = {"modX": "norm1", "modC": "norm1_context"} for orig_k, diffuser_k in path_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.{diffuser_k}.linear.weight"] = checkpoint.pop( f"double_layers.{i}.{orig_k}.1.weight", None ) # Attentions x_attn_mapping = {"w2q": "to_q", "w2k": "to_k", "w2v": "to_v", "w2o": "to_out.0"} context_attn_mapping = {"w1q": "add_q_proj", "w1k": "add_k_proj", "w1v": "add_v_proj", "w1o": "to_add_out"} for attn_mapping in 
[x_attn_mapping, context_attn_mapping]: for k, v in attn_mapping.items(): converted_state_dict[f"joint_transformer_blocks.{i}.attn.{v}.weight"] = checkpoint.pop( f"double_layers.{i}.attn.{k}.weight", None ) # Single-DiT blocks for i in range(single_dit_layers): # Feed-forward mapping = {"c_fc1": "linear_1", "c_fc2": "linear_2", "c_proj": "out_projection"} for k, v in mapping.items(): converted_state_dict[f"single_transformer_blocks.{i}.ff.{v}.weight"] = checkpoint.pop( f"single_layers.{i}.mlp.{k}.weight", None ) # Norms converted_state_dict[f"single_transformer_blocks.{i}.norm1.linear.weight"] = checkpoint.pop( f"single_layers.{i}.modCX.1.weight", None ) # Attentions x_attn_mapping = {"w1q": "to_q", "w1k": "to_k", "w1v": "to_v", "w1o": "to_out.0"} for k, v in x_attn_mapping.items(): converted_state_dict[f"single_transformer_blocks.{i}.attn.{v}.weight"] = checkpoint.pop( f"single_layers.{i}.attn.{k}.weight", None ) # Final blocks converted_state_dict["proj_out.weight"] = checkpoint.pop("final_linear.weight", None) # Handle the final norm layer norm_weight = checkpoint.pop("modF.1.weight", None) if norm_weight is not None: converted_state_dict["norm_out.linear.weight"] = swap_scale_shift(norm_weight, dim=None) else: converted_state_dict["norm_out.linear.weight"] = None converted_state_dict["pos_embed.pos_embed"] = checkpoint.pop("positional_encoding") converted_state_dict["pos_embed.proj.weight"] = checkpoint.pop("init_x_linear.weight") converted_state_dict["pos_embed.proj.bias"] = checkpoint.pop("init_x_linear.bias") return converted_state_dict def convert_lumina2_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} # Original Lumina-Image-2 has an extra norm parameter that is unused # We just remove it here checkpoint.pop("norm_final.weight", None) # Comfy checkpoints add this prefix keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." 
in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) LUMINA_KEY_MAP = { "cap_embedder": "time_caption_embed.caption_embedder", "t_embedder.mlp.0": "time_caption_embed.timestep_embedder.linear_1", "t_embedder.mlp.2": "time_caption_embed.timestep_embedder.linear_2", "attention": "attn", ".out.": ".to_out.0.", "k_norm": "norm_k", "q_norm": "norm_q", "w1": "linear_1", "w2": "linear_2", "w3": "linear_3", "adaLN_modulation.1": "norm1.linear", } ATTENTION_NORM_MAP = { "attention_norm1": "norm1.norm", "attention_norm2": "norm2", } CONTEXT_REFINER_MAP = { "context_refiner.0.attention_norm1": "context_refiner.0.norm1", "context_refiner.0.attention_norm2": "context_refiner.0.norm2", "context_refiner.1.attention_norm1": "context_refiner.1.norm1", "context_refiner.1.attention_norm2": "context_refiner.1.norm2", } FINAL_LAYER_MAP = { "final_layer.adaLN_modulation.1": "norm_out.linear_1", "final_layer.linear": "norm_out.linear_2", } def convert_lumina_attn_to_diffusers(tensor, diffusers_key): q_dim = 2304 k_dim = v_dim = 768 to_q, to_k, to_v = torch.split(tensor, [q_dim, k_dim, v_dim], dim=0) return { diffusers_key.replace("qkv", "to_q"): to_q, diffusers_key.replace("qkv", "to_k"): to_k, diffusers_key.replace("qkv", "to_v"): to_v, } for key in keys: diffusers_key = key for k, v in CONTEXT_REFINER_MAP.items(): diffusers_key = diffusers_key.replace(k, v) for k, v in FINAL_LAYER_MAP.items(): diffusers_key = diffusers_key.replace(k, v) for k, v in ATTENTION_NORM_MAP.items(): diffusers_key = diffusers_key.replace(k, v) for k, v in LUMINA_KEY_MAP.items(): diffusers_key = diffusers_key.replace(k, v) if "qkv" in diffusers_key: converted_state_dict.update(convert_lumina_attn_to_diffusers(checkpoint.pop(key), diffusers_key)) else: converted_state_dict[diffusers_key] = checkpoint.pop(key) return converted_state_dict def convert_sana_transformer_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "blocks" in k))[-1] + 1 # noqa: C401 # Positional and patch embeddings. checkpoint.pop("pos_embed") converted_state_dict["patch_embed.proj.weight"] = checkpoint.pop("x_embedder.proj.weight") converted_state_dict["patch_embed.proj.bias"] = checkpoint.pop("x_embedder.proj.bias") # Timestep embeddings. converted_state_dict["time_embed.emb.timestep_embedder.linear_1.weight"] = checkpoint.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_1.bias"] = checkpoint.pop("t_embedder.mlp.0.bias") converted_state_dict["time_embed.emb.timestep_embedder.linear_2.weight"] = checkpoint.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias") converted_state_dict["time_embed.linear.weight"] = checkpoint.pop("t_block.1.weight") converted_state_dict["time_embed.linear.bias"] = checkpoint.pop("t_block.1.bias") # Caption Projection. 
checkpoint.pop("y_embedder.y_embedding") converted_state_dict["caption_projection.linear_1.weight"] = checkpoint.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = checkpoint.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = checkpoint.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = checkpoint.pop("y_embedder.y_proj.fc2.bias") converted_state_dict["caption_norm.weight"] = checkpoint.pop("attention_y_norm.weight") for i in range(num_layers): converted_state_dict[f"transformer_blocks.{i}.scale_shift_table"] = checkpoint.pop( f"blocks.{i}.scale_shift_table" ) # Self-Attention sample_q, sample_k, sample_v = torch.chunk(checkpoint.pop(f"blocks.{i}.attn.qkv.weight"), 3, dim=0) converted_state_dict[f"transformer_blocks.{i}.attn1.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"transformer_blocks.{i}.attn1.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"transformer_blocks.{i}.attn1.to_v.weight"] = torch.cat([sample_v]) # Output Projections converted_state_dict[f"transformer_blocks.{i}.attn1.to_out.0.weight"] = checkpoint.pop( f"blocks.{i}.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn1.to_out.0.bias"] = checkpoint.pop( f"blocks.{i}.attn.proj.bias" ) # Cross-Attention converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.weight"] = checkpoint.pop( f"blocks.{i}.cross_attn.q_linear.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.bias"] = checkpoint.pop( f"blocks.{i}.cross_attn.q_linear.bias" ) linear_sample_k, linear_sample_v = torch.chunk( checkpoint.pop(f"blocks.{i}.cross_attn.kv_linear.weight"), 2, dim=0 ) linear_sample_k_bias, linear_sample_v_bias = torch.chunk( checkpoint.pop(f"blocks.{i}.cross_attn.kv_linear.bias"), 2, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.weight"] = linear_sample_k converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.weight"] = linear_sample_v converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.bias"] = linear_sample_k_bias converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.bias"] = linear_sample_v_bias # Output Projections converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.weight"] = checkpoint.pop( f"blocks.{i}.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.bias"] = checkpoint.pop( f"blocks.{i}.cross_attn.proj.bias" ) # MLP converted_state_dict[f"transformer_blocks.{i}.ff.conv_inverted.weight"] = checkpoint.pop( f"blocks.{i}.mlp.inverted_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.conv_inverted.bias"] = checkpoint.pop( f"blocks.{i}.mlp.inverted_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff.conv_depth.weight"] = checkpoint.pop( f"blocks.{i}.mlp.depth_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.conv_depth.bias"] = checkpoint.pop( f"blocks.{i}.mlp.depth_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff.conv_point.weight"] = checkpoint.pop( f"blocks.{i}.mlp.point_conv.conv.weight" ) # Final layer converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") converted_state_dict["scale_shift_table"] = checkpoint.pop("final_layer.scale_shift_table") return converted_state_dict def convert_wan_transformer_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) 
for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) TRANSFORMER_KEYS_RENAME_DICT = { "time_embedding.0": "condition_embedder.time_embedder.linear_1", "time_embedding.2": "condition_embedder.time_embedder.linear_2", "text_embedding.0": "condition_embedder.text_embedder.linear_1", "text_embedding.2": "condition_embedder.text_embedder.linear_2", "time_projection.1": "condition_embedder.time_proj", "cross_attn": "attn2", "self_attn": "attn1", ".o.": ".to_out.0.", ".q.": ".to_q.", ".k.": ".to_k.", ".v.": ".to_v.", ".k_img.": ".add_k_proj.", ".v_img.": ".add_v_proj.", ".norm_k_img.": ".norm_added_k.", "head.modulation": "scale_shift_table", "head.head": "proj_out", "modulation": "scale_shift_table", "ffn.0": "ffn.net.0.proj", "ffn.2": "ffn.net.2", # Hack to swap the layer names # The original model calls the norms in following order: norm1, norm3, norm2 # We convert it to: norm1, norm2, norm3 "norm2": "norm__placeholder", "norm3": "norm2", "norm__placeholder": "norm3", # For the I2V model "img_emb.proj.0": "condition_embedder.image_embedder.norm1", "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj", "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2", "img_emb.proj.4": "condition_embedder.image_embedder.norm2", # For the VACE model "before_proj": "proj_in", "after_proj": "proj_out", } for key in list(checkpoint.keys()): new_key = key[:] for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = checkpoint.pop(key) return converted_state_dict def convert_wan_vae_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} # Create mappings for specific components middle_key_mapping = { # Encoder middle block "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma", "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias", "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight", "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma", "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias", "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight", "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma", "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias", "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight", "encoder.middle.2.residual.3.gamma": "encoder.mid_block.resnets.1.norm2.gamma", "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias", "encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight", # Decoder middle block "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma", "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias", "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight", "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma", "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias", "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight", "decoder.middle.2.residual.0.gamma": "decoder.mid_block.resnets.1.norm1.gamma", "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias", "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight", 
"decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma", "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias", "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight", } # Create a mapping for attention blocks attention_mapping = { # Encoder middle attention "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma", "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight", "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias", "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight", "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias", # Decoder middle attention "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma", "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight", "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias", "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight", "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias", } # Create a mapping for the head components head_mapping = { # Encoder head "encoder.head.0.gamma": "encoder.norm_out.gamma", "encoder.head.2.bias": "encoder.conv_out.bias", "encoder.head.2.weight": "encoder.conv_out.weight", # Decoder head "decoder.head.0.gamma": "decoder.norm_out.gamma", "decoder.head.2.bias": "decoder.conv_out.bias", "decoder.head.2.weight": "decoder.conv_out.weight", } # Create a mapping for the quant components quant_mapping = { "conv1.weight": "quant_conv.weight", "conv1.bias": "quant_conv.bias", "conv2.weight": "post_quant_conv.weight", "conv2.bias": "post_quant_conv.bias", } # Process each key in the state dict for key, value in checkpoint.items(): # Handle middle block keys using the mapping if key in middle_key_mapping: new_key = middle_key_mapping[key] converted_state_dict[new_key] = value # Handle attention blocks using the mapping elif key in attention_mapping: new_key = attention_mapping[key] converted_state_dict[new_key] = value # Handle head keys using the mapping elif key in head_mapping: new_key = head_mapping[key] converted_state_dict[new_key] = value # Handle quant keys using the mapping elif key in quant_mapping: new_key = quant_mapping[key] converted_state_dict[new_key] = value # Handle encoder conv1 elif key == "encoder.conv1.weight": converted_state_dict["encoder.conv_in.weight"] = value elif key == "encoder.conv1.bias": converted_state_dict["encoder.conv_in.bias"] = value # Handle decoder conv1 elif key == "decoder.conv1.weight": converted_state_dict["decoder.conv_in.weight"] = value elif key == "decoder.conv1.bias": converted_state_dict["decoder.conv_in.bias"] = value # Handle encoder downsamples elif key.startswith("encoder.downsamples."): # Convert to down_blocks new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.") # Convert residual block naming but keep the original structure if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".residual.6.weight" in new_key: new_key = 
new_key.replace(".residual.6.weight", ".conv2.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") converted_state_dict[new_key] = value # Handle decoder upsamples elif key.startswith("decoder.upsamples."): # Convert to up_blocks parts = key.split(".") block_idx = int(parts[2]) # Group residual blocks if "residual" in key: if block_idx in [0, 1, 2]: new_block_idx = 0 resnet_idx = block_idx elif block_idx in [4, 5, 6]: new_block_idx = 1 resnet_idx = block_idx - 4 elif block_idx in [8, 9, 10]: new_block_idx = 2 resnet_idx = block_idx - 8 elif block_idx in [12, 13, 14]: new_block_idx = 3 resnet_idx = block_idx - 12 else: # Keep as is for other blocks converted_state_dict[key] = value continue # Convert residual block naming if ".residual.0.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm1.gamma" elif ".residual.2.bias" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.bias" elif ".residual.2.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.weight" elif ".residual.3.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm2.gamma" elif ".residual.6.bias" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.bias" elif ".residual.6.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.weight" else: new_key = key converted_state_dict[new_key] = value # Handle shortcut connections elif ".shortcut." in key: if block_idx == 4: new_key = key.replace(".shortcut.", ".resnets.0.conv_shortcut.") new_key = new_key.replace("decoder.upsamples.4", "decoder.up_blocks.1") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_key = new_key.replace(".shortcut.", ".conv_shortcut.") converted_state_dict[new_key] = value # Handle upsamplers elif ".resample." in key or ".time_conv." in key: if block_idx == 3: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.0.upsamplers.0") elif block_idx == 7: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.1.upsamplers.0") elif block_idx == 11: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.2.upsamplers.0") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") converted_state_dict[new_key] = value else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") converted_state_dict[new_key] = value else: # Keep other keys unchanged converted_state_dict[key] = value return converted_state_dict def convert_hidream_transformer_to_diffusers(checkpoint, **kwargs): keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) return checkpoint def convert_chroma_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if "model.diffusion_model." in k: checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "double_blocks." in k))[-1] + 1 # noqa: C401 num_single_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "single_blocks." 
in k))[-1] + 1 # noqa: C401 num_guidance_layers = ( list(set(int(k.split(".", 3)[2]) for k in checkpoint if "distilled_guidance_layer.layers." in k))[-1] + 1 # noqa: C401 ) mlp_ratio = 4.0 inner_dim = 3072 # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight # guidance converted_state_dict["distilled_guidance_layer.in_proj.bias"] = checkpoint.pop( "distilled_guidance_layer.in_proj.bias" ) converted_state_dict["distilled_guidance_layer.in_proj.weight"] = checkpoint.pop( "distilled_guidance_layer.in_proj.weight" ) converted_state_dict["distilled_guidance_layer.out_proj.bias"] = checkpoint.pop( "distilled_guidance_layer.out_proj.bias" ) converted_state_dict["distilled_guidance_layer.out_proj.weight"] = checkpoint.pop( "distilled_guidance_layer.out_proj.weight" ) for i in range(num_guidance_layers): block_prefix = f"distilled_guidance_layer.layers.{i}." converted_state_dict[f"{block_prefix}linear_1.bias"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.in_layer.bias" ) converted_state_dict[f"{block_prefix}linear_1.weight"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.in_layer.weight" ) converted_state_dict[f"{block_prefix}linear_2.bias"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.out_layer.bias" ) converted_state_dict[f"{block_prefix}linear_2.weight"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.out_layer.weight" ) converted_state_dict[f"distilled_guidance_layer.norms.{i}.weight"] = checkpoint.pop( f"distilled_guidance_layer.norms.{i}.scale" ) # context_embedder converted_state_dict["context_embedder.weight"] = checkpoint.pop("txt_in.weight") converted_state_dict["context_embedder.bias"] = checkpoint.pop("txt_in.bias") # x_embedder converted_state_dict["x_embedder.weight"] = checkpoint.pop("img_in.weight") converted_state_dict["x_embedder.bias"] = checkpoint.pop("img_in.bias") # double transformer blocks for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." 
# Q, K, V sample_q, sample_k, sample_v = torch.chunk(checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0) context_q, context_k, context_v = torch.chunk( checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # qk_norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.norm.key_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.norm.key_norm.scale" ) # ff img_mlp converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = checkpoint.pop( f"double_blocks.{i}.img_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.0.bias") converted_state_dict[f"{block_prefix}ff.net.2.weight"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.weight") converted_state_dict[f"{block_prefix}ff.net.2.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.bias") converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.0.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.0.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.2.weight" ) converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_mlp.2.bias" ) # output projections. 
converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = checkpoint.pop( f"double_blocks.{i}.img_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = checkpoint.pop( f"double_blocks.{i}.img_attn.proj.bias" ) converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.proj.weight" ) converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = checkpoint.pop( f"double_blocks.{i}.txt_attn.proj.bias" ) # single transformer blocks for i in range(num_single_layers): block_prefix = f"single_transformer_blocks.{i}." # Q, K, V, mlp mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) q, k, v, mlp = torch.split(checkpoint.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0) q_bias, k_bias, v_bias, mlp_bias = torch.split( checkpoint.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([q]) converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([k]) converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([v]) converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([v_bias]) converted_state_dict[f"{block_prefix}proj_mlp.weight"] = torch.cat([mlp]) converted_state_dict[f"{block_prefix}proj_mlp.bias"] = torch.cat([mlp_bias]) # qk norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( f"single_blocks.{i}.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( f"single_blocks.{i}.norm.key_norm.scale" ) # output projections. 
converted_state_dict[f"{block_prefix}proj_out.weight"] = checkpoint.pop(f"single_blocks.{i}.linear2.weight") converted_state_dict[f"{block_prefix}proj_out.bias"] = checkpoint.pop(f"single_blocks.{i}.linear2.bias") converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") return converted_state_dict def convert_cosmos_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys())} def remove_keys_(key: str, state_dict): state_dict.pop(key) def rename_transformer_blocks_(key: str, state_dict): block_index = int(key.split(".")[1].removeprefix("block")) new_key = key old_prefix = f"blocks.block{block_index}" new_prefix = f"transformer_blocks.{block_index}" new_key = new_prefix + new_key.removeprefix(old_prefix) state_dict[new_key] = state_dict.pop(key) TRANSFORMER_KEYS_RENAME_DICT_COSMOS_1_0 = { "t_embedder.1": "time_embed.t_embedder", "affline_norm": "time_embed.norm", ".blocks.0.block.attn": ".attn1", ".blocks.1.block.attn": ".attn2", ".blocks.2.block": ".ff", ".blocks.0.adaLN_modulation.1": ".norm1.linear_1", ".blocks.0.adaLN_modulation.2": ".norm1.linear_2", ".blocks.1.adaLN_modulation.1": ".norm2.linear_1", ".blocks.1.adaLN_modulation.2": ".norm2.linear_2", ".blocks.2.adaLN_modulation.1": ".norm3.linear_1", ".blocks.2.adaLN_modulation.2": ".norm3.linear_2", "to_q.0": "to_q", "to_q.1": "norm_q", "to_k.0": "to_k", "to_k.1": "norm_k", "to_v.0": "to_v", "layer1": "net.0.proj", "layer2": "net.2", "proj.1": "proj", "x_embedder": "patch_embed", "extra_pos_embedder": "learnable_pos_embed", "final_layer.adaLN_modulation.1": "norm_out.linear_1", "final_layer.adaLN_modulation.2": "norm_out.linear_2", "final_layer.linear": "proj_out", } TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_1_0 = { "blocks.block": rename_transformer_blocks_, "logvar.0.freqs": remove_keys_, "logvar.0.phases": remove_keys_, "logvar.1.weight": remove_keys_, "pos_embedder.seq": remove_keys_, } TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0 = { "t_embedder.1": "time_embed.t_embedder", "t_embedding_norm": "time_embed.norm", "blocks": "transformer_blocks", "adaln_modulation_self_attn.1": "norm1.linear_1", "adaln_modulation_self_attn.2": "norm1.linear_2", "adaln_modulation_cross_attn.1": "norm2.linear_1", "adaln_modulation_cross_attn.2": "norm2.linear_2", "adaln_modulation_mlp.1": "norm3.linear_1", "adaln_modulation_mlp.2": "norm3.linear_2", "self_attn": "attn1", "cross_attn": "attn2", "q_proj": "to_q", "k_proj": "to_k", "v_proj": "to_v", "output_proj": "to_out.0", "q_norm": "norm_q", "k_norm": "norm_k", "mlp.layer1": "ff.net.0.proj", "mlp.layer2": "ff.net.2", "x_embedder.proj.1": "patch_embed.proj", "final_layer.adaln_modulation.1": "norm_out.linear_1", "final_layer.adaln_modulation.2": "norm_out.linear_2", "final_layer.linear": "proj_out", } TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0 = { "accum_video_sample_counter": remove_keys_, "accum_image_sample_counter": remove_keys_, "accum_iteration": remove_keys_, "accum_train_in_hours": remove_keys_, "pos_embedder.seq": remove_keys_, "pos_embedder.dim_spatial_range": remove_keys_, "pos_embedder.dim_temporal_range": remove_keys_, "_extra_state": remove_keys_, } PREFIX_KEY = "net." 
if "net.blocks.block1.blocks.0.block.attn.to_q.0.weight" in checkpoint: TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_1_0 TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_1_0 else: TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0 TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0 state_dict_keys = list(converted_state_dict.keys()) for key in state_dict_keys: new_key = key[:] if new_key.startswith(PREFIX_KEY): new_key = new_key.removeprefix(PREFIX_KEY) for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = converted_state_dict.pop(key) state_dict_keys = list(converted_state_dict.keys()) for key in state_dict_keys: for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, converted_state_dict) return converted_state_dict
diffusers/src/diffusers/loaders/single_file_utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/single_file_utils.py", "repo_id": "diffusers", "token_count": 76480 }
144
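The conversion helpers in the file above all follow the same two-pass pattern: a plain rename dictionary is applied by substring replacement, and a second dictionary maps substrings to in-place handlers for keys that need splitting or reshaping rather than renaming. The sketch below illustrates that pattern on a toy state dict; the key names, mappings, and shapes are invented for demonstration and do not correspond to any real checkpoint.

# Illustrative only: minimal sketch of the rename-dict + special-key-handler
# pattern used by the conversion functions above. All keys here are made up.
import torch

RENAME_DICT = {
    "t_embedder.mlp.0": "time_embed.linear_1",  # hypothetical mapping
    "t_embedder.mlp.2": "time_embed.linear_2",  # hypothetical mapping
}


def split_qkv_(key, state_dict):
    # Split a fused qkv projection into separate q/k/v entries (in place).
    weight = state_dict.pop(key)
    q, k, v = weight.chunk(3, dim=0)
    state_dict[key.replace("qkv", "to_q")] = q
    state_dict[key.replace("qkv", "to_k")] = k
    state_dict[key.replace("qkv", "to_v")] = v


SPECIAL_KEYS_REMAP = {"qkv": split_qkv_}

checkpoint = {
    "t_embedder.mlp.0.weight": torch.zeros(8, 4),
    "t_embedder.mlp.2.weight": torch.zeros(4, 8),
    "blocks.0.attn.qkv.weight": torch.zeros(12, 4),
}

# Pass 1: plain substring renames.
for key in list(checkpoint.keys()):
    new_key = key
    for old, new in RENAME_DICT.items():
        new_key = new_key.replace(old, new)
    checkpoint[new_key] = checkpoint.pop(key)

# Pass 2: handlers for keys that need splitting rather than renaming.
for key in list(checkpoint.keys()):
    for special_key, handler_fn_inplace in SPECIAL_KEYS_REMAP.items():
        if special_key not in key:
            continue
        handler_fn_inplace(key, checkpoint)

print(sorted(checkpoint.keys()))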
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_cosmos import AutoencoderKLCosmos
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_magvit import AutoencoderKLMagvit
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_qwenimage import AutoencoderKLQwenImage
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
diffusers/src/diffusers/models/autoencoders/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/__init__.py", "repo_id": "diffusers", "token_count": 347 }
145
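The module above only re-exports the autoencoder implementations so they can be reached from shorter import paths. A minimal usage sketch follows, assuming network access; `stabilityai/sd-vae-ft-mse` is used purely as an example checkpoint id.

# Illustrative only: any AutoencoderKL-compatible checkpoint works here.
import torch
from diffusers import AutoencoderKL  # re-exported from .autoencoder_kl above

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
with torch.no_grad():
    latents = vae.encode(torch.randn(1, 3, 256, 256)).latent_dist.sample()
print(latents.shape)  # torch.Size([1, 4, 32, 32]) with the default 8x downsampling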
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...schedulers import ConsistencyDecoderScheduler from ...utils import BaseOutput from ...utils.accelerate_utils import apply_forward_hook from ...utils.torch_utils import randn_tensor from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..modeling_utils import ModelMixin from ..unets.unet_2d import UNet2DModel from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class ConsistencyDecoderVAEOutput(BaseOutput): """ Output of encoding method. Args: latent_dist (`DiagonalGaussianDistribution`): Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`. `DiagonalGaussianDistribution` allows for sampling latents from the distribution. """ latent_dist: "DiagonalGaussianDistribution" class ConsistencyDecoderVAE(ModelMixin, ConfigMixin): r""" The consistency decoder used with DALL-E 3. Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16) >>> pipe = StableDiffusionPipeline.from_pretrained( ... "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16 ... ).to("cuda") >>> image = pipe("horse", generator=torch.manual_seed(0)).images[0] >>> image ``` """ _supports_group_offloading = False @register_to_config def __init__( self, scaling_factor: float = 0.18215, latent_channels: int = 4, sample_size: int = 32, encoder_act_fn: str = "silu", encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512), encoder_double_z: bool = True, encoder_down_block_types: Tuple[str, ...] = ( "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ), encoder_in_channels: int = 3, encoder_layers_per_block: int = 2, encoder_norm_num_groups: int = 32, encoder_out_channels: int = 4, decoder_add_attention: bool = False, decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024), decoder_down_block_types: Tuple[str, ...] = ( "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", ), decoder_downsample_padding: int = 1, decoder_in_channels: int = 7, decoder_layers_per_block: int = 3, decoder_norm_eps: float = 1e-05, decoder_norm_num_groups: int = 32, decoder_num_train_timesteps: int = 1024, decoder_out_channels: int = 6, decoder_resnet_time_scale_shift: str = "scale_shift", decoder_time_embedding_type: str = "learned", decoder_up_block_types: Tuple[str, ...] 
= ( "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ), ): super().__init__() self.encoder = Encoder( act_fn=encoder_act_fn, block_out_channels=encoder_block_out_channels, double_z=encoder_double_z, down_block_types=encoder_down_block_types, in_channels=encoder_in_channels, layers_per_block=encoder_layers_per_block, norm_num_groups=encoder_norm_num_groups, out_channels=encoder_out_channels, ) self.decoder_unet = UNet2DModel( add_attention=decoder_add_attention, block_out_channels=decoder_block_out_channels, down_block_types=decoder_down_block_types, downsample_padding=decoder_downsample_padding, in_channels=decoder_in_channels, layers_per_block=decoder_layers_per_block, norm_eps=decoder_norm_eps, norm_num_groups=decoder_norm_num_groups, num_train_timesteps=decoder_num_train_timesteps, out_channels=decoder_out_channels, resnet_time_scale_shift=decoder_resnet_time_scale_shift, time_embedding_type=decoder_time_embedding_type, up_block_types=decoder_up_block_types, ) self.decoder_scheduler = ConsistencyDecoderScheduler() self.register_to_config(block_out_channels=encoder_block_out_channels) self.register_to_config(force_upcast=False) self.register_buffer( "means", torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None], persistent=False, ) self.register_buffer( "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) self.use_slicing = False self.use_tiling = False # only relevant if vae tiling is enabled self.tile_sample_min_size = self.config.sample_size sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling def enable_tiling(self, use_tiling: bool = True): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.use_tiling = use_tiling # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling def disable_tiling(self): r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.enable_tiling(False) # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing def enable_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing def disable_slicing(self): r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.use_slicing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. 
If `return_dict` is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(x, return_dict=return_dict) if self.use_slicing and x.shape[0] > 1: encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return ConsistencyDecoderVAEOutput(latent_dist=posterior) @apply_forward_hook def decode( self, z: torch.Tensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, num_inference_steps: int = 2, ) -> Union[DecoderOutput, Tuple[torch.Tensor]]: """ Decodes the input latent vector `z` using the consistency decoder VAE model. Args: z (torch.Tensor): The input latent vector. generator (Optional[torch.Generator]): The random number generator. Default is None. return_dict (bool): Whether to return the output as a dictionary. Default is True. num_inference_steps (int): The number of inference steps. Default is 2. Returns: Union[DecoderOutput, Tuple[torch.Tensor]]: The decoded output. """ z = (z * self.config.scaling_factor - self.means) / self.stds scale_factor = 2 ** (len(self.config.block_out_channels) - 1) z = F.interpolate(z, mode="nearest", scale_factor=scale_factor) batch_size, _, height, width = z.shape self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device) x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor( (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device ) for t in self.decoder_scheduler.timesteps: model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1) model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :] prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample x_t = prev_sample x_0 = x_t if not return_dict: return (x_0,) return DecoderOutput(sample=x_0) # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[ConsistencyDecoderVAEOutput, Tuple]: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. 
Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] instead of a plain tuple. Returns: [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`: If return_dict is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a plain `tuple` is returned. """ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return ConsistencyDecoderVAEOutput(latent_dist=posterior) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, Tuple[torch.Tensor]]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. generator (`torch.Generator`, *optional*, defaults to `None`): Generator to use for sampling. Returns: [`DecoderOutput`] or `tuple`: If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, generator=generator).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
diffusers/src/diffusers/models/autoencoders/consistency_decoder_vae.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/consistency_decoder_vae.py", "repo_id": "diffusers", "token_count": 8609 }
146
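As a small aside on the decode path defined above: before the consistency sampling loop runs, the latent is rescaled with the registered channel statistics and upsampled to pixel resolution. The sketch below reproduces just that preconditioning step on dummy tensors, using the default configuration values from `__init__`; the latent shape is made up for demonstration.

# Illustrative only: the latent preconditioning at the start of
# ConsistencyDecoderVAE.decode, reproduced on dummy tensors.
import torch
import torch.nn.functional as F

scaling_factor = 0.18215
means = torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None]
stds = torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None]

z = torch.randn(1, 4, 32, 32)  # dummy latent batch
z = (z * scaling_factor - means) / stds

# The decoder UNet operates at pixel resolution, so the latent is upsampled by
# 2 ** (len(block_out_channels) - 1) == 8 before being concatenated with noise.
z = F.interpolate(z, mode="nearest", scale_factor=2 ** (4 - 1))
print(z.shape)  # torch.Size([1, 4, 256, 256])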
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils import BaseOutput, logging from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..unets.unet_2d_blocks import UNetMidBlock2DCrossAttn from ..unets.unet_2d_condition import UNet2DConditionModel from ..unets.unet_motion_model import CrossAttnDownBlockMotion, DownBlockMotion logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class SparseControlNetOutput(BaseOutput): """ The output of [`SparseControlNetModel`]. Args: down_block_res_samples (`tuple[torch.Tensor]`): A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be used to condition the original UNet's downsampling activations. mid_down_block_re_sample (`torch.Tensor`): The activation of the middle block (the lowest sample resolution). Each tensor should be of shape `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. Output can be used to condition the original UNet's middle block activation. """ down_block_res_samples: Tuple[torch.Tensor] mid_block_res_sample: torch.Tensor class SparseControlNetConditioningEmbedding(nn.Module): def __init__( self, conditioning_embedding_channels: int, conditioning_channels: int = 3, block_out_channels: Tuple[int, ...] = (16, 32, 96, 256), ): super().__init__() self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) self.blocks = nn.ModuleList([]) for i in range(len(block_out_channels) - 1): channel_in = block_out_channels[i] channel_out = block_out_channels[i + 1] self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) self.conv_out = zero_module( nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) ) def forward(self, conditioning: torch.Tensor) -> torch.Tensor: embedding = self.conv_in(conditioning) embedding = F.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = F.silu(embedding) embedding = self.conv_out(embedding) return embedding class SparseControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): """ A SparseControlNet model as described in [SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion Models](https://huggingface.co/papers/2311.16933). 
Args: in_channels (`int`, defaults to 4): The number of channels in the input sample. conditioning_channels (`int`, defaults to 4): The number of input channels in the controlnet conditional embedding module. If `concat_condition_embedding` is True, the value provided here is incremented by 1. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, defaults to 2): The number of layers per block. downsample_padding (`int`, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, defaults to 1): The scale factor to use for the mid block. act_fn (`str`, defaults to "silu"): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If None, normalization and activation layers is skipped in post-processing. norm_eps (`float`, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. transformer_layers_per_mid_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer layers to use in each layer in the middle block. attention_head_dim (`int` or `Tuple[int]`, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of heads to use for multi-head attention. use_linear_projection (`bool`, defaults to `False`): upcast_attention (`bool`, defaults to `False`): resnet_time_scale_shift (`str`, defaults to `"default"`): Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. conditioning_embedding_out_channels (`Tuple[int]`, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. global_pool_conditions (`bool`, defaults to `False`): TODO(Patrick) - unused parameter controlnet_conditioning_channel_order (`str`, defaults to `rgb`): motion_max_seq_length (`int`, defaults to `32`): The maximum sequence length to use in the motion module. motion_num_attention_heads (`int` or `Tuple[int]`, defaults to `8`): The number of heads to use in each attention layer of the motion module. concat_conditioning_mask (`bool`, defaults to `True`): use_simplified_condition_embedding (`bool`, defaults to `True`): """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 4, conditioning_channels: int = 4, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] 
= ( "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockMotion", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 768, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None, temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, controlnet_conditioning_channel_order: str = "rgb", motion_max_seq_length: int = 32, motion_num_attention_heads: int = 8, concat_conditioning_mask: bool = True, use_simplified_condition_embedding: bool = True, ): super().__init__() self.use_simplified_condition_embedding = use_simplified_condition_embedding # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types) # input conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if concat_conditioning_mask: conditioning_channels = conditioning_channels + 1 self.concat_conditioning_mask = concat_conditioning_mask # control net conditioning embedding if use_simplified_condition_embedding: self.controlnet_cond_embedding = zero_module( nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) ) else: self.controlnet_cond_embedding = SparseControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, ) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "CrossAttnDownBlockMotion": down_block = CrossAttnDownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], add_downsample=not is_final_block, dual_cross_attention=False, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False, ) elif down_block_type == "DownBlockMotion": down_block = DownBlockMotion( in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, resnet_eps=norm_eps, 
resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, add_downsample=not is_final_block, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False, ) else: raise ValueError( "Invalid `block_type` encountered. Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`" ) self.down_blocks.append(down_block) for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) # mid mid_block_channels = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channels, mid_block_channels, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if transformer_layers_per_mid_block is None: transformer_layers_per_mid_block = ( transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1 ) self.mid_block = UNetMidBlock2DCrossAttn( in_channels=mid_block_channels, temb_channels=time_embed_dim, dropout=0, num_layers=1, transformer_layers_per_block=transformer_layers_per_mid_block, resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[-1], output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], dual_cross_attention=False, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type="default", ) @classmethod def from_unet( cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), load_weights_from_unet: bool = True, conditioning_channels: int = 3, ) -> "SparseControlNetModel": r""" Instantiate a [`SparseControlNetModel`] from [`UNet2DConditionModel`]. Parameters: unet (`UNet2DConditionModel`): The UNet model weights to copy to the [`SparseControlNetModel`]. All configuration options are also copied where applicable. """ transformer_layers_per_block = ( unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 ) down_block_types = unet.config.down_block_types for i in range(len(down_block_types)): if "CrossAttn" in down_block_types[i]: down_block_types[i] = "CrossAttnDownBlockMotion" elif "Down" in down_block_types[i]: down_block_types[i] = "DownBlockMotion" else: raise ValueError("Invalid `block_type` encountered. 
Must be a cross-attention or down block") controlnet = cls( in_channels=unet.config.in_channels, conditioning_channels=conditioning_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, conditioning_embedding_out_channels=conditioning_embedding_out_channels, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, ) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict(), strict=False) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict(), strict=False) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict(), strict=False) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False) return controlnet @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. 
# Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, conditioning_scale: float = 1.0, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, conditioning_mask: Optional[torch.Tensor] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[SparseControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: """ The [`SparseControlNetModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. added_cond_kwargs (`dict`): Additional conditions for the Stable Diffusion XL UNet. cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): A kwargs dictionary that if specified is passed along to the `AttnProcessor`. guess_mode (`bool`, defaults to `False`): In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. return_dict (`bool`, defaults to `True`): Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. Returns: [`~models.controlnet.ControlNetOutput`] **or** `tuple`: If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ sample_batch_size, sample_channels, sample_num_frames, sample_height, sample_width = sample.shape sample = torch.zeros_like(sample) # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order == "rgb": # in rgb order by default ... 
elif channel_order == "bgr": controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # prepare attention_mask if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(sample_num_frames, dim=0, output_size=emb.shape[0] * sample_num_frames) # 2. pre-process batch_size, channels, num_frames, height, width = sample.shape sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) sample = self.conv_in(sample) batch_frames, channels, height, width = sample.shape sample = sample[:, None].reshape(sample_batch_size, sample_num_frames, channels, height, width) if self.concat_conditioning_mask: controlnet_cond = torch.cat([controlnet_cond, conditioning_mask], dim=1) batch_size, channels, num_frames, height, width = controlnet_cond.shape controlnet_cond = controlnet_cond.permute(0, 2, 1, 3, 4).reshape( batch_size * num_frames, channels, height, width ) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) batch_frames, channels, height, width = controlnet_cond.shape controlnet_cond = controlnet_cond[:, None].reshape(batch_size, num_frames, channels, height, width) sample = sample + controlnet_cond batch_size, num_frames, channels, height, width = sample.shape sample = sample.reshape(sample_batch_size * sample_num_frames, channels, height, width) # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block(sample, emb) # 5. 
Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return SparseControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample ) # Copied from diffusers.models.controlnets.controlnet.zero_module def zero_module(module: nn.Module) -> nn.Module: for p in module.parameters(): nn.init.zeros_(p) return module
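The file above only defines the SparseControlNet modules and ships no usage snippet. Below is a minimal, hedged sketch of the conditioning-embedding path in isolation; the channel counts, spatial sizes, and the module import path (taken from the file_path recorded for this row) are illustrative assumptions, not values required by the model.

# Standalone sketch of SparseControlNetConditioningEmbedding (assumed import path
# and sizes). It shows the 8x spatial reduction from the three stride-2 convs and
# the zero-initialized output projection.
import torch
from diffusers.models.controlnets.controlnet_sparsectrl import (
    SparseControlNetConditioningEmbedding,
)

cond_embed = SparseControlNetConditioningEmbedding(
    conditioning_embedding_channels=320,   # would match block_out_channels[0] of the ControlNet
    conditioning_channels=3,               # RGB frames; no mask channel concatenated in this standalone sketch
    block_out_channels=(16, 32, 96, 256),
)

frames = torch.randn(2, 3, 64, 64)         # (batch * num_frames, channels, height, width)
embedding = cond_embed(frames)

# Three stride-2 convolutions reduce 64x64 to 8x8, and zero_module() zeroes the
# final projection, so the embedding starts out as all zeros after initialization.
print(embedding.shape)                     # torch.Size([2, 320, 8, 8])
print(embedding.abs().max().item())        # 0.0 right after initialization

In the full model this embedding (or the simplified single-conv variant) is added onto the conv_in output before the motion down blocks, and SparseControlNetModel.from_unet can copy matching weights from an existing UNet2DConditionModel.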
diffusers/src/diffusers/models/controlnets/controlnet_sparsectrl.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/controlnet_sparsectrl.py", "repo_id": "diffusers", "token_count": 16706 }
147
# Copyright 2025 The HuggingFace Team. All rights reserved. # `TemporalConvLayer` Copyright 2025 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate from .activations import get_activation from .attention_processor import SpatialNorm from .downsampling import ( # noqa Downsample1D, Downsample2D, FirDownsample2D, KDownsample2D, downsample_2d, ) from .normalization import AdaGroupNorm from .upsampling import ( # noqa FirUpsample2D, KUpsample2D, Upsample1D, Upsample2D, upfirdn2d_native, upsample_2d, ) class ResnetBlockCondNorm2D(nn.Module): r""" A Resnet block that use normalization layer that incorporate conditioning information. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. groups_out (`int`, *optional*, default to None): The number of groups to use for the second normalization layer. if set to None, same as `groups`. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. time_embedding_norm (`str`, *optional*, default to `"ada_group"` ): The normalization layer for time embedding `temb`. Currently only support "ada_group" or "spatial". kernel (`torch.Tensor`, optional, default to None): FIR filter, see [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. use_in_shortcut (`bool`, *optional*, default to `True`): If `True`, add a 1x1 nn.conv2d layer for skip-connection. up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the `conv_shortcut` output. conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. If None, same as `out_channels`. 
""" def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, eps: float = 1e-6, non_linearity: str = "swish", time_embedding_norm: str = "ada_group", # ada_group, spatial output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm if groups_out is None: groups_out = groups if self.time_embedding_norm == "ada_group": # ada_group self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) elif self.time_embedding_norm == "spatial": self.norm1 = SpatialNorm(in_channels, temb_channels) else: raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.time_embedding_norm == "ada_group": # ada_group self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) elif self.time_embedding_norm == "spatial": # spatial self.norm2 = SpatialNorm(out_channels, temb_channels) else: raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv2d( in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) hidden_states = input_tensor hidden_states = self.norm1(hidden_states, temb) hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984 if hidden_states.shape[0] >= 64: input_tensor = input_tensor.contiguous() hidden_states = hidden_states.contiguous() input_tensor = self.upsample(input_tensor) hidden_states = self.upsample(hidden_states) elif self.downsample is not None: input_tensor = self.downsample(input_tensor) hidden_states = self.downsample(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states, temb) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = (input_tensor + hidden_states) / self.output_scale_factor return output_tensor class ResnetBlock2D(nn.Module): r""" A Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. groups_out (`int`, *optional*, default to None): The number of groups to use for the second normalization layer. if set to None, same as `groups`. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config. By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift" for a stronger conditioning with scale and shift. kernel (`torch.Tensor`, optional, default to None): FIR filter, see [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. use_in_shortcut (`bool`, *optional*, default to `True`): If `True`, add a 1x1 nn.conv2d layer for skip-connection. up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the `conv_shortcut` output. conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. If None, same as `out_channels`. 
""" def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, pre_norm: bool = True, eps: float = 1e-6, non_linearity: str = "swish", skip_time_act: bool = False, time_embedding_norm: str = "default", # default, scale_shift, kernel: Optional[torch.Tensor] = None, output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() if time_embedding_norm == "ada_group": raise ValueError( "This class cannot be used with `time_embedding_norm==ada_group`, please use `ResnetBlockCondNorm2D` instead", ) if time_embedding_norm == "spatial": raise ValueError( "This class cannot be used with `time_embedding_norm==spatial`, please use `ResnetBlockCondNorm2D` instead", ) self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm self.skip_time_act = skip_time_act if groups_out is None: groups_out = groups self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels is not None: if self.time_embedding_norm == "default": self.time_emb_proj = nn.Linear(temb_channels, out_channels) elif self.time_embedding_norm == "scale_shift": self.time_emb_proj = nn.Linear(temb_channels, 2 * out_channels) else: raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ") else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel) elif kernel == "sde_vp": self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") else: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel) elif kernel == "sde_vp": self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) else: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv2d( in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. 
`scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 if hidden_states.shape[0] >= 64: input_tensor = input_tensor.contiguous() hidden_states = hidden_states.contiguous() input_tensor = self.upsample(input_tensor) hidden_states = self.upsample(hidden_states) elif self.downsample is not None: input_tensor = self.downsample(input_tensor) hidden_states = self.downsample(hidden_states) hidden_states = self.conv1(hidden_states) if self.time_emb_proj is not None: if not self.skip_time_act: temb = self.nonlinearity(temb) temb = self.time_emb_proj(temb)[:, :, None, None] if self.time_embedding_norm == "default": if temb is not None: hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) elif self.time_embedding_norm == "scale_shift": if temb is None: raise ValueError( f" `temb` should not be None when `time_embedding_norm` is {self.time_embedding_norm}" ) time_scale, time_shift = torch.chunk(temb, 2, dim=1) hidden_states = self.norm2(hidden_states) hidden_states = hidden_states * (1 + time_scale) + time_shift else: hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor.contiguous()) output_tensor = (input_tensor + hidden_states) / self.output_scale_factor return output_tensor # unet_rl.py def rearrange_dims(tensor: torch.Tensor) -> torch.Tensor: if len(tensor.shape) == 2: return tensor[:, :, None] if len(tensor.shape) == 3: return tensor[:, :, None, :] elif len(tensor.shape) == 4: return tensor[:, :, 0, :] else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") class Conv1dBlock(nn.Module): """ Conv1d --> GroupNorm --> Mish Parameters: inp_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. kernel_size (`int` or `tuple`): Size of the convolving kernel. n_groups (`int`, default `8`): Number of groups to separate the channels into. activation (`str`, defaults to `mish`): Name of the activation function. """ def __init__( self, inp_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], n_groups: int = 8, activation: str = "mish", ): super().__init__() self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) self.group_norm = nn.GroupNorm(n_groups, out_channels) self.mish = get_activation(activation) def forward(self, inputs: torch.Tensor) -> torch.Tensor: intermediate_repr = self.conv1d(inputs) intermediate_repr = rearrange_dims(intermediate_repr) intermediate_repr = self.group_norm(intermediate_repr) intermediate_repr = rearrange_dims(intermediate_repr) output = self.mish(intermediate_repr) return output # unet_rl.py class ResidualTemporalBlock1D(nn.Module): """ Residual 1D block with temporal convolutions. Parameters: inp_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. embed_dim (`int`): Embedding dimension. kernel_size (`int` or `tuple`): Size of the convolving kernel. 
activation (`str`, defaults `mish`): It is possible to choose the right activation function. """ def __init__( self, inp_channels: int, out_channels: int, embed_dim: int, kernel_size: Union[int, Tuple[int, int]] = 5, activation: str = "mish", ): super().__init__() self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) self.time_emb_act = get_activation(activation) self.time_emb = nn.Linear(embed_dim, out_channels) self.residual_conv = ( nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() ) def forward(self, inputs: torch.Tensor, t: torch.Tensor) -> torch.Tensor: """ Args: inputs : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] """ t = self.time_emb_act(t) t = self.time_emb(t) out = self.conv_in(inputs) + rearrange_dims(t) out = self.conv_out(out) return out + self.residual_conv(inputs) class TemporalConvLayer(nn.Module): """ Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from: https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016 Parameters: in_dim (`int`): Number of input channels. out_dim (`int`): Number of output channels. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. """ def __init__( self, in_dim: int, out_dim: Optional[int] = None, dropout: float = 0.0, norm_num_groups: int = 32, ): super().__init__() out_dim = out_dim or in_dim self.in_dim = in_dim self.out_dim = out_dim # conv layers self.conv1 = nn.Sequential( nn.GroupNorm(norm_num_groups, in_dim), nn.SiLU(), nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv2 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv3 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv4 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) # zero out the last layer params,so the conv block is identity nn.init.zeros_(self.conv4[-1].weight) nn.init.zeros_(self.conv4[-1].bias) def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor: hidden_states = ( hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4) ) identity = hidden_states hidden_states = self.conv1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.conv3(hidden_states) hidden_states = self.conv4(hidden_states) hidden_states = identity + hidden_states hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape( (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:] ) return hidden_states class TemporalResnetBlock(nn.Module): r""" A Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, temb_channels: int = 512, eps: float = 1e-6, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels kernel_size = (3, 1, 1) padding = [k // 2 for k in kernel_size] self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv3d( in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding, ) if temb_channels is not None: self.time_emb_proj = nn.Linear(temb_channels, out_channels) else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(0.0) self.conv2 = nn.Conv3d( out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding, ) self.nonlinearity = get_activation("silu") self.use_in_shortcut = self.in_channels != out_channels self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv3d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.time_emb_proj is not None: temb = self.nonlinearity(temb) temb = self.time_emb_proj(temb)[:, :, :, None, None] temb = temb.permute(0, 2, 1, 3, 4) hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = input_tensor + hidden_states return output_tensor # VideoResBlock class SpatioTemporalResBlock(nn.Module): r""" A SpatioTemporal Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the spatial resenet. temporal_eps (`float`, *optional*, defaults to `eps`): The epsilon to use for the temporal resnet. merge_factor (`float`, *optional*, defaults to `0.5`): The merge factor to use for the temporal mixing. merge_strategy (`str`, *optional*, defaults to `learned_with_images`): The merge strategy to use for the temporal mixing. switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): If `True`, switch the spatial and temporal mixing. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, temb_channels: int = 512, eps: float = 1e-6, temporal_eps: Optional[float] = None, merge_factor: float = 0.5, merge_strategy="learned_with_images", switch_spatial_to_temporal_mix: bool = False, ): super().__init__() self.spatial_res_block = ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=eps, ) self.temporal_res_block = TemporalResnetBlock( in_channels=out_channels if out_channels is not None else in_channels, out_channels=out_channels if out_channels is not None else in_channels, temb_channels=temb_channels, eps=temporal_eps if temporal_eps is not None else eps, ) self.time_mixer = AlphaBlender( alpha=merge_factor, merge_strategy=merge_strategy, switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix, ) def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, image_only_indicator: Optional[torch.Tensor] = None, ): num_frames = image_only_indicator.shape[-1] hidden_states = self.spatial_res_block(hidden_states, temb) batch_frames, channels, height, width = hidden_states.shape batch_size = batch_frames // num_frames hidden_states_mix = ( hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) ) hidden_states = ( hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) ) if temb is not None: temb = temb.reshape(batch_size, num_frames, -1) hidden_states = self.temporal_res_block(hidden_states, temb) hidden_states = self.time_mixer( x_spatial=hidden_states_mix, x_temporal=hidden_states, image_only_indicator=image_only_indicator, ) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return hidden_states class AlphaBlender(nn.Module): r""" A module to blend spatial and temporal features. Parameters: alpha (`float`): The initial value of the blending factor. merge_strategy (`str`, *optional*, defaults to `learned_with_images`): The merge strategy to use for the temporal mixing. switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): If `True`, switch the spatial and temporal mixing. 
""" strategies = ["learned", "fixed", "learned_with_images"] def __init__( self, alpha: float, merge_strategy: str = "learned_with_images", switch_spatial_to_temporal_mix: bool = False, ): super().__init__() self.merge_strategy = merge_strategy self.switch_spatial_to_temporal_mix = switch_spatial_to_temporal_mix # For TemporalVAE if merge_strategy not in self.strategies: raise ValueError(f"merge_strategy needs to be in {self.strategies}") if self.merge_strategy == "fixed": self.register_buffer("mix_factor", torch.Tensor([alpha])) elif self.merge_strategy == "learned" or self.merge_strategy == "learned_with_images": self.register_parameter("mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))) else: raise ValueError(f"Unknown merge strategy {self.merge_strategy}") def get_alpha(self, image_only_indicator: torch.Tensor, ndims: int) -> torch.Tensor: if self.merge_strategy == "fixed": alpha = self.mix_factor elif self.merge_strategy == "learned": alpha = torch.sigmoid(self.mix_factor) elif self.merge_strategy == "learned_with_images": if image_only_indicator is None: raise ValueError("Please provide image_only_indicator to use learned_with_images merge strategy") alpha = torch.where( image_only_indicator.bool(), torch.ones(1, 1, device=image_only_indicator.device), torch.sigmoid(self.mix_factor)[..., None], ) # (batch, channel, frames, height, width) if ndims == 5: alpha = alpha[:, None, :, None, None] # (batch*frames, height*width, channels) elif ndims == 3: alpha = alpha.reshape(-1)[:, None, None] else: raise ValueError(f"Unexpected ndims {ndims}. Dimensions should be 3 or 5") else: raise NotImplementedError return alpha def forward( self, x_spatial: torch.Tensor, x_temporal: torch.Tensor, image_only_indicator: Optional[torch.Tensor] = None, ) -> torch.Tensor: alpha = self.get_alpha(image_only_indicator, x_spatial.ndim) alpha = alpha.to(x_spatial.dtype) if self.switch_spatial_to_temporal_mix: alpha = 1.0 - alpha x = alpha * x_spatial + (1.0 - alpha) * x_temporal return x
diffusers/src/diffusers/models/resnet.py/0
{ "file_path": "diffusers/src/diffusers/models/resnet.py", "repo_id": "diffusers", "token_count": 14445 }
148
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import LegacyConfigMixin, register_to_config from ...utils import deprecate, logging from ..attention import BasicTransformerBlock from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import LegacyModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name class Transformer2DModelOutput(Transformer2DModelOutput): def __init__(self, *args, **kwargs): deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead." deprecate("Transformer2DModelOutput", "1.0.0", deprecation_message) super().__init__(*args, **kwargs) class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin): """ A 2D Transformer model for image-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. num_vector_embeds (`int`, *optional*): The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**). Includes the class for the masked latent pixel. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. num_embeds_ada_norm ( `int`, *optional*): The number of diffusion steps used during training. Pass if at least one of the norm_layers is `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`. attention_bias (`bool`, *optional*): Configure if the `TransformerBlocks` attention should contain a bias parameter. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock"] _skip_layerwise_casting_patterns = ["latent_image_embedding", "norm"] @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, attention_type: str = "default", caption_channels: int = None, interpolation_scale: float = None, use_additional_conditions: Optional[bool] = None, ): super().__init__() # Validate inputs. if patch_size is not None: if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]: raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." ) # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." ) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" # Set some common variables used across the board. self.use_linear_projection = use_linear_projection self.interpolation_scale = interpolation_scale self.caption_channels = caption_channels self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.gradient_checkpointing = False if use_additional_conditions is None: if norm_type == "ada_norm_single" and sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions # 2. Initialize the right blocks. # These functions follow a common structure: # a. Initialize the input blocks. b. Initialize the transformer blocks. # c. Initialize the output blocks and other projection blocks when necessary. if self.is_input_continuous: self._init_continuous_input(norm_type=norm_type) elif self.is_input_vectorized: self._init_vectorized_inputs(norm_type=norm_type) elif self.is_input_patches: self._init_patched_inputs(norm_type=norm_type) def _init_continuous_input(self, norm_type): self.norm = torch.nn.GroupNorm( num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-6, affine=True ) if self.use_linear_projection: self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim) else: self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) if self.use_linear_projection: self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels) else: self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0) def _init_vectorized_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert self.config.num_vector_embeds is not None, ( "Transformer2DModel over discrete input must provide num_embed" ) self.height = self.config.sample_size self.width = self.config.sample_size self.num_latent_pixels = self.height * self.width self.latent_image_embedding = ImagePositionalEmbeddings( num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, 
cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) self.norm_out = nn.LayerNorm(self.inner_dim) self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1) def _init_patched_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over patched input must provide sample_size" self.height = self.config.sample_size self.width = self.config.sample_size self.patch_size = self.config.patch_size interpolation_scale = ( self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) ) self.pos_embed = PatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale, ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) if self.config.norm_type != "ada_norm_single": self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) self.proj_out_2 = nn.Linear( self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels ) elif self.config.norm_type == "ada_norm_single": self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5) self.proj_out = nn.Linear( self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels ) # PixArt-Alpha blocks. 
self.adaln_single = None if self.config.norm_type == "ada_norm_single": # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use # additional conditions until we find better name self.adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=self.use_additional_conditions ) self.caption_projection = None if self.caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection( in_features=self.caption_channels, hidden_size=self.inner_dim ) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, class_labels: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ The [`Transformer2DModel`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous): Input `hidden_states`. encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). attention_mask ( `torch.Tensor`, *optional*): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. encoder_attention_mask ( `torch.Tensor`, *optional*): Cross-attention mask applied to `encoder_hidden_states`. Two formats supported: * Mask `(batch, sequence_length)` True = keep, False = discard. * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format above. This bias will be added to the cross-attention scores. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. 
# we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input if self.is_input_continuous: batch_size, _, height, width = hidden_states.shape residual = hidden_states hidden_states, inner_dim = self._operate_on_continuous_inputs(hidden_states) elif self.is_input_vectorized: hidden_states = self.latent_image_embedding(hidden_states) elif self.is_input_patches: height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size hidden_states, encoder_hidden_states, timestep, embedded_timestep = self._operate_on_patched_inputs( hidden_states, encoder_hidden_states, timestep, added_cond_kwargs ) # 2. Blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, class_labels, ) else: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. 
Output if self.is_input_continuous: output = self._get_output_for_continuous_inputs( hidden_states=hidden_states, residual=residual, batch_size=batch_size, height=height, width=width, inner_dim=inner_dim, ) elif self.is_input_vectorized: output = self._get_output_for_vectorized_inputs(hidden_states) elif self.is_input_patches: output = self._get_output_for_patched_inputs( hidden_states=hidden_states, timestep=timestep, class_labels=class_labels, embedded_timestep=embedded_timestep, height=height, width=width, ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) def _operate_on_continuous_inputs(self, hidden_states): batch, _, height, width = hidden_states.shape hidden_states = self.norm(hidden_states) if not self.use_linear_projection: hidden_states = self.proj_in(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) else: inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) return hidden_states, inner_dim def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs): batch_size = hidden_states.shape[0] hidden_states = self.pos_embed(hidden_states) embedded_timestep = None if self.adaln_single is not None: if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError( "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`." ) timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) return hidden_states, encoder_hidden_states, timestep, embedded_timestep def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim): if not self.use_linear_projection: hidden_states = ( hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() ) hidden_states = self.proj_out(hidden_states) else: hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() ) output = hidden_states + residual return output def _get_output_for_vectorized_inputs(self, hidden_states): hidden_states = self.norm_out(hidden_states) logits = self.out(hidden_states) # (batch, self.num_vector_embeds - 1, self.num_latent_pixels) logits = logits.permute(0, 2, 1) # log(p(x_0)) output = F.log_softmax(logits.double(), dim=1).float() return output def _get_output_for_patched_inputs( self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None ): if self.config.norm_type != "ada_norm_single": conditioning = self.transformer_blocks[0].norm1.emb( timestep, class_labels, hidden_dtype=hidden_states.dtype ) shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) elif self.config.norm_type == "ada_norm_single": shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale) + 
shift hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify if self.adaln_single is None: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) return output
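# Example (sketch): a minimal smoke test of the continuous-input path of the
# Transformer2DModel defined above. The configuration values are toy numbers chosen
# purely for illustration (norm_num_groups has to divide in_channels); they do not
# correspond to any released checkpoint.
if __name__ == "__main__":
    model = Transformer2DModel(
        num_attention_heads=2,
        attention_head_dim=8,
        in_channels=4,
        num_layers=1,
        norm_num_groups=4,
    )
    sample = torch.randn(1, 4, 16, 16)  # (batch, channel, height, width)
    output = model(sample).sample
    assert output.shape == sample.shape  # the continuous path ends with a residual add, so shapes match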
diffusers/src/diffusers/models/transformers/transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_2d.py", "repo_id": "diffusers", "token_count": 12697 }
149
# Copyright 2025 Qwen-Image Team, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import math from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention import FeedForward from ..attention_dispatch import dispatch_attention_fn from ..attention_processor import Attention from ..cache_utils import CacheMixin from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name def get_timestep_embedding( timesteps: torch.Tensor, embedding_dim: int, flip_sin_to_cos: bool = False, downscale_freq_shift: float = 1, scale: float = 1, max_period: int = 10000, ) -> torch.Tensor: """ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. Args timesteps (torch.Tensor): a 1-D Tensor of N indices, one per batch element. These may be fractional. embedding_dim (int): the dimension of the output. flip_sin_to_cos (bool): Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False) downscale_freq_shift (float): Controls the delta between frequencies between dimensions scale (float): Scaling factor applied to the embeddings. max_period (int): Controls the maximum frequency of the embeddings Returns torch.Tensor: an [N x dim] Tensor of positional embeddings. """ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" half_dim = embedding_dim // 2 exponent = -math.log(max_period) * torch.arange( start=0, end=half_dim, dtype=torch.float32, device=timesteps.device ) exponent = exponent / (half_dim - downscale_freq_shift) emb = torch.exp(exponent).to(timesteps.dtype) emb = timesteps[:, None].float() * emb[None, :] # scale embeddings emb = scale * emb # concat sine and cosine embeddings emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # flip sine and cosine embeddings if flip_sin_to_cos: emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) # zero pad if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) return emb def apply_rotary_emb_qwen( x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], use_real: bool = True, use_real_unbind_dim: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. 
The input tensors are reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are returned as real tensors. Args: x (`torch.Tensor`): Query or key tensor to apply rotary embeddings. [B, S, H, D] xk (torch.Tensor): Key tensor to apply freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) Returns: Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. """ if use_real: cos, sin = freqs_cis # [S, D] cos = cos[None, None] sin = sin[None, None] cos, sin = cos.to(x.device), sin.to(x.device) if use_real_unbind_dim == -1: # Used for flux, cogvideox, hunyuan-dit x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, S, H, D//2] x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) elif use_real_unbind_dim == -2: # Used for Stable Audio, OmniGen, CogView4 and Cosmos x_real, x_imag = x.reshape(*x.shape[:-1], 2, -1).unbind(-2) # [B, S, H, D//2] x_rotated = torch.cat([-x_imag, x_real], dim=-1) else: raise ValueError(f"`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.") out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) return out else: x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) freqs_cis = freqs_cis.unsqueeze(1) x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3) return x_out.type_as(x) class QwenTimestepProjEmbeddings(nn.Module): def __init__(self, embedding_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, scale=1000) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) def forward(self, timestep, hidden_states): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_states.dtype)) # (N, D) conditioning = timesteps_emb return conditioning class QwenEmbedRope(nn.Module): def __init__(self, theta: int, axes_dim: List[int], scale_rope=False): super().__init__() self.theta = theta self.axes_dim = axes_dim pos_index = torch.arange(4096) neg_index = torch.arange(4096).flip(0) * -1 - 1 self.pos_freqs = torch.cat( [ self.rope_params(pos_index, self.axes_dim[0], self.theta), self.rope_params(pos_index, self.axes_dim[1], self.theta), self.rope_params(pos_index, self.axes_dim[2], self.theta), ], dim=1, ) self.neg_freqs = torch.cat( [ self.rope_params(neg_index, self.axes_dim[0], self.theta), self.rope_params(neg_index, self.axes_dim[1], self.theta), self.rope_params(neg_index, self.axes_dim[2], self.theta), ], dim=1, ) self.rope_cache = {} # DO NOT USING REGISTER BUFFER HERE, IT WILL CAUSE COMPLEX NUMBERS LOSE ITS IMAGINARY PART self.scale_rope = scale_rope def rope_params(self, index, dim, theta=10000): """ Args: index: [0, 1, 2, 3] 1D Tensor representing the position index of the token """ assert dim % 2 == 0 freqs = torch.outer(index, 1.0 / torch.pow(theta, torch.arange(0, dim, 2).to(torch.float32).div(dim))) freqs = torch.polar(torch.ones_like(freqs), freqs) return freqs def forward(self, video_fhw, txt_seq_lens, device): """ Args: video_fhw: [frame, height, width] a list of 3 integers representing the shape of the video Args: txt_length: [bs] a list of 1 integers representing the length of the text """ if self.pos_freqs.device != device: self.pos_freqs = self.pos_freqs.to(device) self.neg_freqs = self.neg_freqs.to(device) if isinstance(video_fhw, list): video_fhw = 
video_fhw[0] if not isinstance(video_fhw, list): video_fhw = [video_fhw] vid_freqs = [] max_vid_index = 0 for idx, fhw in enumerate(video_fhw): frame, height, width = fhw rope_key = f"{idx}_{height}_{width}" if not torch.compiler.is_compiling(): if rope_key not in self.rope_cache: self.rope_cache[rope_key] = self._compute_video_freqs(frame, height, width, idx) video_freq = self.rope_cache[rope_key] else: video_freq = self._compute_video_freqs(frame, height, width, idx) video_freq = video_freq.to(device) vid_freqs.append(video_freq) if self.scale_rope: max_vid_index = max(height // 2, width // 2, max_vid_index) else: max_vid_index = max(height, width, max_vid_index) max_len = max(txt_seq_lens) txt_freqs = self.pos_freqs[max_vid_index : max_vid_index + max_len, ...] vid_freqs = torch.cat(vid_freqs, dim=0) return vid_freqs, txt_freqs @functools.lru_cache(maxsize=None) def _compute_video_freqs(self, frame, height, width, idx=0): seq_lens = frame * height * width freqs_pos = self.pos_freqs.split([x // 2 for x in self.axes_dim], dim=1) freqs_neg = self.neg_freqs.split([x // 2 for x in self.axes_dim], dim=1) freqs_frame = freqs_pos[0][idx : idx + frame].view(frame, 1, 1, -1).expand(frame, height, width, -1) if self.scale_rope: freqs_height = torch.cat([freqs_neg[1][-(height - height // 2) :], freqs_pos[1][: height // 2]], dim=0) freqs_height = freqs_height.view(1, height, 1, -1).expand(frame, height, width, -1) freqs_width = torch.cat([freqs_neg[2][-(width - width // 2) :], freqs_pos[2][: width // 2]], dim=0) freqs_width = freqs_width.view(1, 1, width, -1).expand(frame, height, width, -1) else: freqs_height = freqs_pos[1][:height].view(1, height, 1, -1).expand(frame, height, width, -1) freqs_width = freqs_pos[2][:width].view(1, 1, width, -1).expand(frame, height, width, -1) freqs = torch.cat([freqs_frame, freqs_height, freqs_width], dim=-1).reshape(seq_lens, -1) return freqs.clone().contiguous() class QwenDoubleStreamAttnProcessor2_0: """ Attention processor for Qwen double-stream architecture, matching DoubleStreamLayerMegatron logic. This processor implements joint attention computation where text and image streams are processed together. """ _attention_backend = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "QwenDoubleStreamAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, # Image stream encoder_hidden_states: torch.FloatTensor = None, # Text stream encoder_hidden_states_mask: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: if encoder_hidden_states is None: raise ValueError("QwenDoubleStreamAttnProcessor2_0 requires encoder_hidden_states (text stream)") seq_txt = encoder_hidden_states.shape[1] # Compute QKV for image stream (sample projections) img_query = attn.to_q(hidden_states) img_key = attn.to_k(hidden_states) img_value = attn.to_v(hidden_states) # Compute QKV for text stream (context projections) txt_query = attn.add_q_proj(encoder_hidden_states) txt_key = attn.add_k_proj(encoder_hidden_states) txt_value = attn.add_v_proj(encoder_hidden_states) # Reshape for multi-head attention img_query = img_query.unflatten(-1, (attn.heads, -1)) img_key = img_key.unflatten(-1, (attn.heads, -1)) img_value = img_value.unflatten(-1, (attn.heads, -1)) txt_query = txt_query.unflatten(-1, (attn.heads, -1)) txt_key = txt_key.unflatten(-1, (attn.heads, -1)) txt_value = txt_value.unflatten(-1, (attn.heads, -1)) # Apply QK normalization if attn.norm_q is not None: img_query = attn.norm_q(img_query) if attn.norm_k is not None: img_key = attn.norm_k(img_key) if attn.norm_added_q is not None: txt_query = attn.norm_added_q(txt_query) if attn.norm_added_k is not None: txt_key = attn.norm_added_k(txt_key) # Apply RoPE if image_rotary_emb is not None: img_freqs, txt_freqs = image_rotary_emb img_query = apply_rotary_emb_qwen(img_query, img_freqs, use_real=False) img_key = apply_rotary_emb_qwen(img_key, img_freqs, use_real=False) txt_query = apply_rotary_emb_qwen(txt_query, txt_freqs, use_real=False) txt_key = apply_rotary_emb_qwen(txt_key, txt_freqs, use_real=False) # Concatenate for joint attention # Order: [text, image] joint_query = torch.cat([txt_query, img_query], dim=1) joint_key = torch.cat([txt_key, img_key], dim=1) joint_value = torch.cat([txt_value, img_value], dim=1) # Compute joint attention joint_hidden_states = dispatch_attention_fn( joint_query, joint_key, joint_value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, ) # Reshape back joint_hidden_states = joint_hidden_states.flatten(2, 3) joint_hidden_states = joint_hidden_states.to(joint_query.dtype) # Split attention outputs back txt_attn_output = joint_hidden_states[:, :seq_txt, :] # Text part img_attn_output = joint_hidden_states[:, seq_txt:, :] # Image part # Apply output projections img_attn_output = attn.to_out[0](img_attn_output) if len(attn.to_out) > 1: img_attn_output = attn.to_out[1](img_attn_output) # dropout txt_attn_output = attn.to_add_out(txt_attn_output) return img_attn_output, txt_attn_output @maybe_allow_in_graph class QwenImageTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6 ): super().__init__() self.dim = dim self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim # Image processing modules self.img_mod = nn.Sequential( nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True), # For scale, shift, gate for norm1 and norm2 ) self.img_norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.attn = Attention( query_dim=dim, cross_attention_dim=None, # Enable cross attention for joint computation added_kv_proj_dim=dim, # Enable added KV projections 
for text stream dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=QwenDoubleStreamAttnProcessor2_0(), qk_norm=qk_norm, eps=eps, ) self.img_norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.img_mlp = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") # Text processing modules self.txt_mod = nn.Sequential( nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True), # For scale, shift, gate for norm1 and norm2 ) self.txt_norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) # Text doesn't need separate attention - it's handled by img_attn joint computation self.txt_norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.txt_mlp = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def _modulate(self, x, mod_params): """Apply modulation to input tensor""" shift, scale, gate = mod_params.chunk(3, dim=-1) return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1), gate.unsqueeze(1) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_hidden_states_mask: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # Get modulation parameters for both streams img_mod_params = self.img_mod(temb) # [B, 6*dim] txt_mod_params = self.txt_mod(temb) # [B, 6*dim] # Split modulation parameters for norm1 and norm2 img_mod1, img_mod2 = img_mod_params.chunk(2, dim=-1) # Each [B, 3*dim] txt_mod1, txt_mod2 = txt_mod_params.chunk(2, dim=-1) # Each [B, 3*dim] # Process image stream - norm1 + modulation img_normed = self.img_norm1(hidden_states) img_modulated, img_gate1 = self._modulate(img_normed, img_mod1) # Process text stream - norm1 + modulation txt_normed = self.txt_norm1(encoder_hidden_states) txt_modulated, txt_gate1 = self._modulate(txt_normed, txt_mod1) # Use QwenAttnProcessor2_0 for joint attention computation # This directly implements the DoubleStreamLayerMegatron logic: # 1. Computes QKV for both streams # 2. Applies QK normalization and RoPE # 3. Concatenates and runs joint attention # 4. 
Splits results back to separate streams joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=img_modulated, # Image stream (will be processed as "sample") encoder_hidden_states=txt_modulated, # Text stream (will be processed as "context") encoder_hidden_states_mask=encoder_hidden_states_mask, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) # QwenAttnProcessor2_0 returns (img_output, txt_output) when encoder_hidden_states is provided img_attn_output, txt_attn_output = attn_output # Apply attention gates and add residual (like in Megatron) hidden_states = hidden_states + img_gate1 * img_attn_output encoder_hidden_states = encoder_hidden_states + txt_gate1 * txt_attn_output # Process image stream - norm2 + MLP img_normed2 = self.img_norm2(hidden_states) img_modulated2, img_gate2 = self._modulate(img_normed2, img_mod2) img_mlp_output = self.img_mlp(img_modulated2) hidden_states = hidden_states + img_gate2 * img_mlp_output # Process text stream - norm2 + MLP txt_normed2 = self.txt_norm2(encoder_hidden_states) txt_modulated2, txt_gate2 = self._modulate(txt_normed2, txt_mod2) txt_mlp_output = self.txt_mlp(txt_modulated2) encoder_hidden_states = encoder_hidden_states + txt_gate2 * txt_mlp_output # Clip to prevent overflow for fp16 if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class QwenImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): """ The Transformer model introduced in Qwen. Args: patch_size (`int`, defaults to `2`): Patch size to turn the input data into small patches. in_channels (`int`, defaults to `64`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. If not specified, it defaults to `in_channels`. num_layers (`int`, defaults to `60`): The number of layers of dual stream DiT blocks to use. attention_head_dim (`int`, defaults to `128`): The number of dimensions to use for each attention head. num_attention_heads (`int`, defaults to `24`): The number of attention heads to use. joint_attention_dim (`int`, defaults to `3584`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). guidance_embeds (`bool`, defaults to `False`): Whether to use guidance embeddings for guidance-distilled variant of the model. axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): The dimensions to use for the rotary positional embeddings. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["QwenImageTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _repeated_blocks = ["QwenImageTransformerBlock"] @register_to_config def __init__( self, patch_size: int = 2, in_channels: int = 64, out_channels: Optional[int] = 16, num_layers: int = 60, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 3584, guidance_embeds: bool = False, # TODO: this should probably be removed axes_dims_rope: Tuple[int, int, int] = (16, 56, 56), ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = QwenEmbedRope(theta=10000, axes_dim=list(axes_dims_rope), scale_rope=True) self.time_text_embed = QwenTimestepProjEmbeddings(embedding_dim=self.inner_dim) self.txt_norm = RMSNorm(joint_attention_dim, eps=1e-6) self.img_in = nn.Linear(in_channels, self.inner_dim) self.txt_in = nn.Linear(joint_attention_dim, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ QwenImageTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, encoder_hidden_states_mask: torch.Tensor = None, timestep: torch.LongTensor = None, img_shapes: Optional[List[Tuple[int, int, int]]] = None, txt_seq_lens: Optional[List[int]] = None, guidance: torch.Tensor = None, # TODO: this should probably be removed attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_block_samples=None, return_dict: bool = True, ) -> Union[torch.Tensor, Transformer2DModelOutput]: """ The [`QwenTransformer2DModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): Input `hidden_states`. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. encoder_hidden_states_mask (`torch.Tensor` of shape `(batch_size, text_sequence_length)`): Mask of the input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. 
""" if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." ) hidden_states = self.img_in(hidden_states) timestep = timestep.to(hidden_states.dtype) encoder_hidden_states = self.txt_norm(encoder_hidden_states) encoder_hidden_states = self.txt_in(encoder_hidden_states) if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 temb = ( self.time_text_embed(timestep, hidden_states) if guidance is None else self.time_text_embed(timestep, guidance, hidden_states) ) image_rotary_emb = self.pos_embed(img_shapes, txt_seq_lens, device=hidden_states.device) for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, encoder_hidden_states_mask, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, encoder_hidden_states_mask=encoder_hidden_states_mask, temb=temb, image_rotary_emb=image_rotary_emb, joint_attention_kwargs=attention_kwargs, ) # controlnet residual if controlnet_block_samples is not None: interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) interval_control = int(np.ceil(interval_control)) hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] # Use only the image part (hidden_states) from the dual-stream blocks hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/transformer_qwenimage.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_qwenimage.py", "repo_id": "diffusers", "token_count": 12660 }
150