Dataset columns:

| Column | Type | Min | Max |
|---|---|---|---|
| `text` | string | 7 chars | 1.24M chars |
| `id` | string | 14 chars | 166 chars |
| `metadata` | dict | — | — |
| `__index_level_0__` | int64 | 0 | 519 |
# candle-vit

Vision Transformer (ViT) model implementation following the lines of
[vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224).
This uses a classification head trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example vit --release -- --image tiger.jpg

loaded image Tensor[dims 3, 224, 224; f32]
model built
tiger, Panthera tigris : 100.00%
tiger cat              :   0.00%
jaguar, panther, Panthera onca, Felis onca:   0.00%
leopard, Panthera pardus:   0.00%
lion, king of beasts, Panthera leo:   0.00%
```
candle/candle-examples/examples/vit/README.md/0
{ "file_path": "candle/candle-examples/examples/vit/README.md", "repo_id": "candle", "token_count": 219 }
32
```python
import numpy as np


def remove_prefix(text, prefix):
    # If `text` starts with `prefix`, slice the prefix off; otherwise return `text` unchanged.
    return text[text.startswith(prefix) and len(prefix):]


# `model` is assumed to be an already-loaded PyTorch YOLO v3 model.
nps = {}
for k, v in model.state_dict().items():
    k = remove_prefix(k, 'module_list.')
    nps[k] = v.detach().numpy()

np.savez('yolo-v3.ot', **nps)
```
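Since the export is a plain NumPy `.npz` archive, it can be sanity-checked right after running the script; a minimal sketch (the tensor names depend on the model that was exported):

```python
import numpy as np

weights = np.load('yolo-v3.ot')
for name in list(weights.files)[:5]:
    print(name, weights[name].shape, weights[name].dtype)
```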
candle/candle-examples/examples/yolo-v3/extract-weights.py/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/extract-weights.py", "repo_id": "candle", "token_count": 98 }
33
```rust
use candle::Result;

/// This is a wrapper around a tokenizer to ensure that tokens can be returned to the user in a
/// streaming way rather than having to wait for the full decoding.
pub struct TokenOutputStream {
    tokenizer: tokenizers::Tokenizer,
    tokens: Vec<u32>,
    prev_index: usize,
    current_index: usize,
}

impl TokenOutputStream {
    pub fn new(tokenizer: tokenizers::Tokenizer) -> Self {
        Self {
            tokenizer,
            tokens: Vec::new(),
            prev_index: 0,
            current_index: 0,
        }
    }

    pub fn into_inner(self) -> tokenizers::Tokenizer {
        self.tokenizer
    }

    fn decode(&self, tokens: &[u32]) -> Result<String> {
        match self.tokenizer.decode(tokens, true) {
            Ok(str) => Ok(str),
            Err(err) => candle::bail!("cannot decode: {err}"),
        }
    }

    // https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/models/model.py#L68
    pub fn next_token(&mut self, token: u32) -> Result<Option<String>> {
        let prev_text = if self.tokens.is_empty() {
            String::new()
        } else {
            let tokens = &self.tokens[self.prev_index..self.current_index];
            self.decode(tokens)?
        };
        self.tokens.push(token);
        let text = self.decode(&self.tokens[self.prev_index..])?;
        if text.len() > prev_text.len() && text.chars().last().unwrap().is_alphanumeric() {
            let text = text.split_at(prev_text.len());
            self.prev_index = self.current_index;
            self.current_index = self.tokens.len();
            Ok(Some(text.1.to_string()))
        } else {
            Ok(None)
        }
    }

    pub fn decode_rest(&self) -> Result<Option<String>> {
        let prev_text = if self.tokens.is_empty() {
            String::new()
        } else {
            let tokens = &self.tokens[self.prev_index..self.current_index];
            self.decode(tokens)?
        };
        let text = self.decode(&self.tokens[self.prev_index..])?;
        if text.len() > prev_text.len() {
            let text = text.split_at(prev_text.len());
            Ok(Some(text.1.to_string()))
        } else {
            Ok(None)
        }
    }

    pub fn decode_all(&self) -> Result<String> {
        self.decode(&self.tokens)
    }

    pub fn get_token(&self, token_s: &str) -> Option<u32> {
        self.tokenizer.get_vocab(true).get(token_s).copied()
    }

    pub fn tokenizer(&self) -> &tokenizers::Tokenizer {
        &self.tokenizer
    }

    pub fn clear(&mut self) {
        self.tokens.clear();
        self.prev_index = 0;
        self.current_index = 0;
    }
}
```
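A minimal usage sketch for the stream: feed sampled token ids one at a time and print fragments as soon as they decode to complete text. The tokenizer path and the token-id source are placeholders, and the import path assumes the crate exposes this file as `candle_examples::token_output_stream`:

```rust
use candle_examples::token_output_stream::TokenOutputStream;

fn stream_print(token_ids: &[u32]) -> candle::Result<()> {
    // Hypothetical path; any Hugging Face `tokenizer.json` works here.
    let tokenizer = tokenizers::Tokenizer::from_file("tokenizer.json").expect("tokenizer");
    let mut stream = TokenOutputStream::new(tokenizer);
    for &t in token_ids {
        // `next_token` buffers ids until they decode to a complete fragment.
        if let Some(fragment) = stream.next_token(t)? {
            print!("{fragment}");
        }
    }
    // Flush whatever is still buffered, e.g. a trailing partial word.
    if let Some(rest) = stream.decode_rest()? {
        print!("{rest}");
    }
    Ok(())
}
```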
candle/candle-examples/src/token_output_stream.rs/0
{ "file_path": "candle/candle-examples/src/token_output_stream.rs", "repo_id": "candle", "token_count": 1295 }
34
```cpp
#ifndef _GPU_OPS_KERNELS_H_
#define _GPU_OPS_KERNELS_H_

#include <cuda_runtime_api.h>

#include <cstddef>
#include <cstdint>
#include <stdlib.h>
#include <stdint.h>

namespace gpu_ops {

struct MHAParams {
    uint32_t q_batch_stride;
    uint32_t k_batch_stride;
    uint32_t v_batch_stride;
    uint32_t o_batch_stride;

    uint32_t q_row_stride;
    uint32_t k_row_stride;
    uint32_t v_row_stride;
    uint32_t o_row_stride;

    uint32_t q_head_stride;
    uint32_t k_head_stride;
    uint32_t v_head_stride;
    uint32_t o_head_stride;

    uint32_t b;
    uint32_t h;
    uint32_t h_k;
    uint32_t d;
    uint32_t d_rounded;
    float softmax_scale;
    float softcap;

    uint32_t seqlen_q;
    uint32_t seqlen_k;
    uint32_t seqlen_q_rounded;
    uint32_t seqlen_k_rounded;

    int window_size_left;
    int window_size_right;

    int is_causal;
    int is_bf16;
};

void run_mha_fwd_j(cudaStream_t stream, void **buffers, const char *opaque,
                   std::size_t opaque_len);
void run_mha_bwd_j(cudaStream_t stream, void **buffers, const char *opaque,
                   std::size_t opaque_len);
}

#endif
```
candle/candle-flash-attn/kernels/kernels.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernels.h", "repo_id": "candle", "token_count": 557 }
35
#include "cuda_utils.cuh" #include<stdint.h> template <typename S, typename T> __device__ void cast_( const size_t numel, const size_t num_dims, const size_t *info, const S *inp, T *out ) { const size_t *dims = info; const size_t *strides = info + num_dims; if (info == nullptr || is_contiguous(num_dims, dims, strides)) { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { out[i] = inp[i]; } } else { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { unsigned strided_i = get_strided_index(i, num_dims, dims, strides); out[i] = inp[strided_i]; } } } template <typename S, typename T, typename I> __device__ void cast_through( const size_t numel, const size_t num_dims, const size_t *info, const S *inp, T *out ) { const size_t *dims = info; const size_t *strides = info + num_dims; if (info == nullptr || is_contiguous(num_dims, dims, strides)) { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { out[i] = static_cast<T>(static_cast<I>(inp[i])); } } else { for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { unsigned strided_i = get_strided_index(i, num_dims, dims, strides); out[i] = static_cast<T>(static_cast<I>(inp[strided_i])); } } } #define CAST_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const SRC_TYPENAME *inp, \ DST_TYPENAME *out \ ) { \ cast_<SRC_TYPENAME, DST_TYPENAME>(numel, num_dims, info, inp, out); \ } \ #define CAST_THROUGH_OP(SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const SRC_TYPENAME *inp, \ DST_TYPENAME *out \ ) { \ cast_through<SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME>(numel, num_dims, info, inp, out); \ } \ #if __CUDA_ARCH__ >= 800 CAST_OP(__nv_bfloat16, __nv_bfloat16, cast_bf16_bf16) CAST_OP(__nv_bfloat16, uint32_t, cast_bf16_u32) CAST_OP(__nv_bfloat16, float, cast_bf16_f32) CAST_OP(__nv_bfloat16, double, cast_bf16_f64) CAST_OP(uint8_t, __nv_bfloat16, cast_u8_bf16) CAST_OP(uint32_t, __nv_bfloat16, cast_u32_bf16) CAST_OP(float, __nv_bfloat16, cast_f32_bf16) CAST_OP(double, __nv_bfloat16, cast_f64_bf16) CAST_THROUGH_OP(__nv_bfloat16, uint8_t, float, cast_bf16_u8) CAST_THROUGH_OP(__nv_bfloat16, __half, float, cast_bf16_f16) CAST_THROUGH_OP(__half, __nv_bfloat16, float, cast_f16_bf16) #else #include <cuda.h> #if CUDA_VERSION >= 11000 CAST_OP(__nv_bfloat16, float, cast_bf16_f32) CAST_OP(float, __nv_bfloat16, cast_f32_bf16) CAST_THROUGH_OP(__nv_bfloat16, uint8_t, float, cast_bf16_u8) CAST_THROUGH_OP(__nv_bfloat16, __half, float, cast_bf16_f16) CAST_THROUGH_OP(__nv_bfloat16, double, float, cast_bf16_f64) CAST_THROUGH_OP(__half, __nv_bfloat16, float, cast_f16_bf16) CAST_THROUGH_OP(double, __nv_bfloat16, float, cast_f64_bf16) CAST_THROUGH_OP(uint8_t, __nv_bfloat16, float, cast_u8_bf16) #endif #endif #if __CUDA_ARCH__ >= 530 CAST_OP(__half, __half, cast_f16_f16) CAST_THROUGH_OP(__half, uint8_t, float, cast_f16_u8) CAST_OP(__half, uint32_t, cast_f16_u32) CAST_OP(__half, float, cast_f16_f32) CAST_OP(__half, double, cast_f16_f64) CAST_OP(uint8_t, __half, cast_u8_f16 ) CAST_OP(uint32_t, __half, cast_u32_f16) CAST_OP(float, __half, cast_f32_f16) CAST_OP(double, __half, cast_f64_f16) #endif CAST_OP(uint32_t, uint32_t, cast_u32_u32) CAST_OP(uint32_t, uint8_t, cast_u32_u8 ) 
CAST_OP(uint32_t, int64_t, cast_u32_i64 ) CAST_OP(uint32_t, float, cast_u32_f32) CAST_OP(uint32_t, double, cast_u32_f64) CAST_OP(uint8_t, uint32_t, cast_u8_u32) CAST_OP(uint8_t, uint8_t, cast_u8_u8 ) CAST_OP(uint8_t, int64_t, cast_u8_i64 ) CAST_OP(uint8_t, float, cast_u8_f32) CAST_OP(uint8_t, double, cast_u8_f64) CAST_OP(int64_t, uint32_t, cast_i64_u32) CAST_OP(int64_t, uint8_t, cast_i64_u8 ) CAST_OP(int64_t, int64_t, cast_i64_i64 ) CAST_OP(int64_t, float, cast_i64_f32) CAST_OP(int64_t, double, cast_i64_f64) CAST_OP(float, uint8_t, cast_f32_u8 ) CAST_OP(float, uint32_t, cast_f32_u32) CAST_OP(float, int64_t, cast_f32_i64 ) CAST_OP(float, float, cast_f32_f32) CAST_OP(float, double, cast_f32_f64) CAST_OP(double, uint8_t, cast_f64_u8 ) CAST_OP(double, uint32_t, cast_f64_u32) CAST_OP(double, int64_t, cast_f64_i64 ) CAST_OP(double, float, cast_f64_f32) CAST_OP(double, double, cast_f64_f64)
candle/candle-kernels/src/cast.cu/0
{ "file_path": "candle/candle-kernels/src/cast.cu", "repo_id": "candle", "token_count": 2430 }
36
```metal
#include <metal_stdlib>

METAL_FUNC uint get_strided_index(
    uint idx,
    constant size_t &num_dims,
    constant size_t *dims,
    constant size_t *strides
) {
    uint strided_i = 0;
    for (uint d = 0; d < num_dims; d++) {
        uint dim_idx = num_dims - 1 - d;
        strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
        idx /= dims[dim_idx];
    }
    return strided_i;
}

using namespace metal;

#define CAST(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME) \
kernel void FN_NAME( \
    constant size_t &dim, \
    device const LEFT_TYPENAME *input, \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(input[tid]); \
} \
kernel void FN_NAME_STRIDED( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    device const LEFT_TYPENAME *input, \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)]); \
}

#define CAST_THROUGH(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME, IR_TYPENAME) \
kernel void FN_NAME( \
    constant size_t &dim, \
    device const LEFT_TYPENAME *input, \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[tid])); \
} \
kernel void FN_NAME_STRIDED( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    device const LEFT_TYPENAME *input, \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)])); \
}

// u32
CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float)
CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t)
CAST(cast_u32_f16, cast_u32_f16_strided, uint32_t, half)
#if __METAL_VERSION__ >= 220
CAST(cast_u32_i64, cast_u32_i64_strided, uint32_t, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
CAST(cast_u32_bf16, cast_u32_bf16_strided, uint32_t, bfloat)
#endif

// u8
CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t)
CAST(cast_u8_f32, cast_u8_f32_strided, uint8_t, float)
CAST(cast_u8_f16, cast_u8_f16_strided, uint8_t, half)
#if __METAL_VERSION__ >= 220
CAST(cast_u8_i64, cast_u8_i64_strided, uint8_t, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
CAST(cast_u8_bf16, cast_u8_bf16_strided, uint8_t, bfloat)
#endif

// f16
CAST(cast_f16_f32, cast_f16_f32_strided, half, float)
CAST(cast_f16_u8, cast_f16_u8_strided, half, uint8_t)
CAST(cast_f16_u32, cast_f16_u32_strided, half, uint32_t)
CAST(cast_f16_i64, cast_f16_i64_strided, half, int64_t)
#if defined(__HAVE_BFLOAT__)
CAST_THROUGH(cast_f16_bf16, cast_f16_bf16_strided, half, bfloat, float)
#endif

// i64
CAST(cast_i64_f32, cast_i64_f32_strided, int64_t, float)
CAST(cast_i64_u8, cast_i64_u8_strided, int64_t, uint8_t)
CAST(cast_i64_u32, cast_i64_u32_strided, int64_t, uint32_t)
CAST(cast_i64_f16, cast_i64_f16_strided, int64_t, half)
#if defined(__HAVE_BFLOAT__)
CAST_THROUGH(cast_i64_bf16, cast_i64_bf16_strided, int64_t, bfloat, float)
#endif

// f32
CAST(cast_f32_f16, cast_f32_f16_strided, float, half)
CAST(cast_f32_u32, cast_f32_u32_strided, float, uint32_t)
CAST(cast_f32_u8, cast_f32_u8_strided, float, uint8_t)
CAST(cast_f32_i64, cast_f32_i64_strided, float, int64_t)
#if defined(__HAVE_BFLOAT__)
CAST(cast_f32_bf16, cast_f32_bf16_strided, float, bfloat)
#endif

// bf16
#if defined(__HAVE_BFLOAT__)
CAST(cast_bf16_u32, cast_bf16_u32_strided, bfloat, uint32_t)
CAST(cast_bf16_i64, cast_bf16_i64_strided, bfloat, int64_t)
CAST(cast_bf16_f32, cast_bf16_f32_strided, bfloat, float)
CAST_THROUGH(cast_bf16_u8, cast_bf16_u8_strided, bfloat, uint8_t, float)
CAST_THROUGH(cast_bf16_f16, cast_bf16_f16_strided, bfloat, half, float)
#endif
```
candle/candle-metal-kernels/src/cast.metal/0
{ "file_path": "candle/candle-metal-kernels/src/cast.metal", "repo_id": "candle", "token_count": 2045 }
37
```rust
use candle_metal_kernels::{call_unary_contiguous, call_unary_strided, unary, Kernels};
use half::{bf16, f16};
use metal::objc::rc::autoreleasepool;
use metal::{Device, MTLResourceOptions};
use rand;
use std::any::type_name;
use std::time::Instant;

fn main() {
    let device = Device::system_default().unwrap();
    let kernels = Kernels::new();

    let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>();
    let f32_10k = (0..10000)
        .map(|_| rand::random::<f32>())
        .collect::<Vec<_>>();
    let f32_100k = (0..100000)
        .map(|_| rand::random::<f32>())
        .collect::<Vec<_>>();

    let f16_map = |v: &[f32]| v.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
    let f16_1k = f16_map(&f32_1k);
    let f16_10k = f16_map(&f32_10k);
    let f16_100k = f16_map(&f32_100k);

    let bf16_map = |v: &[f32]| v.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
    let bf16_1k = bf16_map(&f32_1k);
    let bf16_10k = bf16_map(&f32_10k);
    let bf16_100k = bf16_map(&f32_100k);

    let f32_ckernels = [
        unary::contiguous::sin::FLOAT,
        unary::contiguous::cos::FLOAT,
        unary::contiguous::exp::FLOAT,
        unary::contiguous::sqr::FLOAT,
        unary::contiguous::sqrt::FLOAT,
        unary::contiguous::neg::FLOAT,
        unary::contiguous::copy::FLOAT,
    ];
    let f32_skernels = [
        unary::strided::sin::FLOAT,
        unary::strided::cos::FLOAT,
        unary::strided::exp::FLOAT,
        unary::strided::sqr::FLOAT,
        unary::strided::sqrt::FLOAT,
        unary::strided::neg::FLOAT,
        unary::strided::copy::FLOAT,
    ];
    let f16_ckernels = [
        unary::contiguous::sin::HALF,
        unary::contiguous::cos::HALF,
        unary::contiguous::exp::HALF,
        unary::contiguous::sqr::HALF,
        unary::contiguous::sqrt::HALF,
        unary::contiguous::neg::HALF,
        unary::contiguous::copy::HALF,
    ];
    let f16_skernels = [
        unary::strided::sin::HALF,
        unary::strided::cos::HALF,
        unary::strided::exp::HALF,
        unary::strided::sqr::HALF,
        unary::strided::sqrt::HALF,
        unary::strided::neg::HALF,
        unary::strided::copy::HALF,
    ];
    let bf16_ckernels = [
        unary::contiguous::sin::BFLOAT,
        unary::contiguous::cos::BFLOAT,
        unary::contiguous::exp::BFLOAT,
        unary::contiguous::sqr::BFLOAT,
        unary::contiguous::sqrt::BFLOAT,
        unary::contiguous::neg::BFLOAT,
        unary::contiguous::copy::BFLOAT,
    ];
    let bf16_skernels = [
        unary::strided::sin::BFLOAT,
        unary::strided::cos::BFLOAT,
        unary::strided::exp::BFLOAT,
        unary::strided::sqr::BFLOAT,
        unary::strided::sqrt::BFLOAT,
        unary::strided::neg::BFLOAT,
        unary::strided::copy::BFLOAT,
    ];

    println!(
        "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}",
        "dtype", "kernel", "size", "runs", "total time", "avg time"
    );

    // f32
    run_unary_bench(&device, &kernels, &f32_1k, f32_ckernels, f32_skernels);
    run_unary_bench(&device, &kernels, &f32_10k, f32_ckernels, f32_skernels);
    run_unary_bench(&device, &kernels, &f32_100k, f32_ckernels, f32_skernels);

    // f16
    run_unary_bench(&device, &kernels, &f16_1k, f16_ckernels, f16_skernels);
    run_unary_bench(&device, &kernels, &f16_10k, f16_ckernels, f16_skernels);
    run_unary_bench(&device, &kernels, &f16_100k, f16_ckernels, f16_skernels);

    // bf16
    run_unary_bench(&device, &kernels, &bf16_1k, bf16_ckernels, bf16_skernels);
    run_unary_bench(&device, &kernels, &bf16_10k, bf16_ckernels, bf16_skernels);
    run_unary_bench(&device, &kernels, &bf16_100k, bf16_ckernels, bf16_skernels);
}

fn run_unary_bench<T: Clone>(
    device: &Device,
    kernels: &Kernels,
    v: &[T],
    contiguous: [unary::contiguous::Kernel; 7],
    strided: [unary::strided::Kernel; 7],
) {
    let command_queue = device.new_command_queue();
    let options = MTLResourceOptions::StorageModeManaged;

    let iterations = 10000;
    let input = device.new_buffer_with_data(
        v.as_ptr() as *const core::ffi::c_void,
        core::mem::size_of_val(v) as u64,
        options,
    );
    let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options);

    // Contiguous
    for kernel_name in contiguous {
        let total_time = autoreleasepool(|| {
            let command_buffer = command_queue.new_command_buffer();
            let start = Instant::now();
            for _ in 0..iterations {
                call_unary_contiguous(
                    device,
                    &command_buffer,
                    kernels,
                    kernel_name,
                    v.len(),
                    &input,
                    &mut output,
                )
                .unwrap();
            }
            command_buffer.commit();
            command_buffer.wait_until_completed();
            start.elapsed()
        });
        println!(
            "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
            type_name::<T>().split("::").last().unwrap(),
            kernel_name.0,
            v.len(),
            iterations,
            total_time,
            total_time / iterations
        );
    }

    // Strided
    let shape = vec![2, 5_000];
    let strides = vec![2, 1];
    let offset = 0;
    for kernel_name in &strided {
        let total_time = autoreleasepool(|| {
            let command_buffer = command_queue.new_command_buffer();
            let start = Instant::now();
            for _ in 0..iterations {
                call_unary_strided(
                    device,
                    command_buffer,
                    &kernels,
                    kernel_name,
                    &shape,
                    &input,
                    &strides,
                    offset,
                    &mut output,
                    0,
                )
                .unwrap();
            }
            command_buffer.commit();
            command_buffer.wait_until_completed();
            start.elapsed()
        });
        println!(
            "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
            type_name::<T>().split("::").last().unwrap(),
            kernel_name.0,
            v.len(),
            iterations,
            total_time,
            total_time / iterations
        );
    }
}
```
candle/candle-metal-kernels/tmp/unary.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/unary.rs", "repo_id": "candle", "token_count": 3489 }
38
```rust
//! Variable initialization.
// This is based on:
// https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/nn/init.py#
use candle::{DType, Device, Result, Shape, Tensor, Var};

/// Number of features as input or output of a layer.
/// In Kaiming initialization, choosing `FanIn` preserves
/// the magnitude of the variance of the weights in the
/// forward pass, choosing `FanOut` preserves this
/// magnitude in the backward pass.
#[derive(Debug, Copy, Clone)]
pub enum FanInOut {
    FanIn,
    FanOut,
}

impl FanInOut {
    /// Compute the fan-in or fan-out value for a weight tensor of
    /// the specified dimensions.
    /// <https://github.com/pytorch/pytorch/blob/dbeacf11820e336e803bb719b7aaaf2125ae4d9c/torch/nn/init.py#L284>
    pub fn for_shape(&self, shape: &Shape) -> usize {
        let dims = shape.dims();
        let receptive_field_size: usize = dims.iter().skip(2).product();
        match &self {
            FanInOut::FanIn => {
                if dims.len() < 2 {
                    1
                } else {
                    dims[1] * receptive_field_size
                }
            }
            FanInOut::FanOut => {
                if dims.is_empty() {
                    1
                } else {
                    dims[0] * receptive_field_size
                }
            }
        }
    }
}

#[derive(Debug, Copy, Clone)]
pub enum NormalOrUniform {
    Normal,
    Uniform,
}

/// The non-linear function that follows this layer. ReLU is the
/// recommended value.
#[derive(Debug, Copy, Clone)]
pub enum NonLinearity {
    ReLU,
    Linear,
    Sigmoid,
    Tanh,
    SELU,
    ExplicitGain(f64),
}

impl NonLinearity {
    // https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/nn/init.py#L67
    pub fn gain(&self) -> f64 {
        match *self {
            NonLinearity::ReLU => 2f64.sqrt(),
            NonLinearity::Tanh => 5. / 3.,
            NonLinearity::Linear | NonLinearity::Sigmoid => 1.,
            NonLinearity::SELU => 0.75,
            NonLinearity::ExplicitGain(g) => g,
        }
    }
}

/// Variable initializations.
#[derive(Debug, Copy, Clone)]
pub enum Init {
    /// Constant value.
    Const(f64),
    /// Random normal with some mean and standard deviation.
    Randn { mean: f64, stdev: f64 },
    /// Uniform initialization between some lower and upper bounds.
    Uniform { lo: f64, up: f64 },
    /// Kaiming uniform initialization.
    /// See "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification"
    /// He, K. et al. (2015). This uses a uniform distribution.
    Kaiming {
        dist: NormalOrUniform,
        fan: FanInOut,
        non_linearity: NonLinearity,
    },
}

pub const ZERO: Init = Init::Const(0.);
pub const ONE: Init = Init::Const(1.);

pub const DEFAULT_KAIMING_UNIFORM: Init = Init::Kaiming {
    dist: NormalOrUniform::Uniform,
    fan: FanInOut::FanIn,
    non_linearity: NonLinearity::ReLU,
};

pub const DEFAULT_KAIMING_NORMAL: Init = Init::Kaiming {
    dist: NormalOrUniform::Normal,
    fan: FanInOut::FanIn,
    non_linearity: NonLinearity::ReLU,
};

impl Init {
    /// Creates a new tensor with the specified shape, device, and initialization.
    pub fn var<S: Into<Shape>>(&self, s: S, dtype: DType, device: &Device) -> Result<Var> {
        match self {
            Self::Const(v) if *v == 0. => Var::zeros(s, dtype, device),
            Self::Const(v) if *v == 1. => Var::ones(s, dtype, device),
            Self::Const(cst) => {
                Var::from_tensor(&Tensor::ones(s, dtype, device)?.affine(*cst, 0.)?)
            }
            Self::Uniform { lo, up } => Var::rand_f64(*lo, *up, s, dtype, device),
            Self::Randn { mean, stdev } => Var::randn_f64(*mean, *stdev, s, dtype, device),
            Self::Kaiming {
                dist,
                fan,
                non_linearity,
            } => {
                let s = s.into();
                let fan = fan.for_shape(&s);
                let gain = non_linearity.gain();
                let std = gain / (fan as f64).sqrt();
                match dist {
                    NormalOrUniform::Uniform => {
                        let bound = 3f64.sqrt() * std;
                        Var::rand_f64(-bound, bound, s, dtype, device)
                    }
                    NormalOrUniform::Normal => Var::randn_f64(0., std, s, dtype, device),
                }
            }
        }
    }
}

impl Default for Init {
    fn default() -> Self {
        Self::Const(0.)
    }
}
```
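A small sketch of how these initializers are used to create variables; the shapes and dtype below are arbitrary:

```rust
use candle::{DType, Device, Result};
use candle_nn::init::{Init, DEFAULT_KAIMING_UNIFORM};

fn demo() -> Result<()> {
    let dev = Device::Cpu;
    // Kaiming-uniform weights for a 128 -> 256 linear layer: fan_in = 128,
    // gain = sqrt(2) for ReLU, bound = sqrt(3) * gain / sqrt(fan_in).
    let w = DEFAULT_KAIMING_UNIFORM.var((256, 128), DType::F32, &dev)?;
    // Constant-zero bias, dispatched to the `Var::zeros` fast path above.
    let b = Init::Const(0.).var(256, DType::F32, &dev)?;
    println!("{:?} {:?}", w.shape(), b.shape());
    Ok(())
}
```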
candle/candle-nn/src/init.rs/0
{ "file_path": "candle/candle-nn/src/init.rs", "repo_id": "candle", "token_count": 2212 }
39
```rust
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

use candle::test_utils::to_vec0_round;
use candle::{Device, Result, Tensor};

/* Equivalent python code:
import torch
import torch.nn.functional as F
input = torch.tensor([
    [ 1.1050,  0.3013, -1.5394, -2.1528, -0.8634],
    [ 1.0730, -0.9419, -0.1670, -0.6582,  0.5061],
    [ 0.8318,  1.1154, -0.3610,  0.5351,  1.0830]])

target = torch.tensor([1, 0, 4])

print(F.nll_loss(F.log_softmax(input, dim=1), target))
print(F.cross_entropy(input, target))
*/
#[test]
fn nll_and_cross_entropy() -> Result<()> {
    let cpu = Device::Cpu;
    let input = Tensor::new(
        &[
            [1.1050f32, 0.3013, -1.5394, -2.1528, -0.8634],
            [1.0730, -0.9419, -0.1670, -0.6582, 0.5061],
            [0.8318, 1.1154, -0.3610, 0.5351, 1.0830],
        ],
        &cpu,
    )?;
    let target = Tensor::new(&[1u32, 0, 4], &cpu)?;

    let log_softmax = candle_nn::ops::log_softmax(&input, 1)?;
    let loss = candle_nn::loss::nll(&log_softmax, &target)?;
    assert_eq!(to_vec0_round(&loss, 4)?, 1.1312);
    let loss = candle_nn::loss::cross_entropy(&input, &target)?;
    assert_eq!(to_vec0_round(&loss, 4)?, 1.1312);
    Ok(())
}

/* Equivalent python code:
import torch
import torch.nn.functional as F

inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178],
                    [ 0.0419,  0.0763, -1.0457, -1.6692],
                    [-1.0494,  0.8111,  1.5723,  1.2315],
                    [ 1.3081,  0.6641,  1.1802, -0.2547],
                    [ 0.5292,  0.7636,  0.3692, -0.8318]])

target = torch.Tensor([[0., 1., 0., 0.],
                       [0., 1., 0., 0.],
                       [0., 0., 0., 1.],
                       [1., 0., 0., 0.],
                       [0., 0., 1., 0.]])

print(F.binary_cross_entropy_with_logits(inp, target))
*/
#[test]
fn binary_cross_entropy_with_logit() -> Result<()> {
    let cpu = Device::Cpu;

    let inp = [
        [2.3611f32, -0.8813, -0.5006, -0.2178],
        [0.0419, 0.0763, -1.0457, -1.6692],
        [-1.0494, 0.8111, 1.5723, 1.2315],
        [1.3081, 0.6641, 1.1802, -0.2547],
        [0.5292, 0.7636, 0.3692, -0.8318],
    ];

    let target = [
        [0.0f32, 1., 0., 0.],
        [0., 1., 0., 0.],
        [0., 0., 0., 1.],
        [1., 0., 0., 0.],
        [0., 0., 1., 0.],
    ];

    let inp = Tensor::new(&inp, &cpu)?;
    let target = Tensor::new(&target, &cpu)?;

    let loss = candle_nn::loss::binary_cross_entropy_with_logit(&inp, &target)?;

    assert_eq!(to_vec0_round(&loss, 4)?, 0.8224);
    Ok(())
}
```
candle/candle-nn/tests/loss.rs/0
{ "file_path": "candle/candle-nn/tests/loss.rs", "repo_id": "candle", "token_count": 1344 }
40
```python
from typing import Union, Sequence


class Tensor:
    """
    This contains the type hints for the magic methods of the `candle.Tensor` class.
    """

    def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Subtract a scalar from a tensor or one tensor from another.
        """
        pass

    def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Divide a tensor by a scalar or one tensor by another.
        """
        pass

    def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor":
        """
        Return a slice of a tensor.
        """
        pass

    def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass
```
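These stubs only document operators that already exist on `candle.Tensor`; a small sketch of the arithmetic and comparisons they describe:

```python
import candle

a = candle.randn((2, 3))
b = candle.randn((2, 3))

c = a + b      # Tensor.__add__
d = 2.0 * a    # Tensor.__rmul__ with a scalar
e = a / 0.5    # Tensor.__truediv__
mask = a >= b  # comparisons also return tensors
print(c.shape, d.shape, e.shape, mask.shape)
```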
candle/candle-pyo3/_additional_typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/_additional_typing/__init__.py", "repo_id": "candle", "token_count": 1174 }
41
```python
# Generated content DO NOT EDIT
from .. import onnx

ONNXModel = onnx.ONNXModel
ONNXTensorDescription = onnx.ONNXTensorDescription
```
candle/candle-pyo3/py_src/candle/onnx/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.py", "repo_id": "candle", "token_count": 46 }
42
```python
import candle
from candle import Tensor
from candle.nn import Linear


def test_linear_layer_can_be_constructed():
    linear = Linear(10, 10)
    assert linear is not None


def test_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)


def test_quantized_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_quantized_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)
```
candle/candle-pyo3/tests/bindings/test_linear.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_linear.py", "repo_id": "candle", "token_count": 431 }
43
```rust
use crate::models::with_tracing::{linear_b as linear, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;

#[derive(Debug, Clone)]
pub struct Config {
    pub num_layers: usize,
    pub padded_vocab_size: usize,
    pub hidden_size: usize,
    pub ffn_hidden_size: usize,
    pub kv_channels: usize,
    pub num_attention_heads: usize,
    pub seq_length: usize,
    pub layernorm_epsilon: f64,
    pub rmsnorm: bool,
    pub apply_residual_connection_post_layernorm: bool,
    pub post_layer_norm: bool,
    pub add_bias_linear: bool,
    pub add_qkv_bias: bool,
    pub bias_dropout_fusion: bool,
    pub multi_query_attention: bool,
    pub multi_query_group_num: usize,
    pub apply_query_key_layer_scaling: bool,
    pub attention_softmax_in_fp32: bool,
    pub fp32_residual_connection: bool,
}

impl Config {
    pub fn glm3_6b() -> Self {
        Self {
            num_layers: 28,
            padded_vocab_size: 65024,
            hidden_size: 4096,
            ffn_hidden_size: 13696,
            kv_channels: 128,
            num_attention_heads: 32,
            seq_length: 8192,
            layernorm_epsilon: 1e-5,
            rmsnorm: true,
            apply_residual_connection_post_layernorm: false,
            post_layer_norm: true,
            add_bias_linear: false,
            add_qkv_bias: true,
            bias_dropout_fusion: true,
            multi_query_attention: true,
            multi_query_group_num: 2,
            apply_query_key_layer_scaling: true,
            attention_softmax_in_fp32: true,
            fp32_residual_connection: false,
        }
    }
}

#[derive(Debug, Clone)]
struct RotaryEmbedding {
    cache: Tensor,
}

impl RotaryEmbedding {
    fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> {
        let rotary_dim = cfg.kv_channels;
        let n_elem = rotary_dim / 2;
        let inv_freq: Vec<_> = (0..n_elem)
            .step_by(2)
            .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)?
            .to_dtype(dtype)?
            .reshape((cfg.seq_length, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?;
        Ok(Self { cache })
    }

    fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (seqlen, _b, np, _hn) = xs.dims4()?;
        let cache = self.cache.narrow(0, seqlen_offset, seqlen)?;
        let rot_dim = cache.dim(D::Minus2)? * 2;
        let (xs, xs_pass) = (
            xs.narrow(D::Minus1, 0, rot_dim)?,
            xs.narrow(D::Minus1, rot_dim, rot_dim)?,
        );
        let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?;
        let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?;
        let (xshaped0, xshaped1) = (
            xshaped.i((.., .., .., .., 0))?,
            xshaped.i((.., .., .., .., 1))?,
        );
        let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?);
        let xs_out = Tensor::stack(
            &[
                (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?,
                (xshaped1.broadcast_mul(&cache0)? + xshaped0.broadcast_mul(&cache1)?)?,
            ],
            D::Minus1,
        )?;
        let xs_out = xs_out.flatten_from(3)?;
        Tensor::cat(&[xs_out, xs_pass], D::Minus1)
    }
}

#[derive(Debug, Clone)]
struct CoreAttention {
    coeff: Option<f64>,
    norm_factor: f64,
}

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
    let m = mask.where_cond(&on_true, on_false)?;
    Ok(m)
}

impl CoreAttention {
    fn new(layer_number: usize, cfg: &Config) -> Result<Self> {
        let norm_factor = (cfg.kv_channels as f64).sqrt();
        let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling {
            let coeff = f64::max(1.0, layer_number as f64);
            (norm_factor * coeff, Some(coeff))
        } else {
            (norm_factor, None)
        };
        Ok(Self { coeff, norm_factor })
    }

    fn forward(
        &self,
        query_layer: &Tensor,
        key_layer: &Tensor,
        value_layer: &Tensor,
        attention_mask: &Option<Tensor>,
    ) -> Result<Tensor> {
        let output_size = (
            query_layer.dim(1)?, // b
            query_layer.dim(2)?, // np
            query_layer.dim(0)?, // sq
            key_layer.dim(0)?,   // sk
        );
        let query_layer =
            query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?;
        let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?;
        let matmul_result = Tensor::matmul(
            &query_layer.transpose(0, 1)?,
            &key_layer.transpose(0, 1)?.transpose(1, 2)?,
        )?;
        let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?;
        let matmul_result = match self.coeff {
            None => matmul_result,
            Some(coeff) => (matmul_result * coeff)?,
        };
        let attention_scores = match attention_mask {
            Some(mask) => masked_fill(
                &matmul_result,
                &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?,
                f32::NEG_INFINITY,
            )?,
            None => matmul_result,
        };
        let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;

        let output_size = (
            value_layer.dim(1)?,
            value_layer.dim(2)?,
            query_layer.dim(0)?,
            value_layer.dim(3)?,
        );
        let value_layer =
            value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?;
        let attention_probs =
            attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?;
        let context_layer = Tensor::matmul(&attention_probs, &value_layer.transpose(0, 1)?)?;
        let context_layer = context_layer.reshape(output_size)?;
        let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?;
        context_layer.flatten_from(D::Minus2)
    }
}

#[derive(Debug, Clone)]
struct SelfAttention {
    query_key_value: Linear,
    core_attention: CoreAttention,
    dense: Linear,
    multi_query_attention: bool,
    num_attention_heads_per_partition: usize,
    num_multi_query_groups_per_partition: usize,
    hidden_size_per_attention_head: usize,
    kv_cache: Option<(Tensor, Tensor)>,
}

impl SelfAttention {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let projection_size = cfg.kv_channels * cfg.num_attention_heads;
        let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads;
        let qkv_hidden_size = if cfg.multi_query_attention {
            projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num
        } else {
            3 * projection_size
        };
        let query_key_value = linear(
            cfg.hidden_size,
            qkv_hidden_size,
            cfg.add_bias_linear || cfg.add_qkv_bias,
            vb.pp("query_key_value"),
        )?;
        let core_attention = CoreAttention::new(layer_number, cfg)?;
        let dense = linear(
            cfg.hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense"),
        )?;
        Ok(Self {
            query_key_value,
            core_attention,
            dense,
            multi_query_attention: cfg.multi_query_attention,
            num_attention_heads_per_partition: cfg.num_attention_heads,
            num_multi_query_groups_per_partition: cfg.multi_query_group_num,
            hidden_size_per_attention_head: cfg.kv_channels,
            kv_cache: None,
        })
    }

    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let mixed_x_layer = xs.apply(&self.query_key_value)?;
        if !self.multi_query_attention {
            candle::bail!("only multi_query_attention=true is supported")
        }
        let hpa = self.hidden_size_per_attention_head;
        let query_layer =
            mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?;
        let key_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let value_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa
                + self.num_multi_query_groups_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let query_layer = query_layer.reshape((
            query_layer.dim(0)?,
            query_layer.dim(1)?,
            self.num_attention_heads_per_partition,
            hpa,
        ))?;
        let key_layer = key_layer.reshape((
            key_layer.dim(0)?,
            key_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        let value_layer = value_layer.reshape((
            value_layer.dim(0)?,
            value_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;

        // Rotary embeddings.
        let seqlen_offset = match &self.kv_cache {
            None => 0,
            Some((prev_k, _)) => prev_k.dim(0)?,
        };
        let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?;
        let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?;

        // KV cache.
        let (key_layer, value_layer) = match &self.kv_cache {
            None => (key_layer, value_layer),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key_layer], 0)?;
                let v = Tensor::cat(&[prev_v, &value_layer], 0)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key_layer.clone(), value_layer.clone()));

        // Repeat KV.
        let ratio =
            self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition;
        let key_layer = {
            let (d0, d1, d2, d3) = key_layer.dims4()?;
            key_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let value_layer = {
            let (d0, d1, d2, d3) = value_layer.dims4()?;
            value_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };

        let context_layer =
            self.core_attention
                .forward(&query_layer, &key_layer, &value_layer, attention_mask)?;
        let output = context_layer.apply(&self.dense)?;
        Ok(output)
    }
}

#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
struct MLP {
    dense_h_to_4h: Linear,
    dense_4h_to_h: Linear,
}

impl MLP {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let dense_h_to_4h = linear(
            cfg.hidden_size,
            cfg.ffn_hidden_size * 2,
            cfg.add_bias_linear,
            vb.pp("dense_h_to_4h"),
        )?;
        let dense_4h_to_h = linear(
            cfg.ffn_hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense_4h_to_h"),
        )?;
        Ok(Self {
            dense_4h_to_h,
            dense_h_to_4h,
        })
    }
}

impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.dense_h_to_4h)?
            .apply(&candle_nn::Activation::Swiglu)?
            .apply(&self.dense_4h_to_h)
    }
}

#[derive(Debug, Clone)]
struct Block {
    input_layernorm: candle_nn::LayerNorm,
    self_attention: SelfAttention,
    post_attention_layernorm: candle_nn::LayerNorm,
    mlp: MLP,
    apply_residual_connection_post_layernorm: bool,
}

impl Block {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let input_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
        };
        let post_attention_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
        };
        let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            input_layernorm,
            self_attention,
            post_attention_layernorm,
            mlp,
            apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm,
        })
    }

    fn reset_kv_cache(&mut self) {
        self.self_attention.reset_kv_cache()
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let layernorm_output = xs.apply(&self.input_layernorm)?;
        let attention_output =
            self.self_attention
                .forward(&layernorm_output, attention_mask, rotary_emb)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            xs
        };
        let layernorm_input = (residual + attention_output)?;
        let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?;
        let mlp_output = layernorm_output.apply(&self.mlp)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            &layernorm_input
        };
        mlp_output + residual
    }
}

#[derive(Debug, Clone)]
struct Transformer {
    layers: Vec<Block>,
    final_layernorm: Option<candle_nn::LayerNorm>,
    rotary_emb: RotaryEmbedding,
}

impl Transformer {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_l = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_layers);
        for layer_index in 0..cfg.num_layers {
            let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?;
            layers.push(block)
        }
        let final_layernorm = if cfg.post_layer_norm {
            let ln = if cfg.rmsnorm {
                candle_nn::rms_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
                .into_inner()
            } else {
                candle_nn::layer_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
            };
            Some(ln)
        } else {
            None
        };
        let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?;
        Ok(Self {
            layers,
            final_layernorm,
            rotary_emb,
        })
    }

    fn reset_kv_cache(&mut self) {
        for block in self.layers.iter_mut() {
            block.reset_kv_cache()
        }
    }

    fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> {
        let mut xs = xs.clone();
        for block in self.layers.iter_mut() {
            xs = block.forward(&xs, attention_mask, &self.rotary_emb)?
        }
        match self.final_layernorm.as_ref() {
            None => Ok(xs),
            Some(ln) => xs.apply(ln),
        }
    }
}

#[derive(Debug, Clone)]
struct Embedding {
    word_embeddings: candle_nn::Embedding,
    fp32_residual_connection: bool,
}

impl Embedding {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let word_embeddings = candle_nn::embedding(
            cfg.padded_vocab_size,
            cfg.hidden_size,
            vb.pp("word_embeddings"),
        )?;
        Ok(Self {
            word_embeddings,
            fp32_residual_connection: cfg.fp32_residual_connection,
        })
    }
}

impl Module for Embedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h
        if self.fp32_residual_connection {
            xs.to_dtype(candle::DType::F32)
        } else {
            xs.contiguous()
        }
    }
}

#[derive(Debug, Clone)]
pub struct Model {
    embedding: Embedding,
    encoder: Transformer,
    output_layer: Linear,
}

fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mask: Vec<_> = (0..size)
        .flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
        .collect();
    Tensor::from_slice(&mask, (size, size), device)
}

impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("transformer");
        let embedding = Embedding::new(cfg, vb.pp("embedding"))?;
        let encoder = Transformer::new(cfg, vb.pp("encoder"))?;
        let output_layer = linear(
            cfg.hidden_size,
            cfg.padded_vocab_size,
            false,
            vb.pp("output_layer"),
        )?;
        Ok(Self {
            embedding,
            encoder,
            output_layer,
        })
    }

    pub fn reset_kv_cache(&mut self) {
        self.encoder.reset_kv_cache()
    }

    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let input_embeds = xs.apply(&self.embedding)?;
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            Some(get_mask(seq_len, xs.device())?)
        };
        let xs = self.encoder.forward(&input_embeds, &attention_mask)?;
        let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?;
        Ok(lm_logits)
    }
}
```
candle/candle-transformers/src/models/chatglm.rs/0
{ "file_path": "candle/candle-transformers/src/models/chatglm.rs", "repo_id": "candle", "token_count": 10342 }
44
```rust
use candle::{DType, Device, Result, Tensor, D};
use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder};
use serde::Deserialize;

const MAX_SEQ_LEN: usize = 5000;

fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> {
    let (weight, bias) = match (vb.get(size, "weight"), vb.get(size, "bias")) {
        (Ok(weight), Ok(bias)) => (weight, bias),
        (Err(err), _) | (_, Err(err)) => {
            if let (Ok(weight), Ok(bias)) = (vb.get(size, "gamma"), vb.get(size, "beta")) {
                (weight, bias)
            } else {
                return Err(err);
            }
        }
    };
    Ok(LayerNorm::new(weight, bias, eps))
}

// https://raw.githubusercontent.com/huggingface/transformers/030c863aaa0165e98352b61697430bf69bf33755/src/transformers/models/falcon/configuration_falcon.py
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub layer_norm_epsilon: f64,
    pub initializer_range: f64,
    pub use_cache: bool,
    pub bos_token_id: u32,
    pub eos_token_id: u32,
    pub hidden_dropout: f64,
    pub attention_dropout: f64,
    pub n_head_kv: Option<usize>,
    pub alibi: bool,
    pub new_decoder_architecture: bool,
    pub multi_query: bool,
    pub parallel_attn: bool,
    pub bias: bool,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            vocab_size: 65024,
            hidden_size: 4544,
            num_hidden_layers: 32,
            num_attention_heads: 71,
            layer_norm_epsilon: 1e-5,
            initializer_range: 0.02,
            use_cache: true,
            bos_token_id: 11,
            eos_token_id: 11,
            hidden_dropout: 0.0,
            attention_dropout: 0.0,
            n_head_kv: None,
            alibi: false,
            new_decoder_architecture: false,
            multi_query: true,
            parallel_attn: true,
            bias: false,
        }
    }
}

impl Config {
    pub fn validate(&self) -> Result<()> {
        if self.alibi {
            candle::bail!("alibi is not supported");
        }
        if self.new_decoder_architecture {
            candle::bail!("new_decoder_architecture is not supported");
        }
        if self.n_head_kv.is_some() {
            candle::bail!("n_head_kv is not supported");
        }
        Ok(())
    }

    // https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json
    pub fn falcon7b() -> Self {
        // This is currently on par with the defaults, the defaults come from the Python default
        // arguments for the config initialization whereas the following come from the json config.
        Self {
            vocab_size: 65024,
            hidden_size: 4544,
            num_hidden_layers: 32,
            num_attention_heads: 71,
            layer_norm_epsilon: 1e-5,
            initializer_range: 0.02,
            use_cache: true,
            bos_token_id: 11,
            eos_token_id: 11,
            hidden_dropout: 0.,
            attention_dropout: 0.,
            n_head_kv: None,
            alibi: false,
            new_decoder_architecture: false,
            multi_query: true,
            parallel_attn: true,
            bias: false,
        }
    }

    fn head_dim(&self) -> usize {
        self.hidden_size / self.num_attention_heads
    }

    fn rotary(&self) -> bool {
        !self.alibi
    }
}

fn rotate_half(x: &Tensor) -> Result<Tensor> {
    let l = x.dim(D::Minus1)?;
    let x1 = x.narrow(D::Minus1, 0, l / 2)?;
    let x2 = x.narrow(D::Minus1, l / 2, l - l / 2)?;
    let x21 = Tensor::cat(&[&x2.neg()?, &x1], D::Minus1)?;
    Ok(x21)
}

#[derive(Debug, Clone)]
struct FalconRotaryEmbedding {
    inv_freq: Tensor,
    cache: Option<(usize, Tensor, Tensor)>,
}

impl FalconRotaryEmbedding {
    fn load(device: &Device, cfg: &Config) -> Result<Self> {
        let head_dim = cfg.head_dim();
        let inv_freq: Vec<_> = (0..head_dim)
            .step_by(2)
            .map(|i| 1f32 / 10000f32.powf(i as f32 / head_dim as f32))
            .collect();
        Ok(Self {
            inv_freq: Tensor::new(inv_freq.as_slice(), device)?,
            cache: None,
        })
    }

    fn cos_sin(
        &mut self,
        seq_len: usize,
        device: &Device,
        dtype: DType,
    ) -> Result<(Tensor, Tensor)> {
        match &self.cache {
            Some((s, cos, sin)) if *s == seq_len => {
                return Ok((cos.clone(), sin.clone()));
            }
            _ => {}
        }
        let t = Tensor::arange(0, seq_len as u32, device)?.to_dtype(dtype)?;
        let inv_freq = self.inv_freq.to_dtype(dtype)?;
        let freqs = t.unsqueeze(1)?.matmul(&inv_freq.unsqueeze(0)?)?;
        let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        let cos = emb.cos()?;
        let sin = emb.sin()?;
        self.cache = Some((seq_len, cos.clone(), sin.clone()));
        Ok((cos, sin))
    }

    fn forward(
        &mut self,
        query: &Tensor,
        key: &Tensor,
        past_kv_len: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_batch, seq_len, _head_dim) = query.dims3()?;
        let (cos, sin) = self.cos_sin(MAX_SEQ_LEN, query.device(), query.dtype())?;
        let cos = cos.narrow(0, past_kv_len, seq_len)?;
        let sin = sin.narrow(0, past_kv_len, seq_len)?;
        let qs = (query.broadcast_mul(&cos)? + &rotate_half(query)?.broadcast_mul(&sin)?)?;
        let ks = (key.broadcast_mul(&cos)? + &rotate_half(key)?.broadcast_mul(&sin)?)?;
        Ok((qs, ks))
    }
}

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?
        .to_dtype(on_false.dtype())?
        .broadcast_as(shape.dims())?;
    let m = mask.where_cond(&on_true, on_false)?;
    Ok(m)
}

#[derive(Debug, Clone)]
struct FalconAttention {
    query_key_value: Linear,
    dense: Linear,
    maybe_rotary: Option<FalconRotaryEmbedding>,
    kv_cache: Option<(Tensor, Tensor)>,
    inv_norm_factor: f64,
    multi_query: bool,
    use_cache: bool,
    num_heads: usize,
    head_dim: usize,
    n_head_kv: usize,
}

impl FalconAttention {
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let maybe_rotary = if cfg.rotary() {
            let rotary = FalconRotaryEmbedding::load(vb.device(), cfg)?;
            Some(rotary)
        } else {
            None
        };
        let head_dim = cfg.head_dim();
        let hidden_size = cfg.hidden_size;
        let qkv_out_dim = if cfg.multi_query {
            hidden_size + 2 * head_dim
        } else {
            3 * hidden_size
        };
        let query_key_value = linear(hidden_size, qkv_out_dim, cfg.bias, vb.pp("query_key_value"))?;
        let dense = linear(hidden_size, hidden_size, cfg.bias, vb.pp("dense"))?;
        Ok(Self {
            query_key_value,
            dense,
            maybe_rotary,
            kv_cache: None,
            inv_norm_factor: 1. / (head_dim as f64).sqrt(),
            multi_query: cfg.multi_query,
            use_cache: cfg.use_cache,
            num_heads: cfg.num_attention_heads,
            n_head_kv: cfg.n_head_kv.unwrap_or(1),
            head_dim,
        })
    }

    fn split_heads(&self, fused_qkv: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
        let (b_sz, seq_len, _) = fused_qkv.dims3()?;
        if !self.multi_query {
            let fused_qkv = fused_qkv.reshape((b_sz, seq_len, self.num_heads, 3, self.head_dim))?;
            let q = fused_qkv.narrow(D::Minus2, 0, 1)?.squeeze(D::Minus2)?;
            let k = fused_qkv.narrow(D::Minus2, 1, 1)?.squeeze(D::Minus2)?;
            let v = fused_qkv.narrow(D::Minus2, 2, 1)?.squeeze(D::Minus2)?;
            Ok((q, k, v))
        } else {
            let fused_qkv =
                fused_qkv.reshape((b_sz, seq_len, self.num_heads + 2, self.head_dim))?;
            let d = fused_qkv.dim(D::Minus2)?;
            let q = fused_qkv.narrow(D::Minus2, 0, d - 2)?;
            let k = fused_qkv.narrow(D::Minus2, d - 2, 1)?;
            let v = fused_qkv.narrow(D::Minus2, d - 1, 1)?;
            Ok((q, k, v))
        }
    }

    fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> {
        let fused_qkv = self.query_key_value.forward(x)?;
        let head_dim = self.head_dim;
        let (query, key, value) = self.split_heads(&fused_qkv)?;
        let (b_sz, seq_len, _, _) = query.dims4()?;
        let query = query
            .transpose(1, 2)?
            .reshape((b_sz * self.num_heads, seq_len, head_dim))?;
        let key = key
            .transpose(1, 2)?
            .reshape((b_sz * self.n_head_kv, seq_len, head_dim))?;
        let value = value
            .transpose(1, 2)?
            .reshape((b_sz * self.n_head_kv, seq_len, head_dim))?;
        let (query, key) = if let Some(r) = &mut self.maybe_rotary {
            r.forward(&query, &key, past_kv_len)?
        } else {
            (query, key)
        };
        let (mut key, mut value) = (key, value);
        if self.use_cache {
            if let Some((cache_k, cache_v)) = &self.kv_cache {
                // TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for
                // arbitrarily large sizes.
                key = Tensor::cat(&[cache_k, &key], 1)?.contiguous()?;
                value = Tensor::cat(&[cache_v, &value], 1)?.contiguous()?;
            }
            self.kv_cache = Some((key.clone(), value.clone()))
        }
        let query = query.reshape((b_sz * self.num_heads, seq_len, head_dim))?;
        let all_len = past_kv_len + seq_len;
        let key = key.reshape((b_sz * self.n_head_kv, all_len, head_dim))?;
        let value = value.reshape((b_sz * self.n_head_kv, all_len, head_dim))?;

        let (key, value) = if self.n_head_kv == 1 {
            (
                key.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?,
                value.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?,
            )
        } else {
            (key, value)
        };

        // Only handle the case where alibi is None here, and non-flash attention.
        let attention_scores = (query.matmul(&key.t()?)? * self.inv_norm_factor)?;
        let attention_scores = match mask {
            None => attention_scores,
            Some(mask) => {
                let mask = masked_fill(&mask.to_dtype(DType::F32)?, mask, -1e9)?
                    .to_dtype(query.dtype())?;
                attention_scores.broadcast_add(&mask.squeeze(1)?)?
            }
        };

        let attention_scores =
            candle_nn::ops::softmax(&attention_scores.to_dtype(DType::F32)?, D::Minus1)?
                .to_dtype(x.dtype())?;
        let attn_output = attention_scores
            .matmul(&value)?
            .reshape((b_sz, self.num_heads, seq_len, head_dim))?
            .transpose(1, 2)?
            .reshape((b_sz, seq_len, self.num_heads * head_dim))?;
        let attn_output = self.dense.forward(&attn_output)?;
        Ok(attn_output)
    }

    fn clear_kv_cache(&mut self) {
        self.kv_cache = None
    }
}

#[derive(Debug, Clone)]
struct FalconMlp {
    dense_h_to_4h: Linear,
    dense_4h_to_h: Linear,
}

impl FalconMlp {
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let h = cfg.hidden_size;
        let b = cfg.bias;
        let dense_h_to_4h = linear(h, 4 * h, b, vb.pp("dense_h_to_4h"))?;
        let dense_4h_to_h = linear(4 * h, h, b, vb.pp("dense_4h_to_h"))?;
        Ok(Self {
            dense_h_to_4h,
            dense_4h_to_h,
        })
    }

    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x = self.dense_h_to_4h.forward(x)?.gelu()?;
        let x = self.dense_4h_to_h.forward(&x)?;
        Ok(x)
    }
}

#[derive(Debug, Clone)]
struct FalconDecoderLayer {
    inp_layernorm: LayerNorm,
    self_attention: FalconAttention,
    post_attention_layernorm: Option<LayerNorm>,
    mlp: FalconMlp,
    parallel_attn: bool,
}

impl FalconDecoderLayer {
    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let mlp = FalconMlp::load(vb.pp("mlp"), cfg)?;
        let inp_layernorm = layer_norm(
            cfg.hidden_size,
            cfg.layer_norm_epsilon,
            vb.pp("input_layernorm"),
        )?;
        let self_attention = FalconAttention::load(vb.pp("self_attention"), cfg)?;
        let post_attention_layernorm = if cfg.parallel_attn {
            None
        } else {
            let ln = layer_norm(
                cfg.hidden_size,
                cfg.layer_norm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?;
            Some(ln)
        };
        Ok(Self {
            inp_layernorm,
            self_attention,
            post_attention_layernorm,
            mlp,
            parallel_attn: cfg.parallel_attn,
        })
    }

    fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> {
        let residual = x.clone();
        let ln_attn = self.inp_layernorm.forward(x)?;
        let attn_output = self.self_attention.forward(&ln_attn, mask, past_kv_len)?;
        let (residual, ln_mlp) = match &self.post_attention_layernorm {
            None => (residual, ln_attn),
            Some(pal) => {
                // This should include some dropout.
                let residual = (&attn_output + &residual)?;
                let ln_mlp = pal.forward(&residual)?;
                (residual, ln_mlp)
            }
        };
        let mlp_output = self.mlp.forward(&ln_mlp)?;

        let mlp_output = if self.parallel_attn {
            (mlp_output + attn_output)?
        } else {
            mlp_output
        };
        let output = (mlp_output + residual)?;
        Ok(output)
    }

    pub fn clear_kv_cache(&mut self) {
        self.self_attention.clear_kv_cache()
    }
}

#[derive(Debug, Clone)]
pub struct Falcon {
    word_embeddings: Embedding,
    blocks: Vec<FalconDecoderLayer>,
    ln_f: LayerNorm,
    lm_head: Linear,
    config: Config,
}

fn make_causal_mask(t: usize) -> Result<Tensor> {
    let mask: Vec<_> = (0..t)
        .flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
        .collect();
    let mask = Tensor::from_slice(&mask, (t, t), &Device::Cpu)?;
    Ok(mask)
}

fn prepare_attn_mask(b_sz: usize, seq_len: usize) -> Result<Tensor> {
    // let mask = Tensor::ones((b_sz, seq_len), DType::U32, &Device::Cpu)?;
    let mask = make_causal_mask(seq_len)?;
    let mask = mask.broadcast_as((b_sz, 1, seq_len, seq_len))?;
    Ok(mask)
}

impl Falcon {
    pub fn config(&self) -> &Config {
        &self.config
    }

    pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> {
        let word_embeddings = embedding(
            cfg.vocab_size,
            cfg.hidden_size,
            vb.pp("transformer.word_embeddings"),
        )?;
        let blocks = (0..cfg.num_hidden_layers)
            .map(|i| FalconDecoderLayer::load(vb.pp(&format!("transformer.h.{i}")), &cfg))
            .collect::<Result<Vec<_>>>()?;
        let ln_f = layer_norm(
            cfg.hidden_size,
            cfg.layer_norm_epsilon,
            vb.pp("transformer.ln_f"),
        )?;
        let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb.pp("lm_head"))?;
        Ok(Self {
            word_embeddings,
            blocks,
            ln_f,
            lm_head,
            config: cfg,
        })
    }

    pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
        let (b_sz, seq_len) = input_ids.dims2()?;
        let mut hidden_state = self.word_embeddings.forward(input_ids)?;
        let past_kv_len = match &self.blocks[0].self_attention.kv_cache {
            Some((k, _)) => k.dim(1)?,
            None => 0,
        };
        let causal_mask = if seq_len <= 1 {
            None
        } else {
            Some(prepare_attn_mask(b_sz, seq_len)?.to_device(input_ids.device())?)
        };
        for block in self.blocks.iter_mut() {
            hidden_state = block.forward(&hidden_state, causal_mask.as_ref(), past_kv_len)?;
        }
        let hidden_state = self.ln_f.forward(&hidden_state)?;
        let hidden_state = hidden_state.narrow(1, seq_len - 1, 1)?;
        let logits = self.lm_head.forward(&hidden_state)?.squeeze(1)?;
        Ok(logits)
    }

    pub fn clear_kv_cache(&mut self) {
        for block in self.blocks.iter_mut() {
            block.clear_kv_cache()
        }
    }
}
```
candle/candle-transformers/src/models/falcon.rs/0
{ "file_path": "candle/candle-transformers/src/models/falcon.rs", "repo_id": "candle", "token_count": 8792 }
45
```rust
pub fn get_anyres_image_grid_shape(
    image_size: (u32, u32),
    grid_pinpoints: &[(u32, u32)],
    patch_size: u32,
) -> (u32, u32) {
    let (width, height) = select_best_resolution(image_size, grid_pinpoints);
    (width / patch_size, height / patch_size)
}

pub fn select_best_resolution(
    original_size: (u32, u32),
    possible_resolutions: &[(u32, u32)],
) -> (u32, u32) {
    let (original_width, original_height) = original_size;
    let mut best_fit = (0, 0);
    let original_width_f = original_width as f32;
    let original_height_f = original_height as f32;
    let mut max_effective_resolution = 0_u32;
    let mut min_wasted_resolution = u32::MAX;

    for (width, height) in possible_resolutions {
        let width_f = *width as f32;
        let height_f = *height as f32;
        let scale = (width_f / original_width_f).min(height_f / original_height_f);
        let (downscaled_width, downscaled_height) = (
            (original_width_f * scale) as u32,
            (original_height_f * scale) as u32,
        );
        let effective_resolution =
            std::cmp::min((*width) * (*height), downscaled_width * downscaled_height);
        let wasted_resolution = (*width) * (*height) - effective_resolution;

        if effective_resolution > max_effective_resolution
            || (effective_resolution == max_effective_resolution
                && wasted_resolution < min_wasted_resolution)
        {
            best_fit = (*width, *height);
            max_effective_resolution = effective_resolution;
            min_wasted_resolution = wasted_resolution;
        }
    }

    best_fit
}
```
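A small sketch of how the two helpers combine. The pinpoints below are the anyres grid commonly used with a 336-pixel CLIP backbone and a 14-pixel patch size, chosen here purely for illustration, and the import path assumes the `utils` module is public as the file path suggests:

```rust
use candle_transformers::models::llava::utils::{
    get_anyres_image_grid_shape, select_best_resolution,
};

fn demo() {
    let pinpoints = [(336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008)];
    // For a 1000x800 image, (672, 672) maximizes the effective resolution.
    let best = select_best_resolution((1000, 800), &pinpoints);
    assert_eq!(best, (672, 672));
    // With 14-pixel patches this yields a 48x48 patch grid.
    let grid = get_anyres_image_grid_shape((1000, 800), &pinpoints, 14);
    assert_eq!(grid, (48, 48));
}
```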
candle/candle-transformers/src/models/llava/utils.rs/0
{ "file_path": "candle/candle-transformers/src/models/llava/utils.rs", "repo_id": "candle", "token_count": 689 }
46
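`select_best_resolution` is pure arithmetic, so it is easy to sanity-check by hand. A minimal sketch, assuming these functions are exported at `candle_transformers::models::llava::utils` (the path suggested by the file location) and using a typical LLaVA-1.6 pinpoint list:

```rust
use candle_transformers::models::llava::utils::{
    get_anyres_image_grid_shape, select_best_resolution,
};

fn main() {
    // Typical LLaVA-1.6 grid pinpoints (an assumption for illustration).
    let pinpoints = [(336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008)];
    // A square 500x500 input upscales cleanly into the 672x672 candidate: it
    // has the largest effective resolution and zero wasted pixels.
    assert_eq!(select_best_resolution((500, 500), &pinpoints), (672, 672));
    // With 14x14 patches, that resolution yields a 48x48 patch grid.
    assert_eq!(
        get_anyres_image_grid_shape((500, 500), &pinpoints, 14),
        (48, 48)
    );
}
```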
use crate::models::mixformer::{Config as PhiConfig, MixFormerSequentialForCausalLM as PhiModel};
use crate::models::with_tracing::{layer_norm, linear_b, LayerNorm, Linear};
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::VarBuilder;

#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    pub phi_config: PhiConfig,
    pub vision_config: VisionConfig,
}

impl Config {
    pub fn v2() -> Self {
        Self {
            phi_config: PhiConfig::v1_5(),
            vision_config: VisionConfig::v2(),
        }
    }
}

fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
    let dim = q.dim(D::Minus1)?;
    let scale_factor = 1.0 / (dim as f64).sqrt();
    let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
    candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}

#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct VisionConfig {
    pub(crate) image_embedding_dim: usize,
    pub(crate) model_dim: usize,
    pub(crate) hidden_dim: usize,
    pub(crate) hidden_features: usize,
    pub(crate) embed_len: usize,
    pub(crate) embed_dim: usize,
    pub(crate) num_blocks: usize,
    pub(crate) num_heads: usize,
    pub(crate) act: candle_nn::Activation,
}

impl VisionConfig {
    pub fn v2() -> Self {
        Self {
            image_embedding_dim: 1152,
            model_dim: 2048,
            hidden_dim: 2048 * 4,
            hidden_features: 4304,
            embed_len: 729,
            embed_dim: 1152,
            num_blocks: 27,
            num_heads: 16,
            act: candle_nn::Activation::GeluPytorchTanh,
        }
    }
}

#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
    linear: Linear,
}

impl LinearPatchEmbedding {
    fn new(vb: VarBuilder) -> Result<Self> {
        let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
        Ok(Self { linear })
    }
}

impl Module for LinearPatchEmbedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.linear)
    }
}

#[derive(Debug, Clone)]
struct Attention {
    num_heads: usize,
    head_dim: usize,
    qkv: Linear,
    proj: Linear,
    span: tracing::Span,
}

impl Attention {
    pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
        let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
        let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
        Ok(Self {
            num_heads,
            head_dim: dim / num_heads,
            qkv,
            proj,
            span: tracing::span!(tracing::Level::TRACE, "vit-attn"),
        })
    }
}

impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let (b, n, c) = xs.dims3()?;
        let qkv = xs
            .apply(&self.qkv)?
            .reshape((b, n, 3, self.num_heads, self.head_dim))?
            .permute((2, 0, 3, 1, 4))?;
        let (q, k, v) = (
            qkv.i(0)?.contiguous()?,
            qkv.i(1)?.contiguous()?,
            qkv.i(2)?.contiguous()?,
        );
        scaled_dot_product_attention(&q, &k, &v)?
            .transpose(1, 2)?
            .reshape((b, n, c))?
            .apply(&self.proj)
    }
}

#[derive(Debug, Clone)]
struct VitBlock {
    attn: Attention,
    mlp: Mlp,
    norm1: LayerNorm,
    norm2: LayerNorm,
    span: tracing::Span,
}

impl VitBlock {
    fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
        let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
        let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
        let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
        let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
        Ok(Self {
            attn,
            mlp,
            norm1,
            norm2,
            span: tracing::span!(tracing::Level::TRACE, "vit-block"),
        })
    }
}

impl Module for VitBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
        let xs = (xs + &ys)?;
        let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
        let xs = (&xs + &ys)?;
        Ok(xs)
    }
}

#[derive(Debug, Clone)]
struct VisionTransformer {
    patch_embed: LinearPatchEmbedding,
    pos_embed: Tensor,
    blocks: Vec<VitBlock>,
    norm: LayerNorm,
    span: tracing::Span,
}

impl VisionTransformer {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
        let pos_embed = vb.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?;
        let blocks = (0..cfg.num_blocks)
            .map(|i| {
                VitBlock::new(
                    vb.pp(&format!("blocks.{}", i)),
                    cfg.embed_dim,
                    cfg.num_heads,
                    cfg,
                )
            })
            .collect::<Result<_>>()?;
        let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
        Ok(Self {
            patch_embed,
            pos_embed,
            blocks,
            norm,
            span: tracing::span!(tracing::Level::TRACE, "vit"),
        })
    }
}

impl Module for VisionTransformer {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
        for block in self.blocks.iter() {
            xs = xs.apply(block)?;
        }
        xs.apply(&self.norm)
    }
}

#[derive(Debug, Clone)]
pub struct Encoder {
    model: VisionTransformer,
}

impl Encoder {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
        Ok(Self { model })
    }
}

impl Module for Encoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.model)
    }
}

#[derive(Debug, Clone)]
struct Mlp {
    fc1: Linear,
    act: candle_nn::Activation,
    fc2: Linear,
    span: tracing::Span,
}

impl Mlp {
    fn new(
        vb: VarBuilder,
        in_features: usize,
        hidden_features: usize,
        out_features: usize,
        act: candle_nn::Activation,
    ) -> Result<Self> {
        let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
        let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
        Ok(Self {
            fc1,
            act,
            fc2,
            span: tracing::span!(tracing::Level::TRACE, "mlp"),
        })
    }
}

impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
    }
}

#[derive(Debug, Clone)]
struct VisionProjection {
    mlp: Mlp,
}

impl VisionProjection {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let mlp = Mlp::new(
            vb.pp("mlp"),
            cfg.image_embedding_dim,
            cfg.hidden_dim,
            cfg.model_dim,
            cfg.act,
        )?;
        Ok(Self { mlp })
    }
}

impl Module for VisionProjection {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.mlp)
    }
}

#[derive(Debug, Clone)]
pub struct VisionEncoder {
    encoder: Encoder,
    projection: VisionProjection,
}

impl VisionEncoder {
    pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
        let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
        Ok(Self {
            encoder,
            projection,
        })
    }
}

impl Module for VisionEncoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, c, hp1, wp2) = xs.dims4()?;
        let (p1, p2) = (14, 14);
        let h = hp1 / p1;
        let w = wp2 / p2;
        // Use `w` for the width-side patch grid; the original `(b, c, h, p1, h, p2)`
        // only worked because the inputs happen to be square.
        xs.reshape((b, c, h, p1, w, p2))?
            .permute((0, 2, 4, 1, 3, 5))?
            .reshape((b, h * w, c * p1 * p2))?
            .apply(&self.encoder)?
            .apply(&self.projection)
    }
}

#[derive(Debug, Clone)]
pub struct Model {
    pub text_model: PhiModel,
    pub vision_encoder: VisionEncoder,
}

impl Model {
    pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
        let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
        let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
        Ok(Self {
            text_model,
            vision_encoder,
        })
    }

    pub fn vision_encoder(&self) -> &VisionEncoder {
        &self.vision_encoder
    }

    pub fn text_model(&mut self) -> &mut PhiModel {
        &mut self.text_model
    }
}
candle/candle-transformers/src/models/moondream.rs/0
{ "file_path": "candle/candle-transformers/src/models/moondream.rs", "repo_id": "candle", "token_count": 4454 }
47
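The magic numbers in `VisionConfig` (588-dimensional patches, 729 tokens) follow directly from 378x378 inputs and 14x14 patches: 378/14 = 27, 27 * 27 = 729, and 3 * 14 * 14 = 588. A small sketch, assuming only the `candle` crate, replays the reshape/permute from `VisionEncoder::forward` to confirm that token layout:

```rust
use candle::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let (b, c, side, p) = (1, 3, 378, 14);
    let grid = side / p; // 27 patches per side
    let xs = Tensor::zeros((b, c, side, side), DType::F32, &Device::Cpu)?;
    let patches = xs
        .reshape((b, c, grid, p, grid, p))?
        .permute((0, 2, 4, 1, 3, 5))?
        .reshape((b, grid * grid, c * p * p))?;
    // 729 patch tokens of dimension 588, matching LinearPatchEmbedding's input.
    assert_eq!(patches.dims3()?, (1, 729, 588));
    Ok(())
}
```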
use crate::models::moondream::{Config, VisionConfig};
use crate::models::quantized_mixformer::MixFormerSequentialForCausalLM as PhiModel;
use crate::quantized_nn::{layer_norm, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{IndexOp, Module, Result, Tensor, D};

fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
    let dim = q.dim(D::Minus1)?;
    let scale_factor = 1.0 / (dim as f64).sqrt();
    let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
    candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}

#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
    linear: Linear,
}

impl LinearPatchEmbedding {
    fn new(vb: VarBuilder) -> Result<Self> {
        let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
        Ok(Self { linear })
    }
}

impl Module for LinearPatchEmbedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.linear)
    }
}

#[derive(Debug, Clone)]
struct Attention {
    num_heads: usize,
    head_dim: usize,
    qkv: Linear,
    proj: Linear,
}

impl Attention {
    pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
        let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
        let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
        Ok(Self {
            num_heads,
            head_dim: dim / num_heads,
            qkv,
            proj,
        })
    }
}

impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        let qkv = xs
            .apply(&self.qkv)?
            .reshape((b, n, 3, self.num_heads, self.head_dim))?
            .permute((2, 0, 3, 1, 4))?;
        let (q, k, v) = (
            qkv.i(0)?.contiguous()?,
            qkv.i(1)?.contiguous()?,
            qkv.i(2)?.contiguous()?,
        );
        scaled_dot_product_attention(&q, &k, &v)?
            .transpose(1, 2)?
            .reshape((b, n, c))?
            .apply(&self.proj)
    }
}

#[derive(Debug, Clone)]
struct VitBlock {
    attn: Attention,
    mlp: Mlp,
    norm1: candle_nn::LayerNorm,
    norm2: candle_nn::LayerNorm,
}

impl VitBlock {
    fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
        let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
        let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
        let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
        let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
        Ok(Self {
            attn,
            mlp,
            norm1,
            norm2,
        })
    }
}

impl Module for VitBlock {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
        let xs = (xs + &ys)?;
        let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
        let xs = (&xs + &ys)?;
        Ok(xs)
    }
}

#[derive(Debug, Clone)]
struct VisionTransformer {
    patch_embed: LinearPatchEmbedding,
    pos_embed: Tensor,
    blocks: Vec<VitBlock>,
    norm: candle_nn::LayerNorm,
}

impl VisionTransformer {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
        let pos_embed = vb
            .get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?
            .dequantize(vb.device())?;
        let blocks = (0..cfg.num_blocks)
            .map(|i| {
                VitBlock::new(
                    vb.pp(format!("blocks.{}", i)),
                    cfg.embed_dim,
                    cfg.num_heads,
                    cfg,
                )
            })
            .collect::<Result<_>>()?;
        let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
        Ok(Self {
            patch_embed,
            pos_embed,
            blocks,
            norm,
        })
    }
}

impl Module for VisionTransformer {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
        for block in self.blocks.iter() {
            xs = xs.apply(block)?;
        }
        xs.apply(&self.norm)
    }
}

#[derive(Debug, Clone)]
pub struct Encoder {
    model: VisionTransformer,
}

impl Encoder {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
        Ok(Self { model })
    }
}

impl Module for Encoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.model)
    }
}

#[derive(Debug, Clone)]
struct Mlp {
    fc1: Linear,
    act: candle_nn::Activation,
    fc2: Linear,
}

impl Mlp {
    fn new(
        vb: VarBuilder,
        in_features: usize,
        hidden_features: usize,
        out_features: usize,
        act: candle_nn::Activation,
    ) -> Result<Self> {
        let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
        let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
        Ok(Self { fc1, act, fc2 })
    }
}

impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
    }
}

#[derive(Debug, Clone)]
struct VisionProjection {
    mlp: Mlp,
}

impl VisionProjection {
    fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let mlp = Mlp::new(
            vb.pp("mlp"),
            cfg.image_embedding_dim,
            cfg.hidden_dim,
            cfg.model_dim,
            cfg.act,
        )?;
        Ok(Self { mlp })
    }
}

impl Module for VisionProjection {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.mlp)
    }
}

#[derive(Debug, Clone)]
pub struct VisionEncoder {
    encoder: Encoder,
    projection: VisionProjection,
}

impl VisionEncoder {
    pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
        let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
        let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
        Ok(Self {
            encoder,
            projection,
        })
    }
}

impl Module for VisionEncoder {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, c, hp1, wp2) = xs.dims4()?;
        let (p1, p2) = (14, 14);
        let h = hp1 / p1;
        let w = wp2 / p2;
        // Use `w` for the width-side patch grid; the original `(b, c, h, p1, h, p2)`
        // only worked because the inputs happen to be square.
        xs.reshape((b, c, h, p1, w, p2))?
            .permute((0, 2, 4, 1, 3, 5))?
            .reshape((b, h * w, c * p1 * p2))?
            .apply(&self.encoder)?
            .apply(&self.projection)
    }
}

pub struct Model {
    pub text_model: PhiModel,
    pub vision_encoder: VisionEncoder,
}

impl Model {
    pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
        let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
        let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
        Ok(Self {
            text_model,
            vision_encoder,
        })
    }

    pub fn vision_encoder(&self) -> &VisionEncoder {
        &self.vision_encoder
    }

    pub fn text_model(&mut self) -> &mut PhiModel {
        &mut self.text_model
    }
}
{ "file_path": "candle/candle-transformers/src/models/quantized_moondream.rs", "repo_id": "candle", "token_count": 3668 }
48
use super::with_tracing::{layer_norm, linear_no_bias as linear, LayerNorm, Linear};
use candle::{IndexOp, Result, Tensor};
use candle_nn::{embedding, Embedding, Module, VarBuilder};

pub use crate::models::rwkv_v5::{Config, State, Tokenizer};

#[derive(Debug, Clone)]
struct SelfAttention {
    key: Linear,
    receptance: Linear,
    value: Linear,
    gate: Linear,
    output: Linear,
    ln_x: candle_nn::GroupNorm,
    time_mix_x: Tensor,
    time_mix_w: Tensor,
    time_mix_key: Tensor,
    time_mix_value: Tensor,
    time_mix_receptance: Tensor,
    time_decay: Tensor,
    time_faaaa: Tensor,
    time_mix_gate: Tensor,
    time_decay_w1: Tensor,
    time_decay_w2: Tensor,
    time_mix_w1: Tensor,
    time_mix_w2: Tensor,
    layer_id: usize,
    n_attn_heads: usize,
}

impl SelfAttention {
    fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let hidden_size = cfg.hidden_size;
        let attn_hidden_size = cfg.attention_hidden_size;
        let key = linear(hidden_size, attn_hidden_size, vb.pp("key"))?;
        let receptance = linear(hidden_size, attn_hidden_size, vb.pp("receptance"))?;
        let value = linear(hidden_size, attn_hidden_size, vb.pp("value"))?;
        let gate = linear(hidden_size, attn_hidden_size, vb.pp("gate"))?;
        let output = linear(attn_hidden_size, hidden_size, vb.pp("output"))?;
        let ln_x = candle_nn::group_norm(
            hidden_size / cfg.head_size,
            hidden_size,
            1e-5,
            vb.pp("ln_x"),
        )?;
        let time_mix_x = vb.get((1, 1, cfg.hidden_size), "time_mix_x")?;
        let time_mix_w = vb.get((1, 1, cfg.hidden_size), "time_mix_w")?;
        let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
        let time_mix_value = vb.get((1, 1, cfg.hidden_size), "time_mix_value")?;
        let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
        let n_attn_heads = cfg.hidden_size / cfg.head_size;
        let time_decay = vb.get((1, 1, cfg.hidden_size), "time_decay")?;
        let time_faaaa = vb.get((n_attn_heads, cfg.head_size), "time_faaaa")?;
        let time_mix_gate = vb.get((1, 1, cfg.hidden_size), "time_mix_gate")?;
        let time_decay_w1 = vb.get((cfg.hidden_size, n_attn_heads * 2), "time_decay_w1")?;
        let time_decay_w2 = vb.get((n_attn_heads * 2, cfg.hidden_size), "time_decay_w2")?;
        let time_mix_w1 = vb.get((cfg.hidden_size, n_attn_heads * 5), "time_mix_w1")?;
        let time_mix_w2 = vb.get((5, n_attn_heads, cfg.hidden_size), "time_mix_w2")?;
        Ok(Self {
            key,
            value,
            receptance,
            gate,
            output,
            ln_x,
            time_mix_x,
            time_mix_w,
            time_mix_key,
            time_mix_value,
            time_mix_receptance,
            time_decay,
            time_faaaa,
            time_mix_gate,
            time_decay_w1,
            time_decay_w2,
            time_mix_w1,
            time_mix_w2,
            layer_id,
            n_attn_heads,
        })
    }

    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let h = self.n_attn_heads;
        let (b, t, s) = xs.dims3()?;
        let s = s / h;
        let (receptance, key, value, gate, w) = {
            // extract key-value
            let shifted = state.per_layer[self.layer_id].extract_key_value.clone();
            let shifted = if shifted.rank() == 2 {
                shifted.unsqueeze(1)?
            } else {
                shifted
            };
            let sx = (&shifted - xs)?;
            let xxx = (xs + &sx * &self.time_mix_x)?;
            let xxx = xxx
                .broadcast_matmul(&self.time_mix_w1)?
                .tanh()?
                .reshape((b * t, 5, ()))?
                .transpose(0, 1)?;
            let xxx = xxx.matmul(&self.time_mix_w2)?.reshape((5, b, t, ()))?;
            let (mw, mk, mv, mr, mg) = (xxx.i(0)?, xxx.i(1)?, xxx.i(2)?, xxx.i(3)?, xxx.i(4)?);

            let xw = (xs + &sx * (&self.time_mix_w + &mw)?)?;
            let xk = (xs + &sx * (&self.time_mix_key + &mk)?)?;
            let xv = (xs + &sx * (&self.time_mix_value + &mv)?)?;
            let xr = (xs + &sx * (&self.time_mix_receptance + &mr)?)?;
            let xg = (xs + &sx * (&self.time_mix_gate + &mg)?)?;

            let w = (&self.time_decay
                + xw.broadcast_matmul(&self.time_decay_w1)?
                    .tanh()?
                    .broadcast_matmul(&self.time_decay_w2)?)?
            .reshape(((), 1, 1))?
            .reshape((self.n_attn_heads, (), 1))?;

            let key = self.key.forward(&xk)?;
            let value = self.value.forward(&xv)?;
            let receptance = self.receptance.forward(&xr)?;
            let gate = candle_nn::ops::silu(&self.gate.forward(&xg)?)?;
            state.per_layer[self.layer_id].extract_key_value = xs.i((.., t - 1))?;
            (receptance, key, value, gate, w)
        };
        // linear attention
        let mut state_ = state.per_layer[self.layer_id].linear_attention.clone();
        let key = key.reshape((b, t, h, s))?.permute((0, 2, 3, 1))?;
        let value = value.reshape((b, t, h, s))?.transpose(1, 2)?;
        let receptance = receptance.reshape((b, t, h, s))?.transpose(1, 2)?;

        let w = w.exp()?.neg()?.exp()?;

        let time_faaaa =
            self.time_faaaa
                .reshape(((), 1, 1))?
                .reshape((self.n_attn_heads, (), 1))?;

        let mut out: Vec<Tensor> = Vec::with_capacity(t);
        for t_ in 0..t {
            let rt = receptance.i((.., .., t_..t_ + 1))?.contiguous()?;
            let kt = key.i((.., .., .., t_..t_ + 1))?.contiguous()?;
            let vt = value.i((.., .., t_..t_ + 1))?.contiguous()?;
            let at = kt.matmul(&vt)?;
            let rhs = (time_faaaa.broadcast_mul(&at)? + &state_)?;
            let out_ = rt.matmul(&rhs)?.squeeze(2)?;
            state_ = (&at + w.broadcast_mul(&state_))?;
            out.push(out_)
        }
        let out = Tensor::cat(&out, 1)?.reshape((b * t, h * s, 1))?;
        let out = out.apply(&self.ln_x)?.reshape((b, t, h * s))?;
        let out = (out * gate)?.apply(&self.output)?;
        state.per_layer[self.layer_id].linear_attention = state_;
        Ok(out)
    }
}

#[derive(Debug, Clone)]
struct FeedForward {
    time_mix_key: Tensor,
    time_mix_receptance: Tensor,
    key: Linear,
    receptance: Linear,
    value: Linear,
    layer_id: usize,
}

impl FeedForward {
    fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let int_size = cfg
            .intermediate_size
            .unwrap_or(((cfg.hidden_size as f64 * 3.5) as usize) / 32 * 32);
        let key = linear(cfg.hidden_size, int_size, vb.pp("key"))?;
        let receptance = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("receptance"))?;
        let value = linear(int_size, cfg.hidden_size, vb.pp("value"))?;
        let time_mix_key = vb.get((1, 1, cfg.hidden_size), "time_mix_key")?;
        let time_mix_receptance = vb.get((1, 1, cfg.hidden_size), "time_mix_receptance")?;
        Ok(Self {
            key,
            receptance,
            value,
            time_mix_key,
            time_mix_receptance,
            layer_id,
        })
    }

    fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let shifted = state.per_layer[self.layer_id]
            .feed_forward
            .broadcast_sub(xs)?;
        let key = (xs + shifted.broadcast_mul(&self.time_mix_key)?)?;
        let receptance = (xs + shifted.broadcast_mul(&self.time_mix_receptance)?)?;
        let key = key.apply(&self.key)?.relu()?.sqr()?;
        let value = key.apply(&self.value)?;
        let receptance = candle_nn::ops::sigmoid(&receptance.apply(&self.receptance)?)?;
        state.per_layer[self.layer_id].feed_forward = xs.i((.., xs.dim(1)? - 1))?;
        let xs = (receptance * value)?;
        Ok(xs)
    }
}

#[derive(Debug, Clone)]
struct Block {
    pre_ln: Option<LayerNorm>,
    ln1: LayerNorm,
    ln2: LayerNorm,
    attention: SelfAttention,
    feed_forward: FeedForward,
}

impl Block {
    fn new(layer_id: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let ln1 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln1"))?;
        let ln2 = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("ln2"))?;
        let pre_ln = if layer_id == 0 {
            let ln = layer_norm(cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("pre_ln"))?;
            Some(ln)
        } else {
            None
        };
        let attention = SelfAttention::new(layer_id, cfg, vb.pp("attention"))?;
        let feed_forward = FeedForward::new(layer_id, cfg, vb.pp("feed_forward"))?;
        Ok(Self {
            pre_ln,
            ln1,
            ln2,
            attention,
            feed_forward,
        })
    }

    fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let xs = match self.pre_ln.as_ref() {
            None => xs.clone(),
            Some(pre_ln) => xs.apply(pre_ln)?,
        };
        let attention = self.attention.forward(&xs.apply(&self.ln1)?, state)?;
        let xs = (xs + attention)?;
        let feed_forward = self.feed_forward.forward(&xs.apply(&self.ln2)?, state)?;
        let xs = (xs + feed_forward)?;
        Ok(xs)
    }
}

#[derive(Debug, Clone)]
pub struct Model {
    embeddings: Embedding,
    blocks: Vec<Block>,
    ln_out: LayerNorm,
    head: Linear,
    rescale_every: usize,
    layers_are_rescaled: bool,
}

impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_m = vb.pp("rwkv");
        let embeddings = embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embeddings"))?;
        let mut blocks = Vec::with_capacity(cfg.num_hidden_layers);
        let vb_b = vb_m.pp("blocks");
        for block_index in 0..cfg.num_hidden_layers {
            let block = Block::new(block_index, cfg, vb_b.pp(block_index))?;
            blocks.push(block)
        }
        let ln_out = layer_norm(cfg.hidden_size, 1e-5, vb_m.pp("ln_out"))?;
        let head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("head"))?;
        Ok(Self {
            embeddings,
            blocks,
            ln_out,
            head,
            rescale_every: cfg.rescale_every,
            layers_are_rescaled: false, // This seems to only happen for the f16/bf16 dtypes.
        })
    }

    pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
        let (_b_size, _seq_len) = xs.dims2()?;
        let mut xs = xs.apply(&self.embeddings)?;
        for (block_idx, block) in self.blocks.iter().enumerate() {
            xs = block.forward(&xs, state)?;
            if self.layers_are_rescaled && (block_idx + 1) % self.rescale_every == 0 {
                xs = (xs / 2.)?
            }
        }
        let xs = xs.apply(&self.ln_out)?.apply(&self.head)?;
        state.pos += 1;
        Ok(xs)
    }
}
candle/candle-transformers/src/models/rwkv_v6.rs/0
{ "file_path": "candle/candle-transformers/src/models/rwkv_v6.rs", "repo_id": "candle", "token_count": 5859 }
49
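The decay chain `w.exp()?.neg()?.exp()?` above computes exp(-exp(w)), which maps any real-valued learned decay into (0, 1) so the recurrent state can only shrink from step to step. A tiny standalone sketch, assuming only the `candle` crate:

```rust
use candle::{Device, Result, Tensor};

fn main() -> Result<()> {
    let w = Tensor::new(&[-2.0f32, 0.0, 2.0], &Device::Cpu)?;
    // exp(-exp(w)): always in (0, 1), and smaller for larger w.
    let decay = w.exp()?.neg()?.exp()?;
    println!("{:?}", decay.to_vec1::<f32>()?);
    // Roughly [0.873, 0.368, 0.00062]
    Ok(())
}
```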
//! ResNet Building Blocks
//!
//! Some Residual Network blocks used in UNet models.
//!
//! Deep Residual Learning for Image Recognition, K. He et al., 2015.
//! https://arxiv.org/abs/1512.03385
use crate::models::with_tracing::{conv2d, Conv2d};
use candle::{Result, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;

/// Configuration for a ResNet block.
#[derive(Debug, Clone, Copy)]
pub struct ResnetBlock2DConfig {
    /// The number of output channels, defaults to the number of input channels.
    pub out_channels: Option<usize>,
    pub temb_channels: Option<usize>,
    /// The number of groups to use in group normalization.
    pub groups: usize,
    pub groups_out: Option<usize>,
    /// The epsilon to be used in the group normalization operations.
    pub eps: f64,
    /// Whether to use a 2D convolution in the skip connection. When using None,
    /// such a convolution is used if the number of input channels is different from
    /// the number of output channels.
    pub use_in_shortcut: Option<bool>,
    // non_linearity: silu
    /// The final output is scaled by dividing by this value.
    pub output_scale_factor: f64,
}

impl Default for ResnetBlock2DConfig {
    fn default() -> Self {
        Self {
            out_channels: None,
            temb_channels: Some(512),
            groups: 32,
            groups_out: None,
            eps: 1e-6,
            use_in_shortcut: None,
            output_scale_factor: 1.,
        }
    }
}

#[derive(Debug)]
pub struct ResnetBlock2D {
    norm1: nn::GroupNorm,
    conv1: Conv2d,
    norm2: nn::GroupNorm,
    conv2: Conv2d,
    time_emb_proj: Option<nn::Linear>,
    conv_shortcut: Option<Conv2d>,
    span: tracing::Span,
    config: ResnetBlock2DConfig,
}

impl ResnetBlock2D {
    pub fn new(
        vs: nn::VarBuilder,
        in_channels: usize,
        config: ResnetBlock2DConfig,
    ) -> Result<Self> {
        let out_channels = config.out_channels.unwrap_or(in_channels);
        let conv_cfg = nn::Conv2dConfig {
            stride: 1,
            padding: 1,
            groups: 1,
            dilation: 1,
        };
        let norm1 = nn::group_norm(config.groups, in_channels, config.eps, vs.pp("norm1"))?;
        let conv1 = conv2d(in_channels, out_channels, 3, conv_cfg, vs.pp("conv1"))?;
        let groups_out = config.groups_out.unwrap_or(config.groups);
        let norm2 = nn::group_norm(groups_out, out_channels, config.eps, vs.pp("norm2"))?;
        let conv2 = conv2d(out_channels, out_channels, 3, conv_cfg, vs.pp("conv2"))?;
        let use_in_shortcut = config
            .use_in_shortcut
            .unwrap_or(in_channels != out_channels);
        let conv_shortcut = if use_in_shortcut {
            let conv_cfg = nn::Conv2dConfig {
                stride: 1,
                padding: 0,
                groups: 1,
                dilation: 1,
            };
            Some(conv2d(
                in_channels,
                out_channels,
                1,
                conv_cfg,
                vs.pp("conv_shortcut"),
            )?)
        } else {
            None
        };
        let time_emb_proj = match config.temb_channels {
            None => None,
            Some(temb_channels) => Some(nn::linear(
                temb_channels,
                out_channels,
                vs.pp("time_emb_proj"),
            )?),
        };
        let span = tracing::span!(tracing::Level::TRACE, "resnet2d");
        Ok(Self {
            norm1,
            conv1,
            norm2,
            conv2,
            time_emb_proj,
            span,
            config,
            conv_shortcut,
        })
    }

    pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> {
        let _enter = self.span.enter();
        let shortcut_xs = match &self.conv_shortcut {
            Some(conv_shortcut) => conv_shortcut.forward(xs)?,
            None => xs.clone(),
        };
        let xs = self.norm1.forward(xs)?;
        let xs = self.conv1.forward(&nn::ops::silu(&xs)?)?;
        let xs = match (temb, &self.time_emb_proj) {
            (Some(temb), Some(time_emb_proj)) => time_emb_proj
                .forward(&nn::ops::silu(temb)?)?
                .unsqueeze(D::Minus1)?
                .unsqueeze(D::Minus1)?
                .broadcast_add(&xs)?,
            _ => xs,
        };
        let xs = self
            .conv2
            .forward(&nn::ops::silu(&self.norm2.forward(&xs)?)?)?;
        (shortcut_xs + xs)? / self.config.output_scale_factor
    }
}
candle/candle-transformers/src/models/stable_diffusion/resnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/resnet.rs", "repo_id": "candle", "token_count": 2284 }
50
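A minimal sketch of driving this block end-to-end. It assumes `candle-nn`'s `VarMap` (which materializes missing weights on demand, so no checkpoint is needed for a shape check) and the `candle_transformers` re-export path suggested by the file location; shapes are illustrative.

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};
use candle_transformers::models::stable_diffusion::resnet::{
    ResnetBlock2D, ResnetBlock2DConfig,
};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    // With a VarMap-backed builder, weights are created on first access,
    // which is enough to exercise the forward pass.
    let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let cfg = ResnetBlock2DConfig {
        temb_channels: None, // skip the time-embedding projection
        ..Default::default()
    };
    let block = ResnetBlock2D::new(vs, 64, cfg)?;
    let xs = Tensor::zeros((1, 64, 8, 8), DType::F32, &dev)?;
    // out_channels defaults to in_channels, so the shape is preserved.
    let ys = block.forward(&xs, None)?;
    assert_eq!(ys.dims4()?, (1, 64, 8, 8));
    Ok(())
}
```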
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;

#[derive(Debug, Clone)]
pub struct Embedding {
    inner: candle_nn::Embedding,
    span: tracing::Span,
}

impl Embedding {
    pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
        let inner = candle_nn::embedding(d1, d2, vb)?;
        let span = tracing::span!(tracing::Level::TRACE, "embedding");
        Ok(Self { inner, span })
    }

    pub fn from_weights(weights: Tensor) -> Result<Self> {
        let (_in_size, out_size) = weights.dims2()?;
        let inner = candle_nn::Embedding::new(weights, out_size);
        let span = tracing::span!(tracing::Level::TRACE, "embedding");
        Ok(Self { inner, span })
    }

    pub fn embeddings(&self) -> &Tensor {
        self.inner.embeddings()
    }
}

impl Module for Embedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}

#[derive(Debug, Clone)]
pub struct Linear {
    inner: candle_nn::Linear,
    span: tracing::Span,
}

impl Linear {
    pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self {
        let inner = candle_nn::Linear::new(weights, bias);
        let span = tracing::span!(tracing::Level::TRACE, "linear");
        Self { inner, span }
    }
}

pub fn linear_b(d1: usize, d2: usize, b: bool, vb: VarBuilder) -> Result<Linear> {
    let inner = candle_nn::linear_b(d1, d2, b, vb)?;
    let span = tracing::span!(tracing::Level::TRACE, "linear");
    Ok(Linear { inner, span })
}

pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
    let inner = candle_nn::linear(d1, d2, vb)?;
    let span = tracing::span!(tracing::Level::TRACE, "linear");
    Ok(Linear { inner, span })
}

pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
    let inner = candle_nn::linear_no_bias(d1, d2, vb)?;
    let span = tracing::span!(tracing::Level::TRACE, "linear");
    Ok(Linear { inner, span })
}

impl Module for Linear {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}

// Wrap the conv2d op to provide some tracing.
#[derive(Debug, Clone)]
pub struct Conv2d {
    inner: candle_nn::Conv2d,
    span: tracing::Span,
}

impl Module for Conv2d {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(x)
    }
}

pub fn conv2d(
    in_channels: usize,
    out_channels: usize,
    kernel_size: usize,
    cfg: candle_nn::Conv2dConfig,
    vs: candle_nn::VarBuilder,
) -> Result<Conv2d> {
    let span = tracing::span!(tracing::Level::TRACE, "conv2d");
    let inner = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?;
    Ok(Conv2d { inner, span })
}

// QMatMul wrapper adding some tracing.
#[derive(Clone)]
pub struct QMatMul {
    inner: candle::quantized::QMatMul,
    span: tracing::Span,
}

impl QMatMul {
    pub fn new(
        out_dim: usize,
        in_dim: usize,
        vb: crate::quantized_var_builder::VarBuilder,
    ) -> Result<Self> {
        let ws = vb.get((in_dim, out_dim), "weight")?;
        let inner = candle::quantized::QMatMul::from_arc(ws)?;
        let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
        Ok(Self { inner, span })
    }

    pub fn from_weights(ws: std::sync::Arc<candle::quantized::QTensor>) -> Result<Self> {
        let inner = candle::quantized::QMatMul::from_arc(ws)?;
        let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
        Ok(Self { inner, span })
    }
}

impl Module for QMatMul {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}

impl std::fmt::Debug for QMatMul {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "QMatMul")
    }
}

#[derive(Clone, Debug)]
pub struct LayerNorm {
    inner: candle_nn::LayerNorm,
    span: tracing::Span,
}

impl LayerNorm {
    pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
        let inner = candle_nn::LayerNorm::new(weight, bias, eps);
        let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
        Self { inner, span }
    }
}

impl Module for LayerNorm {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(xs)
    }
}

pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>(
    size: usize,
    c: C,
    vb: VarBuilder,
) -> Result<LayerNorm> {
    let inner = candle_nn::layer_norm(size, c, vb)?;
    let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
    Ok(LayerNorm { inner, span })
}

#[derive(Debug, Clone)]
pub struct RmsNorm {
    inner: candle_nn::RmsNorm,
    span: tracing::Span,
}

impl RmsNorm {
    pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
        let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
        let inner = candle_nn::rms_norm(size, eps, vb)?;
        Ok(Self { inner, span })
    }

    pub fn forward_diff(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward_diff(x)
    }
}

impl Module for RmsNorm {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let _enter = self.span.enter();
        self.inner.forward(x)
    }
}
candle/candle-transformers/src/models/with_tracing.rs/0
{ "file_path": "candle/candle-transformers/src/models/with_tracing.rs", "repo_id": "candle", "token_count": 2381 }
51
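These wrappers only add a `tracing` span around each forward call; with a subscriber installed, per-layer timings show up in the trace. A small sketch, assuming the `tracing-subscriber` crate is available alongside `candle` and that the module is exported as `candle_transformers::models::with_tracing`:

```rust
use candle::{Device, Module, Result, Tensor};
use candle_transformers::models::with_tracing::Linear;

fn main() -> Result<()> {
    // Log TRACE-level spans (including the "linear" span above) to stderr.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::TRACE)
        .init();
    let dev = Device::Cpu;
    // candle linear layers store weights as (out_features, in_features).
    let w = Tensor::ones((4, 3), candle::DType::F32, &dev)?;
    let linear = Linear::from_weights(w, None);
    let xs = Tensor::ones((2, 3), candle::DType::F32, &dev)?;
    let ys = linear.forward(&xs)?; // enters the "linear" tracing span
    assert_eq!(ys.dims2()?, (2, 4));
    Ok(())
}
```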
use candle::Result;
use candle_transformers::object_detection::{
    non_maximum_suppression, soft_non_maximum_suppression, Bbox,
};

#[test]
fn nms_basic() -> Result<()> {
    // Boxes based upon https://thepythoncode.com/article/non-maximum-suppression-using-opencv-in-python
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 245.0,
            ymin: 305.0,
            xmax: 575.0,
            ymax: 490.0,
            confidence: 0.9,
            data: (),
        }, // Box 1
        Bbox {
            xmin: 235.0,
            ymin: 300.0,
            xmax: 485.0,
            ymax: 515.0,
            confidence: 0.8,
            data: (),
        }, // Box 2
        Bbox {
            xmin: 305.0,
            ymin: 270.0,
            xmax: 540.0,
            ymax: 500.0,
            confidence: 0.6,
            data: (),
        }, // Box 3
    ]];
    non_maximum_suppression(&mut bboxes, 0.5);
    let bboxes = bboxes.into_iter().next().unwrap();
    assert_eq!(bboxes.len(), 1);
    assert_eq!(bboxes[0].confidence, 0.9);
    Ok(())
}

#[test]
fn softnms_basic_functionality() -> Result<()> {
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
            confidence: 0.5,
            data: (),
        },
        Bbox {
            xmin: 0.1,
            ymin: 0.1,
            xmax: 1.1,
            ymax: 1.1,
            confidence: 0.9,
            data: (),
        },
        Bbox {
            xmin: 0.2,
            ymin: 0.2,
            xmax: 1.2,
            ymax: 1.2,
            confidence: 0.6,
            data: (),
        },
    ]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));

    // The confidences of the boxes overlapping the highest-confidence box should decay.
    assert!(bboxes[0][0].confidence == 0.9);
    assert!(bboxes[0][1].confidence < 0.5);
    assert!(bboxes[0][2].confidence < 0.6);
    Ok(())
}

#[test]
fn softnms_confidence_decay() -> Result<()> {
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
            confidence: 0.9,
            data: (),
        }, // Reference box
        Bbox {
            xmin: 0.1,
            ymin: 0.1,
            xmax: 1.1,
            ymax: 1.1,
            confidence: 0.8,
            data: (),
        }, // Overlapping box
    ]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));

    // Check that the confidence of the overlapping box is decayed.
    assert!(bboxes[0][0].confidence == 0.9);
    assert!(bboxes[0][1].confidence < 0.8);
    Ok(())
}

#[test]
fn softnms_confidence_threshold() -> Result<()> {
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
            confidence: 0.9,
            data: (),
        },
        Bbox {
            xmin: 0.1,
            ymin: 0.1,
            xmax: 1.1,
            ymax: 1.1,
            confidence: 0.05,
            data: (),
        },
    ]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));

    // The box with confidence below the threshold has its confidence zeroed out
    // (it is not removed from the list).
    assert_eq!(bboxes[0].len(), 2);
    assert_eq!(bboxes[0][0].confidence, 0.9);
    assert_eq!(bboxes[0][1].confidence, 0.00);
    Ok(())
}

#[test]
fn softnms_no_overlap() -> Result<()> {
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
            confidence: 0.9,
            data: (),
        },
        Bbox {
            xmin: 2.0,
            ymin: 2.0,
            xmax: 3.0,
            ymax: 3.0,
            confidence: 0.8,
            data: (),
        },
    ]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));

    // Both boxes should remain untouched as they do not significantly overlap.
    assert_eq!(bboxes[0].len(), 2);
    assert_eq!(bboxes[0][0].confidence, 0.9);
    assert_eq!(bboxes[0][1].confidence, 0.8);
    Ok(())
}

#[test]
fn softnms_no_bbox() -> Result<()> {
    let mut bboxes: Vec<Vec<Bbox<()>>> = vec![];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
    assert!(bboxes.is_empty());
    Ok(())
}

#[test]
fn softnms_single_bbox() -> Result<()> {
    let mut bboxes = vec![vec![Bbox {
        xmin: 0.0,
        ymin: 0.0,
        xmax: 1.0,
        ymax: 1.0,
        confidence: 0.9,
        data: (),
    }]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));
    assert_eq!(bboxes[0].len(), 1);
    Ok(())
}

#[test]
fn softnms_equal_confidence_overlap() -> Result<()> {
    let mut bboxes = vec![vec![
        Bbox {
            xmin: 0.0,
            ymin: 0.0,
            xmax: 1.0,
            ymax: 1.0,
            confidence: 0.5,
            data: (),
        },
        Bbox {
            xmin: 0.1,
            ymin: 0.1,
            xmax: 1.1,
            ymax: 1.1,
            confidence: 0.5,
            data: (),
        },
    ]];
    soft_non_maximum_suppression(&mut bboxes, Some(0.5), Some(0.1), Some(0.5));

    // The first box becomes the reference box, so only the second box is decayed.
    // The implementation would have to change for both to be decayed.
    assert_eq!(bboxes[0].len(), 2);
    assert!(bboxes[0][0].confidence == 0.5);
    assert!(bboxes[0][1].confidence < 0.5);
    Ok(())
}
candle/candle-transformers/tests/nms_tests.rs/0
{ "file_path": "candle/candle-transformers/tests/nms_tests.rs", "repo_id": "candle", "token_count": 3139 }
52
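The decay in these tests is driven by box overlap: for the 0.1-shifted unit boxes used throughout, the IoU works out to 0.81 / (2 - 0.81) ≈ 0.68, comfortably above the 0.5 threshold. A hypothetical standalone helper to verify that by hand:

```rust
/// Intersection-over-union of two axis-aligned boxes given as
/// (xmin, ymin, xmax, ymax).
fn iou(a: (f32, f32, f32, f32), b: (f32, f32, f32, f32)) -> f32 {
    let ix = (a.2.min(b.2) - a.0.max(b.0)).max(0.0);
    let iy = (a.3.min(b.3) - a.1.max(b.1)).max(0.0);
    let inter = ix * iy;
    let union = (a.2 - a.0) * (a.3 - a.1) + (b.2 - b.0) * (b.3 - b.1) - inter;
    inter / union
}

fn main() {
    let v = iou((0.0, 0.0, 1.0, 1.0), (0.1, 0.1, 1.1, 1.1));
    // 0.9 * 0.9 = 0.81 intersection over 1 + 1 - 0.81 = 1.19 union.
    assert!((v - 0.81 / 1.19).abs() < 1e-6);
    println!("IoU = {v:.4}"); // ~0.6807
}
```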
<html>
  <head>
    <meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
    <title>Candle Segment Anything Model (SAM) Rust/WASM</title>
  </head>
  <body></body>
</html>

<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <style>
      @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
      html,
      body {
        font-family: "Source Sans 3", sans-serif;
      }
    </style>
    <script src="https://cdn.tailwindcss.com"></script>
    <script type="module">
      // base url for the model weights
      const MODEL_BASEURL =
        "https://huggingface.co/lmz/candle-sam/resolve/main/";

      // available models
      const MODELS = {
        sam_mobile_tiny: {
          url: "mobile_sam-tiny-vitt.safetensors",
        },
        sam_base: {
          url: "sam_vit_b_01ec64.safetensors",
        },
      };
      const samWorker = new Worker("./samWorker.js", { type: "module" });

      async function segmentPoints(
        modelURL, // URL to the weights file
        modelID, // model ID
        imageURL, // URL to the image file
        points // {x, y} points to prompt image
      ) {
        return new Promise((resolve, reject) => {
          function messageHandler(event) {
            console.log(event.data);
            if ("status" in event.data) {
              updateStatus(event.data);
            }
            if ("error" in event.data) {
              samWorker.removeEventListener("message", messageHandler);
              reject(new Error(event.data.error));
            }
            if (event.data.status === "complete-embedding") {
              samWorker.removeEventListener("message", messageHandler);
              resolve();
            }
            if (event.data.status === "complete") {
              samWorker.removeEventListener("message", messageHandler);
              resolve(event.data.output);
            }
          }
          samWorker.addEventListener("message", messageHandler);
          samWorker.postMessage({
            modelURL,
            modelID,
            imageURL,
            points,
          });
        });
      }
      function updateStatus(statusMessage) {
        // Use the argument that was passed in rather than a global `event`.
        statusOutput.innerText = statusMessage.message;
      }

      let copyMaskURL = null;
      let copyImageURL = null;

      const clearBtn = document.querySelector("#clear-btn");
      const maskBtn = document.querySelector("#mask-btn");
      const undoBtn = document.querySelector("#undo-btn");
      const downloadBtn = document.querySelector("#download-btn");
      const canvas = document.querySelector("#canvas");
      const mask = document.querySelector("#mask");
      const ctxCanvas = canvas.getContext("2d");
      const ctxMask = mask.getContext("2d");
      const fileUpload = document.querySelector("#file-upload");
      const dropArea = document.querySelector("#drop-area");
      const dropButtons = document.querySelector("#drop-buttons");
      const imagesExamples = document.querySelector("#image-select");
      const modelSelection = document.querySelector("#model");
      const statusOutput = document.querySelector("#output-status");

      //add event listener to file input
      fileUpload.addEventListener("input", (e) => {
        const target = e.target;
        if (target.files.length > 0) {
          const href = URL.createObjectURL(target.files[0]);
          clearImageCanvas();
          copyImageURL = href;
          drawImageCanvas(href);
          setImageEmbeddings(href);
          togglePointMode(false);
        }
      });

      // add event listener to drop-area
      dropArea.addEventListener("dragenter", (e) => {
        e.preventDefault();
        dropArea.classList.add("border-blue-700");
      });
      dropArea.addEventListener("dragleave", (e) => {
        e.preventDefault();
        dropArea.classList.remove("border-blue-700");
      });
      dropArea.addEventListener("dragover", (e) => {
        e.preventDefault();
      });
      dropArea.addEventListener("drop", (e) => {
        e.preventDefault();
        dropArea.classList.remove("border-blue-700");
        const url = e.dataTransfer.getData("text/uri-list");
        const files = e.dataTransfer.files;

        if (files.length > 0) {
          const href = URL.createObjectURL(files[0]);
          clearImageCanvas();
          copyImageURL = href;
          drawImageCanvas(href);
          setImageEmbeddings(href);
          togglePointMode(false);
        } else if (url) {
          clearImageCanvas();
          copyImageURL = url;
          drawImageCanvas(url);
          setImageEmbeddings(url);
          togglePointMode(false);
        }
      });

      let hasImage = false;
      let isSegmenting = false;
      let isEmbedding = false;
      let currentImageURL = "";
      let pointArr = [];
      let bgPointMode = false;

      //add event listener to image examples
      imagesExamples.addEventListener("click", (e) => {
        if (isEmbedding || isSegmenting) {
          return;
        }
        const target = e.target;
        if (target.nodeName === "IMG") {
          const href = target.src;
          clearImageCanvas();
          copyImageURL = href;
          drawImageCanvas(href);
          setImageEmbeddings(href);
        }
      });
      //add event listener to mask button
      maskBtn.addEventListener("click", () => {
        togglePointMode();
      });
      //add event listener to clear button
      clearBtn.addEventListener("click", () => {
        clearImageCanvas();
        togglePointMode(false);
        pointArr = [];
      });
      //add event listener to undo button
      undoBtn.addEventListener("click", () => {
        undoPoint();
      });
      // add event to download btn
      downloadBtn.addEventListener("click", async () => {
        // Function to load image blobs as Image elements asynchronously
        const loadImageAsync = (imageURL) => {
          return new Promise((resolve) => {
            const img = new Image();
            img.onload = () => {
              resolve(img);
            };
            img.crossOrigin = "anonymous";
            img.src = imageURL;
          });
        };
        const originalImage = await loadImageAsync(copyImageURL);
        const maskImage = await loadImageAsync(copyMaskURL);

        // create a main board to draw on
        const canvas = document.createElement("canvas");
        const ctx = canvas.getContext("2d");
        canvas.width = originalImage.width;
        canvas.height = originalImage.height;

        // Perform the mask operation
        ctx.drawImage(maskImage, 0, 0);
        ctx.globalCompositeOperation = "source-in";
        ctx.drawImage(originalImage, 0, 0);

        // to blob
        const blobPromise = new Promise((resolve) => {
          canvas.toBlob(resolve);
        });
        const blob = await blobPromise;
        const resultURL = URL.createObjectURL(blob);

        // download
        const link = document.createElement("a");
        link.href = resultURL;
        link.download = "cutout.png";
        link.click();
      });

      //add click event to canvas
      canvas.addEventListener("click", async (event) => {
        if (!hasImage || isEmbedding || isSegmenting) {
          return;
        }
        // Holding shift flips the current point mode for this click.
        const backgroundMode = event.shiftKey ? !bgPointMode : bgPointMode;
        const targetBox = event.target.getBoundingClientRect();
        const x = (event.clientX - targetBox.left) / targetBox.width;
        const y = (event.clientY - targetBox.top) / targetBox.height;
        const ptsToRemove = [];
        for (const [idx, pts] of pointArr.entries()) {
          const d = Math.sqrt((pts[0] - x) ** 2 + (pts[1] - y) ** 2);
          if (d < 6 / targetBox.width) {
            ptsToRemove.push(idx);
          }
        }
        if (ptsToRemove.length > 0) {
          pointArr = pointArr.filter((_, idx) => !ptsToRemove.includes(idx));
        } else {
          pointArr = [...pointArr, [x, y, !backgroundMode]];
        }
        undoBtn.disabled = false;
        downloadBtn.disabled = false;
        if (pointArr.length == 0) {
          ctxMask.clearRect(0, 0, canvas.width, canvas.height);
          undoBtn.disabled = true;
          downloadBtn.disabled = true;
          return;
        }
        isSegmenting = true;
        const { maskURL } = await getSegmentationMask(pointArr);
        isSegmenting = false;
        copyMaskURL = maskURL;
        drawMask(maskURL, pointArr);
      });

      async function undoPoint() {
        if (!hasImage || isEmbedding || isSegmenting) {
          return;
        }
        if (pointArr.length === 0) {
          return;
        }
        pointArr.pop();
        if (pointArr.length === 0) {
          ctxMask.clearRect(0, 0, canvas.width, canvas.height);
          undoBtn.disabled = true;
          return;
        }
        isSegmenting = true;
        const { maskURL } = await getSegmentationMask(pointArr);
        isSegmenting = false;
        copyMaskURL = maskURL;
        drawMask(maskURL, pointArr);
      }

      function togglePointMode(mode) {
        bgPointMode = mode === undefined ? !bgPointMode : mode;
        maskBtn.querySelector("span").innerText = bgPointMode
          ? "Background Point"
          : "Mask Point";
        if (bgPointMode) {
          maskBtn.querySelector("#mask-circle").setAttribute("hidden", "");
          maskBtn.querySelector("#unmask-circle").removeAttribute("hidden");
        } else {
          maskBtn.querySelector("#mask-circle").removeAttribute("hidden");
          maskBtn.querySelector("#unmask-circle").setAttribute("hidden", "");
        }
      }

      async function getSegmentationMask(points) {
        const modelID = modelSelection.value;
        const modelURL = MODEL_BASEURL + MODELS[modelID].url;
        const imageURL = currentImageURL;
        const { maskURL } = await segmentPoints(
          modelURL,
          modelID,
          imageURL,
          points
        );
        return { maskURL };
      }
      async function setImageEmbeddings(imageURL) {
        if (isEmbedding) {
          return;
        }
        canvas.classList.remove("cursor-pointer");
        canvas.classList.add("cursor-wait");
        clearBtn.disabled = true;
        const modelID = modelSelection.value;
        const modelURL = MODEL_BASEURL + MODELS[modelID].url;
        isEmbedding = true;
        await segmentPoints(modelURL, modelID, imageURL);
        canvas.classList.remove("cursor-wait");
        canvas.classList.add("cursor-pointer");
        clearBtn.disabled = false;
        isEmbedding = false;
        currentImageURL = imageURL;
      }

      function clearImageCanvas() {
        ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
        ctxMask.clearRect(0, 0, canvas.width, canvas.height);
        hasImage = false;
        isEmbedding = false;
        isSegmenting = false;
        currentImageURL = "";
        pointArr = [];
        clearBtn.disabled = true;
        canvas.parentElement.style.height = "auto";
        dropButtons.classList.remove("invisible");
      }

      function drawMask(maskURL, points) {
        if (!maskURL) {
          throw new Error("No mask URL provided");
        }
        const img = new Image();
        img.crossOrigin = "anonymous";
        img.onload = () => {
          mask.width = canvas.width;
          mask.height = canvas.height;
          ctxMask.save();
          ctxMask.drawImage(canvas, 0, 0);
          ctxMask.globalCompositeOperation = "source-atop";
          ctxMask.fillStyle = "rgba(255, 0, 0, 0.6)";
          ctxMask.fillRect(0, 0, canvas.width, canvas.height);
          ctxMask.globalCompositeOperation = "destination-in";
          ctxMask.drawImage(img, 0, 0);
          ctxMask.globalCompositeOperation = "source-over";
          for (const pt of points) {
            if (pt[2]) {
              ctxMask.fillStyle = "rgba(0, 255, 255, 1)";
            } else {
              ctxMask.fillStyle = "rgba(255, 255, 0, 1)";
            }
            ctxMask.beginPath();
            ctxMask.arc(
              pt[0] * canvas.width,
              pt[1] * canvas.height,
              3,
              0,
              2 * Math.PI
            );
            ctxMask.fill();
          }
          ctxMask.restore();
        };
        img.src = maskURL;
      }
      function drawImageCanvas(imgURL) {
        if (!imgURL) {
          throw new Error("No image URL provided");
        }
        ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
        const img = new Image();
        img.crossOrigin = "anonymous";
        img.onload = () => {
          canvas.width = img.width;
          canvas.height = img.height;
          ctxCanvas.drawImage(img, 0, 0);
          canvas.parentElement.style.height = canvas.offsetHeight + "px";
          hasImage = true;
          clearBtn.disabled = false;
          dropButtons.classList.add("invisible");
        };
        img.src = imgURL;
      }

      const observer = new ResizeObserver((entries) => {
        for (let entry of entries) {
          if (entry.target === canvas) {
            canvas.parentElement.style.height = canvas.offsetHeight + "px";
          }
        }
      });
      observer.observe(canvas);
    </script>
  </head>
  <body class="container max-w-4xl mx-auto p-4">
    <main class="grid grid-cols-1 gap-8 relative">
      <span class="absolute text-5xl -ml-[1em]">🕯️</span>
      <div>
        <h1 class="text-5xl font-bold">Candle Segment Anything</h1>
        <h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
        <p class="max-w-lg">
          Zero-shot image segmentation with
          <a
            href="https://segment-anything.com"
            class="underline hover:text-blue-500 hover:no-underline"
            target="_blank"
            >Segment Anything Model (SAM)</a
          >
          and
          <a
            href="https://github.com/ChaoningZhang/MobileSAM"
            class="underline hover:text-blue-500 hover:no-underline"
            target="_blank"
            >MobileSAM</a
          >. It runs in the browser with a WASM runtime built with
          <a
            href="https://github.com/huggingface/candle/"
            target="_blank"
            class="underline hover:text-blue-500 hover:no-underline"
            >Candle</a
          >
        </p>
      </div>
      <div>
        <label for="model" class="font-medium">Models Options: </label>
        <select
          id="model"
          class="border-2 border-gray-500 rounded-md font-light">
          <option value="sam_mobile_tiny" selected>
            Mobile SAM Tiny (40.6 MB)
          </option>
          <option value="sam_base">SAM Base (375 MB)</option>
        </select>
      </div>
      <div>
        <p class="text-xs italic max-w-lg">
          <b>Note:</b>
          The model's first run may take a few seconds as it loads and caches
          the model in the browser, and then creates the image embeddings. Any
          subsequent clicks on points will be significantly faster.
        </p>
      </div>
      <div class="relative max-w-2xl">
        <div class="flex justify-between items-center">
          <div class="px-2 rounded-md inline text-xs">
            <span id="output-status" class="m-auto font-light"></span>
          </div>
          <div class="flex gap-2">
            <button
              id="mask-btn"
              title="Toggle Mask Point and Background Point"
              class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
              <span>Mask Point</span>
              <svg
                xmlns="http://www.w3.org/2000/svg"
                height="1em"
                viewBox="0 0 512 512">
                <path
                  id="mask-circle"
                  d="M256 512a256 256 0 1 0 0-512 256 256 0 1 0 0 512z" />
                <path
                  id="unmask-circle"
                  hidden
                  d="M464 256a208 208 0 1 0-416 0 208 208 0 1 0 416 0zM0 256a256 256 0 1 1 512 0 256 256 0 1 1-512 0z" />
              </svg>
            </button>
            <button
              id="undo-btn"
              disabled
              title="Undo Last Point"
              class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
              <svg
                xmlns="http://www.w3.org/2000/svg"
                height="1em"
                viewBox="0 0 512 512">
                <path
                  d="M48.5 224H40a24 24 0 0 1-24-24V72a24 24 0 0 1 41-17l41.6 41.6a224 224 0 1 1-1 317.8 32 32 0 0 1 45.3-45.3 160 160 0 1 0 1-227.3L185 183a24 24 0 0 1-17 41H48.5z" />
              </svg>
            </button>
            <button
              id="clear-btn"
              disabled
              title="Clear Image"
              class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center">
              <svg
                class=""
                xmlns="http://www.w3.org/2000/svg"
                viewBox="0 0 13 12"
                height="1em">
                <path
                  d="M1.6.7 12 11.1M12 .7 1.6 11.1"
                  stroke="#2E3036"
                  stroke-width="2" />
              </svg>
            </button>
          </div>
        </div>
        <div
          id="drop-area"
          class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative p-20 w-full overflow-hidden">
          <div
            id="drop-buttons"
            class="flex flex-col items-center justify-center space-y-1 text-center relative z-10">
            <svg
              width="25"
              height="25"
              viewBox="0 0 25 25"
              fill="none"
              xmlns="http://www.w3.org/2000/svg">
              <path
                d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
                fill="#000" />
            </svg>
            <div class="flex text-sm text-gray-600">
              <label
                for="file-upload"
                class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700">
                <span>Drag and drop your image here</span>
                <span class="block text-xs">or</span>
                <span class="block text-xs">Click to upload</span>
              </label>
            </div>
            <input id="file-upload" name="file-upload" type="file" class="sr-only" />
          </div>
          <canvas id="canvas" class="absolute w-full"></canvas>
          <canvas id="mask" class="pointer-events-none absolute w-full"></canvas>
        </div>
        <div class="text-right py-2">
          <button
            id="share-btn"
            class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible">
            <img
              src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg" />
          </button>
          <button
            id="download-btn"
            title="Copy result (.png)"
            disabled
            class="p-1 px-2 text-xs font-medium bg-white rounded-2xl outline outline-gray-200 hover:outline-orange-200 disabled:opacity-50">
            Download Cut-Out
          </button>
        </div>
      </div>
      <div>
        <div class="flex gap-3 items-center overflow-x-scroll" id="image-select">
          <h3 class="font-medium">Examples:</h3>
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
            class="cursor-pointer w-24 h-24 object-cover" />
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
            class="cursor-pointer w-24 h-24 object-cover" />
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
            class="cursor-pointer w-24 h-24 object-cover" />
        </div>
      </div>
    </main>
  </body>
</html>
candle/candle-wasm-examples/segment-anything/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/lib-example.html", "repo_id": "candle", "token_count": 10333 }
53
[package]
name = "tensor-tools"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true

[dependencies]
anyhow = { workspace = true }
candle = { workspace = true }
clap = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
candle/tensor-tools/Cargo.toml/0
{ "file_path": "candle/tensor-tools/Cargo.toml", "repo_id": "candle", "token_count": 119 }
54
set -e
npx lint-staged --config ./.husky/lint-stage-config.js
chat-ui/.husky/pre-commit/0
{ "file_path": "chat-ui/.husky/pre-commit", "repo_id": "chat-ui", "token_count": 27 }
55
{{- if .Values.infisical.enabled }}
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: {{ include "name" $ }}-infisical-secret
  namespace: {{ $.Release.Namespace }}
spec:
  authentication:
    universalAuth:
      credentialsRef:
        secretName: {{ .Values.infisical.operatorSecretName | quote }}
        secretNamespace: {{ .Values.infisical.operatorSecretNamespace | quote }}
      secretsScope:
        envSlug: {{ .Values.infisical.env | quote }}
        projectSlug: {{ .Values.infisical.project | quote }}
        secretsPath: /
  hostAPI: {{ .Values.infisical.url | quote }}
  managedSecretReference:
    creationPolicy: Owner
    secretName: {{ include "name" $ }}-secs
    secretNamespace: {{ .Release.Namespace | quote }}
    secretType: Opaque
  resyncInterval: {{ .Values.infisical.resyncInterval }}
{{- end }}
chat-ui/chart/templates/infisical.yaml/0
{ "file_path": "chat-ui/chart/templates/infisical.yaml", "repo_id": "chat-ui", "token_count": 311 }
56
# Google

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

Chat UI can connect to the Google Vertex AI API endpoints ([list of supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models)).

To enable:

1. [Select](https://console.cloud.google.com/project) or [create](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) a Google Cloud project.
1. [Enable billing for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).
1. [Set up authentication with a service account](https://cloud.google.com/docs/authentication/getting-started)
   so you can access the API from your local workstation.

The service account credentials file can be imported as an environment variable:

```ini
GOOGLE_APPLICATION_CREDENTIALS = clientid.json
```

Make sure your Docker container has access to the file and that the variable is correctly set.

Afterwards, Google Vertex endpoints can be configured as follows:

```ini
MODELS=`[
  {
    "name": "gemini-1.5-pro",
    "displayName": "Vertex Gemini Pro 1.5",
    "endpoints": [
      {
        "type": "vertex",
        "project": "abc-xyz",
        "location": "europe-west3",
        "model": "gemini-1.5-pro-preview-0409", // model name

        // Optional
        "safetyThreshold": "BLOCK_MEDIUM_AND_ABOVE",
        "apiEndpoint": "", // alternative API endpoint URL
        "tools": [{
          "googleSearchRetrieval": {
            "disableAttribution": true
          }
        }]
      }
    ]
  }
]`
```

## GenAI

Or use the Gemini API provider from the [generative-ai-js SDK](https://github.com/google-gemini/generative-ai-js#readme):

> Make sure that you have an API key from Google Cloud Platform. To get an API key, follow the instructions [here](https://cloud.google.com/docs/authentication/api-keys).

```ini
MODELS=`[
  {
    "name": "gemini-1.5-flash",
    "displayName": "Gemini Flash 1.5",
    "multimodal": true,
    "endpoints": [
      {
        "type": "genai",
        "apiKey": "abc...xyz"
      }
    ],

    // Optional
    "safetyThreshold": "BLOCK_MEDIUM_AND_ABOVE",
  },
  {
    "name": "gemini-1.5-pro",
    "displayName": "Gemini Pro 1.5",
    "multimodal": false,
    "endpoints": [
      {
        "type": "genai",
        "apiKey": "abc...xyz"
      }
    ]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/google.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/google.md", "repo_id": "chat-ui", "token_count": 998 }
57
# Running Locally

You may start an instance locally for non-production use cases. For production use cases, please see the other installation options.

## Configuration

The default config for Chat UI is stored in the `.env` file. You will need to override some values to get Chat UI to run locally. Start by creating a `.env.local` file in the root of the repository as per the [configuration section](../configuration/overview). The bare minimum config you need to get Chat UI to run locally is the following:

```ini
MONGODB_URL=<the URL to your MongoDB instance>
HF_TOKEN=<your access token> # find your token at hf.co/settings/token
```

## Database

The chat history is stored in a MongoDB instance, and having a DB instance available is needed for Chat UI to work. You can use a local MongoDB instance. The easiest way is to spin one up using docker with persistence:

```bash
docker run -d -p 27017:27017 -v mongo-chat-ui:/data --name mongo-chat-ui mongo:latest
```

In which case the url of your DB will be `MONGODB_URL=mongodb://localhost:27017`.

Alternatively, you can use a [free MongoDB Atlas](https://www.mongodb.com/pricing) instance for this; Chat UI should fit comfortably within their free tier. After which you can set the `MONGODB_URL` variable in `.env.local` to match your instance.

## Starting the server

```bash
npm ci # install dependencies
npm run build # build the project
npm run preview -- --open # start the server and open your instance at http://localhost:4173
```
chat-ui/docs/source/installation/local.md/0
{ "file_path": "chat-ui/docs/source/installation/local.md", "repo_id": "chat-ui", "token_count": 416 }
58
<script lang="ts">
	export let title = "";
	export let classNames = "";
</script>

<div class="flex items-center rounded-xl bg-gray-100 p-1 text-sm dark:bg-gray-800 {classNames}">
	<span
		class="from-primary-300 text-primary-700 dark:from-primary-900 dark:text-primary-400 mr-2 inline-flex items-center rounded-lg bg-gradient-to-br px-2 py-1 text-xxs font-medium uppercase leading-3"
		>New</span
	>
	{title}
	<div class="ml-auto shrink-0">
		<slot />
	</div>
</div>
chat-ui/src/lib/components/AnnouncementBanner.svelte/0
{ "file_path": "chat-ui/src/lib/components/AnnouncementBanner.svelte", "repo_id": "chat-ui", "token_count": 185 }
59
<script lang="ts"> import { page } from "$app/stores"; import { getHref } from "$lib/utils/getHref"; import PaginationArrow from "./PaginationArrow.svelte"; export let classNames = ""; export let numItemsPerPage: number; export let numTotalItems: number; const ELLIPSIS_IDX = -1 as const; $: numTotalPages = Math.ceil(numTotalItems / numItemsPerPage); $: pageIndex = parseInt($page.url.searchParams.get("p") ?? "0"); $: pageIndexes = getPageIndexes(pageIndex, numTotalPages); function getPageIndexes(pageIdx: number, nTotalPages: number) { let pageIdxs: number[] = []; const NUM_EXTRA_BUTTONS = 2; // The number of page links to show on either side of the current page link. const minIdx = 0; const maxIdx = nTotalPages - 1; pageIdxs = [pageIdx]; // forward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx + i; if (newPageIdx > maxIdx) { continue; } pageIdxs.push(newPageIdx); } if (maxIdx - pageIdxs[pageIdxs.length - 1] > 1) { pageIdxs.push(...[ELLIPSIS_IDX, maxIdx]); } else if (maxIdx - pageIdxs[pageIdxs.length - 1] === 1) { pageIdxs.push(maxIdx); } // backward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx - i; if (newPageIdx < minIdx) { continue; } pageIdxs.unshift(newPageIdx); } if (pageIdxs[0] - minIdx > 1) { pageIdxs.unshift(...[minIdx, ELLIPSIS_IDX]); } else if (pageIdxs[0] - minIdx === 1) { pageIdxs.unshift(minIdx); } return pageIdxs; } </script> {#if numTotalPages > 1} <nav> <ul class="flex select-none items-center justify-between space-x-2 text-gray-700 dark:text-gray-300 sm:justify-center {classNames}" > <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex - 1).toString() } })} direction="previous" isDisabled={pageIndex - 1 < 0} /> </li> {#each pageIndexes as pageIdx} <li class="hidden sm:block"> <a class=" rounded-lg px-2.5 py-1 {pageIndex === pageIdx ? 'bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-800 dark:text-yellow-500 dark:ring-gray-700' : ''} " class:pointer-events-none={pageIdx === ELLIPSIS_IDX || pageIndex === pageIdx} href={getHref($page.url, { newKeys: { p: pageIdx.toString() } })} > {pageIdx === ELLIPSIS_IDX ? "..." : pageIdx + 1} </a> </li> {/each} <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex + 1).toString() } })} direction="next" isDisabled={pageIndex + 1 >= numTotalPages} /> </li> </ul> </nav> {/if}
chat-ui/src/lib/components/Pagination.svelte/0
{ "file_path": "chat-ui/src/lib/components/Pagination.svelte", "repo_id": "chat-ui", "token_count": 1210 }
60
<script lang="ts">
	import { createEventDispatcher } from "svelte";

	import { base } from "$app/paths";
	import { goto } from "$app/navigation";

	import type { Model } from "$lib/types/Model";
	import type { Assistant } from "$lib/types/Assistant";

	import { useSettingsStore } from "$lib/stores/settings";
	import { formatUserCount } from "$lib/utils/formatUserCount";
	import IconGear from "~icons/bi/gear-fill";
	import IconInternet from "../icons/IconInternet.svelte";
	import CarbonExport from "~icons/carbon/export";
	import CarbonCheckmark from "~icons/carbon/checkmark";
	import CarbonRenew from "~icons/carbon/renew";
	import CarbonUserMultiple from "~icons/carbon/user-multiple";
	import CarbonTools from "~icons/carbon/tools";

	import { share } from "$lib/utils/share";
	import { env as envPublic } from "$env/dynamic/public";
	import { page } from "$app/stores";

	export let models: Model[];
	export let assistant: Pick<
		Assistant,
		| "avatar"
		| "name"
		| "rag"
		| "dynamicPrompt"
		| "modelId"
		| "createdByName"
		| "exampleInputs"
		| "_id"
		| "description"
		| "userCount"
		| "tools"
	>;

	const dispatch = createEventDispatcher<{ message: string }>();

	$: hasRag =
		assistant?.rag?.allowAllDomains ||
		(assistant?.rag?.allowedDomains?.length ?? 0) > 0 ||
		(assistant?.rag?.allowedLinks?.length ?? 0) > 0 ||
		assistant?.dynamicPrompt;

	const prefix =
		envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || $page.url.origin}${base}`;

	$: shareUrl = `${prefix}/assistant/${assistant?._id}`;

	let isCopied = false;

	const settings = useSettingsStore();
</script>

<div class="flex h-full w-full flex-col content-center items-center justify-center pb-52">
	<div
		class="relative mt-auto rounded-2xl bg-gray-100 text-gray-600 dark:border-gray-800 dark:bg-gray-800/60 dark:text-gray-300"
	>
		<div
			class="mt-3 flex min-w-[80dvw] items-center gap-4 p-4 pr-1 sm:min-w-[440px] md:p-8 xl:gap-8"
		>
			{#if assistant.avatar}
				<img
					src={`${base}/settings/assistants/${assistant._id.toString()}/avatar.jpg?hash=${
						assistant.avatar
					}`}
					alt="avatar"
					class="size-16 flex-none rounded-full object-cover max-sm:self-start md:size-32"
				/>
			{:else}
				<div
					class="flex size-12 flex-none items-center justify-center rounded-full bg-gray-300 object-cover text-xl font-bold uppercase text-gray-500 dark:bg-gray-600 max-sm:self-start sm:text-4xl md:size-32"
				>
					{assistant?.name[0]}
				</div>
			{/if}

			<div class="flex h-full flex-col gap-2 text-balance">
				<p class="-mb-1">Assistant</p>
				<p class="text-xl font-bold sm:text-2xl">{assistant.name}</p>
				{#if assistant.description}
					<p class="line-clamp-6 text-sm text-gray-500 dark:text-gray-400">
						{assistant.description}
					</p>
				{/if}
				{#if assistant?.tools?.length}
					<div
						class="flex h-5 w-fit items-center gap-1 rounded-full bg-purple-500/10 pl-1 pr-2 text-xs"
						title="This assistant can use tools."
					>
						<CarbonTools class="text-sm text-purple-600" />
						Has tools
					</div>
				{/if}
				{#if hasRag}
					<div
						class="flex h-5 w-fit items-center gap-1 rounded-full bg-blue-500/10 pl-1 pr-2 text-xs"
						title="This assistant uses the websearch."
> <IconInternet classNames="text-sm text-blue-600" /> Has internet access </div> {/if} {#if assistant.createdByName} <p class="pt-1 text-sm text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}"> {assistant.createdByName} </a> {#if assistant.userCount && assistant.userCount > 1} <span class="mx-1">·</span> <div class="inline-flex items-baseline gap-1 text-sm text-gray-400 dark:text-gray-500" title="Number of users" > <CarbonUserMultiple class="text-xxs" />{formatUserCount(assistant.userCount)} users </div> {/if} </p> {/if} </div> </div> <div class="absolute right-3 top-3 md:right-4 md:top-4"> <div class="flex flex-row items-center gap-1"> <button class="flex h-7 items-center gap-1.5 rounded-full border bg-white px-2.5 py-1 text-gray-800 shadow-sm hover:shadow-inner dark:border-gray-700 dark:bg-gray-700 dark:text-gray-300/90 dark:hover:bg-gray-800 max-sm:px-1.5 md:text-sm" on:click={() => { if (!isCopied) { share(shareUrl, assistant.name); isCopied = true; setTimeout(() => { isCopied = false; }, 2000); } }} > {#if isCopied} <CarbonCheckmark class="text-xxs text-green-600 max-sm:text-xs" /> <span class="text-green-600 max-sm:hidden"> Link copied </span> {:else} <CarbonExport class="text-xxs max-sm:text-xs" /> <span class="max-sm:hidden"> Share </span> {/if} </button> <a href="{base}/settings/assistants/{assistant._id.toString()}" class="flex h-7 items-center gap-1.5 rounded-full border bg-white px-2.5 py-1 text-gray-800 shadow-sm hover:shadow-inner dark:border-gray-700 dark:bg-gray-700 dark:text-gray-300/90 dark:hover:bg-gray-800 md:text-sm" ><IconGear class="text-xxs" />Settings</a > </div> </div> <button on:click={() => { settings.instantSet({ activeModel: models[0].name, }); goto(`${base}/`); }} class="absolute -bottom-6 right-2 inline-flex items-center justify-center text-xs text-gray-600 underline hover:brightness-50 dark:text-gray-400 dark:hover:brightness-110" > <CarbonRenew class="mr-1.5 text-xxs" /> Reset to default model </button> </div> {#if assistant.exampleInputs} <div class="mx-auto mt-auto w-full gap-8 sm:-mb-8"> <div class="md:col-span-2 md:mt-6"> <div class="grid grid-cols-1 gap-3 {assistant.exampleInputs.length > 1 ? 'md:grid-cols-2' : ''}" > {#each assistant.exampleInputs as example} <button type="button" class="truncate whitespace-nowrap rounded-xl border bg-gray-50 px-3 py-2 text-left text-smd text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700" on:click={() => dispatch("message", example)} > {example} </button> {/each} </div> </div> </div> {/if} </div>
chat-ui/src/lib/components/chat/AssistantIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/AssistantIntroduction.svelte", "repo_id": "chat-ui", "token_count": 2792 }
61
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850

import { logger } from "$lib/server/logger";
import { collections } from "$lib/server/database";
import { onExit } from "./exitHandler";

export class AbortedGenerations {
	private static instance: AbortedGenerations;

	private abortedGenerations: Map<string, Date> = new Map();

	private constructor() {
		// Use an arrow function so `this` stays bound to the instance inside setInterval
		const interval = setInterval(() => this.updateList(), 1000);
		onExit(() => clearInterval(interval));
	}

	public static getInstance(): AbortedGenerations {
		if (!AbortedGenerations.instance) {
			AbortedGenerations.instance = new AbortedGenerations();
		}

		return AbortedGenerations.instance;
	}

	public getList(): Map<string, Date> {
		return this.abortedGenerations;
	}

	private async updateList() {
		try {
			const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();

			this.abortedGenerations = new Map(
				aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
			);
		} catch (err) {
			logger.error(err);
		}
	}
}
chat-ui/src/lib/server/abortedGenerations.ts/0
{ "file_path": "chat-ui/src/lib/server/abortedGenerations.ts", "repo_id": "chat-ui", "token_count": 373 }
62
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { TextGenerationStreamOutput, TextGenerationStreamToken } from "@huggingface/inference"; import { endpointTgi, endpointTgiParametersSchema } from "./tgi/endpointTgi"; import { z } from "zod"; import endpointAws, { endpointAwsParametersSchema } from "./aws/endpointAws"; import { endpointOAIParametersSchema, endpointOai } from "./openai/endpointOai"; import endpointLlamacpp, { endpointLlamacppParametersSchema } from "./llamacpp/endpointLlamacpp"; import endpointOllama, { endpointOllamaParametersSchema } from "./ollama/endpointOllama"; import endpointVertex, { endpointVertexParametersSchema } from "./google/endpointVertex"; import endpointGenAI, { endpointGenAIParametersSchema } from "./google/endpointGenAI"; import { endpointBedrock, endpointBedrockParametersSchema } from "./aws/endpointBedrock"; import { endpointAnthropic, endpointAnthropicParametersSchema, } from "./anthropic/endpointAnthropic"; import { endpointAnthropicVertex, endpointAnthropicVertexParametersSchema, } from "./anthropic/endpointAnthropicVertex"; import type { Model } from "$lib/types/Model"; import endpointCloudflare, { endpointCloudflareParametersSchema, } from "./cloudflare/endpointCloudflare"; import { endpointCohere, endpointCohereParametersSchema } from "./cohere/endpointCohere"; import endpointLangserve, { endpointLangserveParametersSchema, } from "./langserve/endpointLangserve"; import type { Tool, ToolCall, ToolResult } from "$lib/types/Tool"; export type EndpointMessage = Omit<Message, "id">; // parameters passed when generating text export interface EndpointParameters { messages: EndpointMessage[]; preprompt?: Conversation["preprompt"]; continueMessage?: boolean; // used to signal that the last message will be extended generateSettings?: Partial<Model["parameters"]>; tools?: Tool[]; toolResults?: ToolResult[]; isMultimodal?: boolean; } interface CommonEndpoint { weight: number; } type TextGenerationStreamOutputWithTools = TextGenerationStreamOutput & { token: TextGenerationStreamToken & { toolCalls?: ToolCall[] }; }; // type signature for the endpoint export type Endpoint = ( params: EndpointParameters ) => Promise<AsyncGenerator<TextGenerationStreamOutputWithTools, void, void>>; // generator function that takes in parameters for defining the endpoint and return the endpoint export type EndpointGenerator<T extends CommonEndpoint> = (parameters: T) => Endpoint; // list of all endpoint generators export const endpoints = { tgi: endpointTgi, anthropic: endpointAnthropic, anthropicvertex: endpointAnthropicVertex, bedrock: endpointBedrock, aws: endpointAws, openai: endpointOai, llamacpp: endpointLlamacpp, ollama: endpointOllama, vertex: endpointVertex, genai: endpointGenAI, cloudflare: endpointCloudflare, cohere: endpointCohere, langserve: endpointLangserve, }; export const endpointSchema = z.discriminatedUnion("type", [ endpointAnthropicParametersSchema, endpointAnthropicVertexParametersSchema, endpointAwsParametersSchema, endpointBedrockParametersSchema, endpointOAIParametersSchema, endpointTgiParametersSchema, endpointLlamacppParametersSchema, endpointOllamaParametersSchema, endpointVertexParametersSchema, endpointGenAIParametersSchema, endpointCloudflareParametersSchema, endpointCohereParametersSchema, endpointLangserveParametersSchema, ]); export default endpoints;
chat-ui/src/lib/server/endpoints/endpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/endpoints.ts", "repo_id": "chat-ui", "token_count": 1056 }
63
import { isURLLocal } from "./isURLLocal"; import { describe, expect, it } from "vitest"; describe("isURLLocal", async () => { it("should return true for localhost", async () => { expect(await isURLLocal(new URL("http://localhost"))).toBe(true); }); it("should return true for 127.0.0.1", async () => { expect(await isURLLocal(new URL("http://127.0.0.1"))).toBe(true); }); it("should return true for 127.254.254.254", async () => { expect(await isURLLocal(new URL("http://127.254.254.254"))).toBe(true); }); it("should return false for huggingface.co", async () => { expect(await isURLLocal(new URL("https://huggingface.co/"))).toBe(false); }); it("should return true for 127.0.0.1.nip.io", async () => { expect(await isURLLocal(new URL("http://127.0.0.1.nip.io"))).toBe(true); }); it("should fail on ipv6", async () => { await expect(isURLLocal(new URL("http://[::1]"))).rejects.toThrow(); }); it("should fail on ipv6 --1.sslip.io", async () => { await expect(isURLLocal(new URL("http://--1.sslip.io"))).rejects.toThrow(); }); it("should fail on invalid domain names", async () => { await expect( isURLLocal(new URL("http://34329487239847329874923948732984.com/")) ).rejects.toThrow(); }); });
chat-ui/src/lib/server/isURLLocal.spec.ts/0
{ "file_path": "chat-ui/src/lib/server/isURLLocal.spec.ts", "repo_id": "chat-ui", "token_count": 492 }
64
import { env } from "$env/dynamic/private";
import { Client } from "@gradio/client";
import { SignJWT } from "jose";
import JSON5 from "json5";
import {
	MessageToolUpdateType,
	MessageUpdateType,
	type MessageToolUpdate,
} from "$lib/types/MessageUpdate";
import { logger } from "$lib/server/logger";

export async function* callSpace<TInput extends unknown[], TOutput extends unknown[]>(
	name: string,
	func: string,
	parameters: TInput,
	ipToken: string | undefined,
	uuid: string
): AsyncGenerator<MessageToolUpdate, TOutput, undefined> {
	class CustomClient extends Client {
		fetch(input: RequestInfo | URL, init?: RequestInit): Promise<Response> {
			init = init || {};
			init.headers = {
				...(init.headers || {}),
				...(ipToken ? { "X-IP-Token": ipToken } : {}),
			};
			return super.fetch(input, init);
		}
	}
	const client = await CustomClient.connect(name, {
		hf_token: (env.HF_TOKEN ?? env.HF_ACCESS_TOKEN) as unknown as `hf_${string}`,
		events: ["status", "data"],
	});

	const job = client.submit(func, parameters);

	let data;
	for await (const output of job) {
		if (output.type === "data") {
			data = output.data as TOutput;
		}
		if (output.type === "status") {
			if (output.stage === "error") {
				logger.error(output.message);
				throw new Error(output.message);
			}

			if (output.eta) {
				yield {
					type: MessageUpdateType.Tool,
					subtype: MessageToolUpdateType.ETA,
					eta: output.eta,
					uuid,
				};
			}
		}
	}

	if (!data) {
		throw new Error("No data found in tool call");
	}

	return data;
}

export async function getIpToken(ip: string, username?: string) {
	const ipTokenSecret = env.IP_TOKEN_SECRET;
	if (!ipTokenSecret) {
		return;
	}
	return await new SignJWT({ ip, user: username })
		.setProtectedHeader({ alg: "HS256" })
		.setIssuedAt()
		.setExpirationTime("1m")
		.sign(new TextEncoder().encode(ipTokenSecret));
}

export { toolHasName } from "$lib/utils/tools";

export async function extractJson(text: string): Promise<unknown[]> {
	const calls: unknown[][] = [];
	// grab only the capture group from the regex match
	let codeBlocks = Array.from(text.matchAll(/```json\n(.*?)```/gs))
		.map(([, block]) => block)
		// remove trailing comma
		.map((block) => block.trim().replace(/,$/, ""));

	// if there is no code block, try to find the first json object
	// by trimming the string and trying to parse with JSON5
	if (codeBlocks.length === 0) {
		// use the earliest opening bracket and the latest closing bracket so that
		// nested arrays/objects stay inside the extracted span
		const start = [text.indexOf("["), text.indexOf("{")]
			.filter((i) => i !== -1)
			.reduce((a, b) => Math.min(a, b), Infinity);
		const end = [text.lastIndexOf("]"), text.lastIndexOf("}")]
			.filter((i) => i !== -1)
			.reduce((a, b) => Math.max(a, b), -Infinity);

		if (start === Infinity || end === -Infinity) {
			return [""];
		}

		const json = text.substring(start, end + 1);

		codeBlocks = [json];
	}

	for (const block of codeBlocks) {
		// make it an array if it's not already
		let call = JSON5.parse(block);
		if (!Array.isArray(call)) {
			call = [call];
		}
		calls.push(call);
	}
	return calls.flat();
}
chat-ui/src/lib/server/tools/utils.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/utils.ts", "repo_id": "chat-ui", "token_count": 1149 }
65
import type { WebSearchScrapedSource, WebSearchSource } from "$lib/types/WebSearch"; import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate"; import { withPage } from "./playwright"; import { spatialParser } from "./parser"; import { htmlToMarkdownTree } from "../markdown/tree"; import { timeout } from "$lib/utils/timeout"; import { makeGeneralUpdate } from "../update"; import { MetricsServer } from "$lib/server/metrics"; import { logger } from "$lib/server/logger"; export const scrape = (maxCharsPerElem: number) => async function* ( source: WebSearchSource ): AsyncGenerator<MessageWebSearchUpdate, WebSearchScrapedSource | undefined, undefined> { try { const startTime = Date.now(); MetricsServer.getMetrics().webSearch.pageFetchCount.inc(); const page = await scrapeUrl(source.link, maxCharsPerElem); MetricsServer.getMetrics().webSearch.pageFetchDuration.observe(Date.now() - startTime); yield makeGeneralUpdate({ message: "Browsing webpage", args: [source.link], }); return { ...source, page }; } catch (e) { MetricsServer.getMetrics().webSearch.pageFetchCountError.inc(); logger.error(e, `Error scraping webpage: ${source.link}`); } }; export async function scrapeUrl(url: string, maxCharsPerElem: number) { return withPage(url, async (page, res) => { if (!res) throw Error("Failed to load page"); // Check if it's a non-html content type that we can handle directly // TODO: direct mappings to markdown can be added for markdown, csv and others const contentType = res.headers()["content-type"] ?? ""; if ( contentType.includes("text/plain") || contentType.includes("text/markdown") || contentType.includes("application/json") || contentType.includes("application/xml") || contentType.includes("text/csv") ) { const title = await page.title(); const content = await page.content(); return { title, markdownTree: htmlToMarkdownTree( title, [{ tagName: "p", attributes: {}, content: [content] }], maxCharsPerElem ), }; } const scrapedOutput = await timeout(page.evaluate(spatialParser), 2000) .then(({ elements, ...parsed }) => ({ ...parsed, markdownTree: htmlToMarkdownTree(parsed.title, elements, maxCharsPerElem), })) .catch((cause) => { throw Error("Parsing failed", { cause }); }); return scrapedOutput; }); }
chat-ui/src/lib/server/websearch/scrape/scrape.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/scrape/scrape.ts", "repo_id": "chat-ui", "token_count": 836 }
66
import { writable } from "svelte/store"; export const ERROR_MESSAGES = { default: "Oops, something went wrong.", authOnly: "You have to be logged in.", rateLimited: "You are sending too many messages. Try again later.", }; export const error = writable<string | null>(null);
chat-ui/src/lib/stores/errors.ts/0
{ "file_path": "chat-ui/src/lib/stores/errors.ts", "repo_id": "chat-ui", "token_count": 85 }
67
import type { ObjectId } from "mongodb"; export interface MigrationResult { _id: ObjectId; name: string; status: "success" | "failure" | "ongoing"; }
chat-ui/src/lib/types/MigrationResult.ts/0
{ "file_path": "chat-ui/src/lib/types/MigrationResult.ts", "repo_id": "chat-ui", "token_count": 53 }
68
/**
 * A debounce function that works in both the browser and Node.js.
 * For pure Node.js work, prefer the `Debouncer` class.
 */
export function debounce<T extends unknown[]>(
	callback: (...rest: T) => unknown,
	limit: number
): (...rest: T) => void {
	let timer: ReturnType<typeof setTimeout>;

	return function (...rest) {
		clearTimeout(timer);
		timer = setTimeout(() => {
			callback(...rest);
		}, limit);
	};
}
chat-ui/src/lib/utils/debounce.ts/0
{ "file_path": "chat-ui/src/lib/utils/debounce.ts", "repo_id": "chat-ui", "token_count": 138 }
69
type UUID = ReturnType<typeof crypto.randomUUID>;

export function randomUUID(): UUID {
	// Fallback needed only on old Safari / iOS, where crypto.randomUUID is unavailable
	if (!("randomUUID" in crypto)) {
		return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
			(
				Number(c) ^
				(crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
			).toString(16)
		) as UUID;
	}
	return crypto.randomUUID();
}
chat-ui/src/lib/utils/randomUuid.ts/0
{ "file_path": "chat-ui/src/lib/utils/randomUuid.ts", "repo_id": "chat-ui", "token_count": 166 }
70
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import { v4 } from "uuid"; export function convertLegacyConversation( conv: Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> ): Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> { if (conv.rootMessageId) return conv; // not a legacy conversation if (conv.messages.length === 0) return conv; // empty conversation const messages = [ { from: "system", content: conv.preprompt ?? "", createdAt: new Date(), updatedAt: new Date(), id: v4(), } satisfies Message, ...conv.messages, ]; const rootMessageId = messages[0].id; const newMessages = messages.map((message, index) => { return { ...message, ancestors: messages.slice(0, index).map((m) => m.id), children: index < messages.length - 1 ? [messages[index + 1].id] : [], }; }); return { ...conv, rootMessageId, messages: newMessages, }; }
chat-ui/src/lib/utils/tree/convertLegacyConversation.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.ts", "repo_id": "chat-ui", "token_count": 354 }
71
import { collections } from "$lib/server/database.js"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import type { CommunityToolDB } from "$lib/types/Tool.js"; import { ObjectId } from "mongodb"; export async function GET({ params, locals }) { // XXX: feature_flag_tools if (!locals.user?.isEarlyAccess) { return new Response("Not early access", { status: 403 }); } const toolId = params.toolId; try { const configTool = toolFromConfigs.find((el) => el._id.toString() === toolId); if (configTool) { return Response.json({ _id: toolId, displayName: configTool.displayName, color: configTool.color, icon: configTool.icon, createdByName: undefined, }); } else { // try community tools const tool = await collections.tools .findOne<CommunityToolDB>({ _id: new ObjectId(toolId) }) .then((tool) => tool ? { _id: tool._id.toString(), displayName: tool.displayName, color: tool.color, icon: tool.icon, createdByName: tool.createdByName, featured: tool.featured, } : undefined ); if (!tool || !tool.featured) { return new Response(`Tool "${toolId}" not found`, { status: 404 }); } return Response.json(tool); } } catch (e) { return new Response(`Tool "${toolId}" not found`, { status: 404 }); } }
chat-ui/src/routes/api/tools/[toolId]/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/tools/[toolId]/+server.ts", "repo_id": "chat-ui", "token_count": 544 }
72
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import type { RequestHandler } from "./$types";
import { downloadFile } from "$lib/server/files/downloadFile";

export const GET: RequestHandler = async ({ locals, params }) => {
	const sha256 = z.string().parse(params.sha256);

	const userId = locals.user?._id ?? locals.sessionId;

	// check user
	if (!userId) {
		error(401, "Unauthorized");
	}

	if (params.id.length !== 7) {
		const convId = new ObjectId(z.string().parse(params.id));

		// check if the user has access to the conversation
		const conv = await collections.conversations.findOne({
			_id: convId,
			...authCondition(locals),
		});

		if (!conv) {
			error(404, "Conversation not found");
		}
	} else {
		// look for the conversation in shared conversations
		const conv = await collections.sharedConversations.findOne({
			_id: params.id,
		});

		if (!conv) {
			error(404, "Conversation not found");
		}
	}

	const { value, mime } = await downloadFile(sha256, params.id);
	const b64Value = Buffer.from(value, "base64");

	return new Response(b64Value, {
		headers: {
			"Content-Type": mime ?? "application/octet-stream",
			"Content-Security-Policy":
				"default-src 'none'; script-src 'none'; style-src 'none'; sandbox;",
			"Content-Length": b64Value.length.toString(),
			// standard HTTP header name is "Accept-Ranges"
			"Accept-Ranges": "bytes",
		},
	});
};
chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts", "repo_id": "chat-ui", "token_count": 521 }
73
import { redirect, type LoadEvent } from "@sveltejs/kit"; export const load = async ({ params, url }: LoadEvent) => { const leafId = url.searchParams.get("leafId"); throw redirect(302, "../conversation/" + params.id + `?leafId=${leafId}`); };
chat-ui/src/routes/r/[id]/+page.ts/0
{ "file_path": "chat-ui/src/routes/r/[id]/+page.ts", "repo_id": "chat-ui", "token_count": 84 }
74
<script lang="ts"> import { base } from "$app/paths"; import { clickOutside } from "$lib/actions/clickOutside"; import { afterNavigate, goto } from "$app/navigation"; import { useSettingsStore } from "$lib/stores/settings"; import CarbonCheckmark from "~icons/carbon/checkmark"; import { fade, fly } from "svelte/transition"; let previousPage: string = base; afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.toString() || previousPage; } }); const settings = useSettingsStore(); </script> <div class="fixed inset-0 z-20 flex items-center justify-center bg-black/80 backdrop-blur-sm dark:bg-black/50" in:fade > <dialog in:fly={{ y: 100 }} open use:clickOutside={() => { if (window?.getSelection()?.toString()) { return; } goto(previousPage); }} class="h-[95dvh] w-[90dvw] overflow-hidden rounded-2xl bg-white shadow-2xl outline-none sm:h-[85dvh] xl:w-[1200px] 2xl:h-[75dvh]" > <slot /> {#if $settings.recentlySaved} <div class="absolute bottom-4 right-4 m-2 flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-200 px-3 py-1 text-black" > <CarbonCheckmark class="text-green-500" /> Saved </div> {/if} </dialog> </div>
chat-ui/src/routes/settings/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/settings/+layout.svelte", "repo_id": "chat-ui", "token_count": 513 }
75
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none"> <path fill="#FFD21E" d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z" /> <path fill="#32343D" d="M19.63 12.48c.37.14.52.9.9.7.71-.38.98-1.27.6-1.98a1.46 1.46 0 0 0-1.98-.61 1.47 1.47 0 0 0-.6 1.99c.17.34.74-.21 1.08-.1ZM12.72 12.48c-.37.14-.52.9-.9.7a1.47 1.47 0 0 1-.6-1.98 1.46 1.46 0 0 1 1.98-.61c.71.38.98 1.27.6 1.99-.18.34-.74-.21-1.08-.1ZM16.24 19.55c2.89 0 3.82-2.58 3.82-3.9 0-1.33-1.71.7-3.82.7-2.1 0-3.8-2.03-3.8-.7 0 1.32.92 3.9 3.8 3.9Z" /> <path fill="#FF323D" d="M18.56 18.8c-.57.44-1.33.75-2.32.75-.92 0-1.65-.27-2.2-.68.3-.63.87-1.11 1.55-1.32.12-.03.24.17.36.38.12.2.24.4.37.4s.26-.2.39-.4.26-.4.38-.36a2.56 2.56 0 0 1 1.47 1.23Z" /> </svg>
chat-ui/static/huggingchat/logo.svg/0
{ "file_path": "chat-ui/static/huggingchat/logo.svg", "repo_id": "chat-ui", "token_count": 523 }
76
# Security Policy

## Supported Versions

Each major version is currently being supported with security updates.

| Version | Supported          |
|---------|--------------------|
| 1.x.x   | :white_check_mark: |
| 2.x.x   | :white_check_mark: |

## Reporting a Vulnerability

To report a security vulnerability, please contact: security@huggingface.co
datasets/SECURITY.md/0
{ "file_path": "datasets/SECURITY.md", "repo_id": "datasets", "token_count": 306 }
77
# docstyle-ignore INSTALL_CONTENT = """ # Datasets installation ! pip install datasets transformers # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/datasets.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] default_branch_name = "main" version_prefix = ""
datasets/docs/source/_config.py/0
{ "file_path": "datasets/docs/source/_config.py", "repo_id": "datasets", "token_count": 118 }
78
# Create a dataset card Each dataset should have a dataset card to promote responsible usage and inform users of any potential biases within the dataset. This idea was inspired by the Model Cards proposed by [Mitchell, 2018](https://arxiv.org/abs/1810.03993). Dataset cards help users understand a dataset's contents, the context for using the dataset, how it was created, and any other considerations a user should be aware of. Creating a dataset card is easy and can be done in just a few steps: 1. Go to your dataset repository on the [Hub](https://hf.co/new-dataset) and click on **Create Dataset Card** to create a new `README.md` file in your repository. 2. Use the **Metadata UI** to select the tags that describe your dataset. You can add a license, language, pretty_name, the task_categories, size_categories, and any other tags that you think are relevant. These tags help users discover and find your dataset on the Hub. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui.png"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui-dark.png"/> </div> <Tip> For a complete, but not required, set of tag options you can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1). This'll have a few more tag options like `multilinguality` and `language_creators` which are useful but not absolutely necessary. </Tip> 3. Click on the **Import dataset card template** link to automatically create a template with all the relevant fields to complete. Fill out the template sections to the best of your ability. Take a look at the [Dataset Card Creation Guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) for more detailed information about what to include in each section of the card. For fields you are unable to complete, you can write **[More Information Needed]**. 4. Once you're done, commit the changes to the `README.md` file and you'll see the completed dataset card on your repository. YAML also allows you to customize the way your dataset is loaded by [defining splits and/or configurations](./repository_structure#define-your-splits-and-subsets-in-yaml) without the need to write any code. Feel free to take a look at the [SNLI](https://huggingface.co/datasets/snli), [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail), and [Allociné](https://huggingface.co/datasets/allocine) dataset cards as examples to help you get started.
datasets/docs/source/dataset_card.mdx/0
{ "file_path": "datasets/docs/source/dataset_card.mdx", "repo_id": "datasets", "token_count": 756 }
79
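The same card-creation steps can be scripted with `huggingface_hub`. Below is a minimal sketch; the repo ID and tag values are placeholders, not part of the guide above:

```python
# Sketch: create and push a dataset card programmatically.
from huggingface_hub import DatasetCard, DatasetCardData

card_data = DatasetCardData(
    license="mit",
    language=["en"],
    task_categories=["text-classification"],
    pretty_name="My Demo Dataset",        # placeholder
    size_categories=["1K<n<10K"],
)

# from_template fills the standard dataset card template with the metadata above;
# sections that can't be filled automatically stay as "[More Information Needed]".
card = DatasetCard.from_template(card_data, pretty_name=card_data.pretty_name)

card.push_to_hub("username/my-demo-dataset")  # placeholder repo ID
```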
# Load text data

This guide shows you how to load text datasets. To learn how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.

Text files are one of the most common file types for storing a dataset. By default, 🤗 Datasets samples a text file line by line to build the dataset.

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("text", data_files={"train": ["my_text_1.txt", "my_text_2.txt"], "test": "my_test_file.txt"})

# Load from a directory
>>> dataset = load_dataset("text", data_dir="path/to/text/dataset")
```

To sample a text file by paragraph or even an entire document, use the `sample_by` parameter:

```py
# Sample by paragraph
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="paragraph")

# Sample by document
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="document")
```

You can also use glob patterns to load specific files:

```py
>>> from datasets import load_dataset
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
```

To load remote text files via HTTP, pass the URLs instead:

```py
>>> dataset = load_dataset("text", data_files="https://huggingface.co/datasets/lhoestq/test/resolve/main/some_text.txt")
```
datasets/docs/source/nlp_load.mdx/0
{ "file_path": "datasets/docs/source/nlp_load.mdx", "repo_id": "datasets", "token_count": 482 }
80
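As a quick sanity check after loading, you can inspect the first record; with the default line-by-line sampling, each row holds a single `text` field. A small sketch (the file name is a placeholder):

```python
from datasets import load_dataset

# Assumes my_train_file.txt exists; each line becomes one row with a "text" column.
dataset = load_dataset("text", data_files={"train": "my_train_file.txt"})
print(dataset["train"][0])  # e.g. {'text': 'first line of the file'}
```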
# Overview Welcome to the 🤗 Datasets tutorials! These beginner-friendly tutorials will guide you through the fundamentals of working with 🤗 Datasets. You'll load and prepare a dataset for training with your machine learning framework of choice. Along the way, you'll learn how to load different dataset configurations and splits, interact with and see what's inside your dataset, preprocess, and share a dataset to the [Hub](https://huggingface.co/datasets). The tutorials assume some basic knowledge of Python and a machine learning framework like PyTorch or TensorFlow. If you're already familiar with these, feel free to check out the [quickstart](./quickstart) to see what you can do with 🤗 Datasets. <Tip> The tutorials only cover the basic skills you need to use 🤗 Datasets. There are many other useful functionalities and applications that aren't discussed here. If you're interested in learning more, take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course. </Tip> If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10). Let's get started! 🏁
datasets/docs/source/tutorial.md/0
{ "file_path": "datasets/docs/source/tutorial.md", "repo_id": "datasets", "token_count": 311 }
81
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset.
    The new dataset is constructed by alternating between the sources to get the examples.

    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.

        - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
        - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.

    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy`
    is `"all_exhausted"`, in which case the resulting dataset ends when all datasets have run out of examples at least once.

    Note for iterable datasets:

    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).

    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.
            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.
            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are currently supported: `first_exhausted` and `all_exhausted`.
            By default, `first_exhausted` is an undersampling strategy, i.e the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
        `IterableDataset`.
    Example:

        For regular datasets (map-style):

        ```python
        >>> from datasets import Dataset, interleave_datasets
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]

        For datasets in streaming mode (iterable):

        >>> from datasets import load_dataset, interleave_datasets
        >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
        >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
        >>> dataset = interleave_datasets([d1, d2])
        >>> iterator = iter(dataset)
        >>> next(iterator)
        {'text': 'Mtendere Village was inspired by the vision...}
        >>> next(iterator)
        {'text': "Média de débat d'idées, de culture...}
        ```
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].

    Args:
        dsets (`List[datasets.Dataset]`):
            List of Datasets to concatenate.
        info (`DatasetInfo`, *optional*):
            Dataset information, like description, citation, etc.
        split (`NamedSplit`, *optional*):
            Name of the dataset split.
        axis (`{0, 1}`, defaults to `0`):
            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
            (horizontally).

            <Added version="1.6.0"/>

    Example:

        ```py
        >>> ds3 = concatenate_datasets([ds1, ds2])
        ```
    """

    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
datasets/src/datasets/combine.py/0
{ "file_path": "datasets/src/datasets/combine.py", "repo_id": "datasets", "token_count": 4607 }
82
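To complement the docstrings above, here is a minimal sketch of the `axis` parameter of `concatenate_datasets`: `axis=0` (the default) stacks rows, while `axis=1` joins the columns of equal-length datasets.

```python
from datasets import Dataset, concatenate_datasets

ds1 = Dataset.from_dict({"a": [0, 1, 2]})
ds2 = Dataset.from_dict({"b": ["x", "y", "z"]})

# axis=1 concatenates over columns: both datasets must have the same number of rows.
wide = concatenate_datasets([ds1, ds2], axis=1)
print(wide.column_names)  # ['a', 'b']
print(wide[0])            # {'a': 0, 'b': 'x'}

# axis=0 (default) concatenates over rows: datasets must share the same schema.
ds3 = Dataset.from_dict({"a": [3, 4]})
tall = concatenate_datasets([ds1, ds3])
print(tall["a"])  # [0, 1, 2, 3, 4]
```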
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.

from typing import Any, Dict, List, Optional, Union

from huggingface_hub import HfFileSystem

from . import config
from .table import CastError
from .utils.track import TrackedIterableFromGenerator, tracked_list, tracked_str


class DatasetsError(Exception):
    """Base class for exceptions in this library."""


class DefunctDatasetError(DatasetsError):
    """The dataset has been defunct."""


class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
    """FileNotFoundError raised by this library."""


class DataFilesNotFoundError(FileNotFoundDatasetsError):
    """No (supported) data files found."""


class DatasetNotFoundError(FileNotFoundDatasetsError):
    """Dataset not found.

    Raised when trying to access:
    - a missing dataset, or
    - a private/gated dataset and the user is not authenticated.
    """


class DatasetBuildError(DatasetsError):
    pass


class ManualDownloadError(DatasetBuildError):
    pass


class FileFormatError(DatasetBuildError):
    pass


class DatasetGenerationError(DatasetBuildError):
    pass


class DatasetGenerationCastError(DatasetGenerationError):
    @classmethod
    def from_cast_error(
        cls,
        cast_error: CastError,
        builder_name: str,
        gen_kwargs: Dict[str, Any],
        token: Optional[Union[bool, str]],
    ) -> "DatasetGenerationCastError":
        explanation_message = (
            f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
        )
        formatted_tracked_gen_kwargs: List[str] = []
        for gen_kwarg in gen_kwargs.values():
            if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
                continue
            while (
                isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator))
                and gen_kwarg.last_item is not None
            ):
                gen_kwarg = gen_kwarg.last_item
            if isinstance(gen_kwarg, tracked_str):
                gen_kwarg = gen_kwarg.get_origin()
            if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
                resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
                gen_kwarg = "hf://" + resolved_path.unresolve()
                if "@" + resolved_path.revision in gen_kwarg:
                    gen_kwarg = (
                        gen_kwarg.replace("@" + resolved_path.revision, "", 1)
                        + f" (at revision {resolved_path.revision})"
                    )
            formatted_tracked_gen_kwargs.append(str(gen_kwarg))
        if formatted_tracked_gen_kwargs:
            explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
        help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
        return cls("An error occurred while generating the dataset" + explanation_message + help_message)


class ChecksumVerificationError(DatasetsError):
    """Error raised during checksums verifications of downloaded files."""


class UnexpectedDownloadedFileError(ChecksumVerificationError):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationError):
    """The downloaded file checksum doesn't match the expected checksum."""


class SplitsVerificationError(DatasetsError):
    """Error raised during splits verifications."""


class UnexpectedSplitsError(SplitsVerificationError):
    """The downloaded file has unexpected splits."""


class ExpectedMoreSplitsError(SplitsVerificationError):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationError):
    """The splits sizes don't match the expected splits sizes."""
datasets/src/datasets/exceptions.py/0
{ "file_path": "datasets/src/datasets/exceptions.py", "repo_id": "datasets", "token_count": 1552 }
83
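Because these classes are exported from `datasets.exceptions`, callers can catch them individually. A minimal sketch; the dataset ID is a placeholder:

```python
from datasets import load_dataset
from datasets.exceptions import DatasetGenerationCastError, DatasetNotFoundError

try:
    ds = load_dataset("username/some-dataset")  # placeholder repo ID
except DatasetNotFoundError:
    print("The dataset is missing, private, or gated.")
except DatasetGenerationCastError as err:
    # Raised when data files have mismatched columns; the message explains which files diverged.
    print(err)
```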
import time from itertools import chain from typing import Optional, Union from huggingface_hub import ( CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi, HfFileSystem, ) from huggingface_hub.utils import HfHubHTTPError import datasets.config from datasets.info import DatasetInfosDict from datasets.inspect import get_dataset_config_names, get_dataset_default_config_name from datasets.load import load_dataset, load_dataset_builder from datasets.utils.metadata import MetadataConfigs def convert_to_parquet( repo_id: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, trust_remote_code: Optional[bool] = None, ) -> CommitInfo: """Convert Hub [script-based dataset](dataset_script) to Parquet [data-only dataset](repository_structure), so that the dataset viewer will be supported. This function: - makes a copy of the script on the "main" branch into a dedicated branch called "script" (if it does not already exist) - creates a pull request to the Hub dataset to convert it to Parquet files (and deletes the script from the main branch) If in the future you need to recreate the Parquet files from the "script" branch, pass the `revision="script"` argument. Note that you should pass the `trust_remote_code=True` argument only if you trust the remote code to be executed locally on your machine. Args: repo_id (`str`): ID of the source Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. revision (`str`, *optional*): Branch of the source Hub dataset repository. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. trust_remote_code (`bool`, defaults to `False`): Whether you trust the remote code of the Hub script-based dataset to be executed locally on your machine. This option should only be set to `True` for repositories where you have read the code and which you trust. <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. 
</Changed> Returns: `huggingface_hub.CommitInfo` """ print(f"{repo_id}") configs = get_dataset_config_names(repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code) print(f"{configs = }") default_config = get_dataset_default_config_name( repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code ) print(f"{default_config = }") if default_config: config = default_config configs.remove(default_config) else: config = configs.pop(0) print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) commit_info = dataset.push_to_hub( repo_id, config_name=config, commit_message="Convert dataset to Parquet", commit_description="Convert dataset to Parquet.", create_pr=True, token=token, set_default=default_config is not None, ) time.sleep(5) pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url for config in configs: print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) dataset.push_to_hub( repo_id, config_name=config, commit_message=f"Add '{config}' config data files", revision=pr_revision, token=token, ) time.sleep(5) _delete_files(repo_id, revision=pr_revision, token=token) if not revision: api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) try: api.create_branch(repo_id, branch="script", repo_type="dataset", token=token, exist_ok=True) except HfHubHTTPError: pass print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}") return commit_info def delete_from_hub( repo_id: str, config_name: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, ) -> CommitInfo: """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub. Args: repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. config_name (`str`): Name of the dataset configuration. revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. 
Returns: `huggingface_hub.CommitInfo` """ operations = [] # data_files fs = HfFileSystem(endpoint=datasets.config.HF_ENDPOINT, token=token) builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token, trust_remote_code=False) for data_file in chain(*builder.config.data_files.values()): data_file_resolved_path = fs.resolve_path(data_file) if data_file_resolved_path.repo_id == repo_id: operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo)) # README.md dataset_card = DatasetCard.load(repo_id) # config_names if dataset_card.data.get("config_names", None) and config_name in dataset_card.data["config_names"]: dataset_card.data["config_names"].remove(config_name) # metadata_configs metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data) if metadata_configs: _ = metadata_configs.pop(config_name, None) dataset_card_data = DatasetCardData() metadata_configs.to_dataset_card_data(dataset_card_data) if datasets.config.METADATA_CONFIGS_FIELD in dataset_card_data: dataset_card.data[datasets.config.METADATA_CONFIGS_FIELD] = dataset_card_data[ datasets.config.METADATA_CONFIGS_FIELD ] else: _ = dataset_card.data.pop(datasets.config.METADATA_CONFIGS_FIELD, None) # dataset_info dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data) if dataset_infos: _ = dataset_infos.pop(config_name, None) dataset_card_data = DatasetCardData() dataset_infos.to_dataset_card_data(dataset_card_data) if "dataset_info" in dataset_card_data: dataset_card.data["dataset_info"] = dataset_card_data["dataset_info"] else: _ = dataset_card.data.pop("dataset_info", None) # Commit operations.append( CommitOperationAdd(path_in_repo=datasets.config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) commit_info = api.create_commit( repo_id, operations=operations, commit_message=f"Delete '{config_name}' config", commit_description=f"Delete '{config_name}' config.", token=token, repo_type="dataset", revision=revision, create_pr=True, ) print(f"You can find your PR to delete the dataset config at: {commit_info.pr_url}") return commit_info def _delete_files(dataset_id, revision=None, token=None): dataset_name = dataset_id.split("/")[-1] hf_api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) repo_files = hf_api.list_repo_files( dataset_id, repo_type="dataset", ) if repo_files: legacy_json_file = [] python_files = [] data_files = [] for filename in repo_files: if filename in {".gitattributes", "README.md"}: continue elif filename == f"{dataset_name}.py": hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete loading script", ) elif filename == "dataset_infos.json": legacy_json_file.append(filename) elif filename.endswith(".py"): python_files.append(filename) else: data_files.append(filename) if legacy_json_file: hf_api.delete_file( "dataset_infos.json", dataset_id, repo_type="dataset", revision=revision, commit_message="Delete legacy dataset_infos.json", ) if python_files: for filename in python_files: hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete loading script auxiliary file", ) if data_files: for filename in data_files: hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete data file", )
datasets/src/datasets/hub.py/0
{ "file_path": "datasets/src/datasets/hub.py", "repo_id": "datasets", "token_count": 4128 }
84
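A minimal usage sketch for the two helpers defined above; the repo IDs are placeholders, and `trust_remote_code=True` should only be passed for scripts you have reviewed:

```python
from datasets.hub import convert_to_parquet, delete_from_hub

# Open a pull request that converts a script-based dataset to Parquet data files.
commit_info = convert_to_parquet("username/script-dataset", trust_remote_code=True)
print(commit_info.pr_url)

# Open a pull request that removes one configuration from a data-only dataset.
delete_from_hub("username/parquet-dataset", config_name="default")
```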
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql from .text import text from .webdataset import webdataset def _hash_python_lines(lines: List[str]) -> str: filtered_lines = [] for line in lines: line = re.sub(r"#.*", "", line) # remove comments if line: filtered_lines.append(line) full_str = "\n".join(filtered_lines) # Make a hash from all this code full_bytes = full_str.encode("utf-8") return insecure_hashlib.sha256(full_bytes).hexdigest() # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())), } # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES_2_15_HASHES = { "csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d", "json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96", "pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202", "parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1", "arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137", "text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34", "imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5", "audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c", } # Used to infer the module to use based on the data files extensions _EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".geoparquet": ("parquet", {}), ".gpq": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), ".tar": ("webdataset", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) for _module in _MODULE_TO_EXTENSIONS: 
_MODULE_TO_EXTENSIONS[_module].append(".zip")
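A quick standalone illustration (not part of the file above) of why `_hash_python_lines` works as a cache key: comment-only and blank-line edits leave the hash unchanged. Plain `hashlib.sha256` stands in for `insecure_hashlib` here, which uses the same algorithm.

```python
import re
from hashlib import sha256  # stand-in for huggingface_hub's insecure_hashlib.sha256
from typing import List


def hash_python_lines(lines: List[str]) -> str:
    # Mirror of _hash_python_lines above: drop comments and empty lines, then hash
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    return sha256("\n".join(filtered_lines).encode("utf-8")).hexdigest()


v1 = ["x = 1", "y = 2"]
v2 = ["x = 1", "# a new comment", "", "y = 2"]
assert hash_python_lines(v1) == hash_python_lines(v2)  # same cache key
```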
datasets/src/datasets/packaged_modules/__init__.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/__init__.py", "repo_id": "datasets", "token_count": 1528 }
85
import io
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa
import pyarrow.json as paj

import datasets
import datasets.config
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


def ujson_dumps(*args, **kwargs):
    try:
        return pd.io.json.ujson_dumps(*args, **kwargs)
    except AttributeError:
        # Before pandas-2.2.0, the function was exposed as dumps instead of ujson_dumps
        return pd.io.json.dumps(*args, **kwargs)


def ujson_loads(*args, **kwargs):
    try:
        return pd.io.json.ujson_loads(*args, **kwargs)
    except AttributeError:
        # Before pandas-2.2.0, the function was exposed as loads instead of ujson_loads
        return pd.io.json.loads(*args, **kwargs)


def pandas_read_json(path_or_buf, **kwargs):
    if datasets.config.PANDAS_VERSION.major >= 2:
        kwargs["dtype_backend"] = "pyarrow"
    return pd.read_json(path_or_buf, **kwargs)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None

    def __post_init__(self):
        super().__post_init__()


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): type = self.config.features.arrow_schema.field(column_name).type pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # If the file is one json object and if we need to look at the items in one specific field if self.config.field is not None: with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: dataset = ujson_loads(f.read()) # We keep only the field we are interested in dataset = dataset[self.config.field] df = pandas_read_json(io.StringIO(ujson_dumps(dataset))) if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] pa_table = pa.Table.from_pandas(df, preserve_index=False) yield file_idx, self._cast_table(pa_table) # If the file has one json object per line else: with open(file, "rb") as f: batch_idx = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small block_size = max(self.config.chunksize // 32, 16 << 10) encoding_errors = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: batch = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(f) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") try: while True: try: pa_table = paj.read_json( io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(e, pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." 
) block_size *= 2 except pa.ArrowInvalid as e: try: with open( file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f: df = pandas_read_json(f) except ValueError: logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}") raise e if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] try: pa_table = pa.Table.from_pandas(df, preserve_index=False) except pa.ArrowInvalid as e: logger.error( f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}" ) raise ValueError( f"Failed to convert pandas DataFrame to Arrow Table from file {file}." ) from None yield file_idx, self._cast_table(pa_table) break yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1
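For context, here is a hedged sketch of how the builder above is typically reached through the public `load_dataset` API; the file names are hypothetical.

```python
from datasets import load_dataset

# One JSON object per line (JSON Lines), e.g. {"text": "...", "label": 0}
ds = load_dataset("json", data_files="data.jsonl", split="train")

# A single JSON document whose records live under one key uses `field`,
# e.g. {"version": "0.1", "data": [{...}, {...}]}
ds_nested = load_dataset("json", data_files="data.json", field="data", split="train")
```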
datasets/src/datasets/packaged_modules/json/json.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/json/json.py", "repo_id": "datasets", "token_count": 4543 }
86
import importlib.util import os import tempfile from pathlib import PurePath from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union import fsspec import numpy as np from .features import Sequence from .utils import logging from .utils import tqdm as hf_tqdm if TYPE_CHECKING: from .arrow_dataset import Dataset # noqa: F401 try: from elasticsearch import Elasticsearch # noqa: F401 except ImportError: pass try: import faiss # noqa: F401 except ImportError: pass _has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None _has_faiss = importlib.util.find_spec("faiss") is not None logger = logging.get_logger(__name__) class MissingIndex(Exception): pass class SearchResults(NamedTuple): scores: List[float] indices: List[int] class BatchedSearchResults(NamedTuple): total_scores: List[List[float]] total_indices: List[List[int]] class NearestExamplesResults(NamedTuple): scores: List[float] examples: dict class BatchedNearestExamplesResults(NamedTuple): total_scores: List[List[float]] total_examples: List[dict] class BaseIndex: """Base class for indexing""" def search(self, query, k: int = 10, **kwargs) -> SearchResults: """ To implement. This method has to return the scores and the indices of the retrieved examples given a certain query. """ raise NotImplementedError def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults: """Find the nearest examples indices to the query. Args: queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index. k (`int`): The number of examples to retrieve per query. Ouput: total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query. total_indices (`List[List[int]]`): The indices of the retrieved examples per query. """ total_scores, total_indices = [], [] for query in queries: scores, indices = self.search(query, k) total_scores.append(scores) total_indices.append(indices) return BatchedSearchResults(total_scores, total_indices) def save(self, file: Union[str, PurePath]): """Serialize the index on disk""" raise NotImplementedError @classmethod def load(cls, file: Union[str, PurePath]) -> "BaseIndex": """Deserialize the index from disk""" raise NotImplementedError class ElasticSearchIndex(BaseIndex): """ Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity. An Elasticsearch server needs to be accessible, and a python client is declared with ``` es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}]) ``` for example. """ def __init__( self, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): if not _has_elasticsearch: raise ImportError( "You must install ElasticSearch to use ElasticSearchIndex. 
To do so you can run `pip install elasticsearch==7.7.1 for example`" ) if es_client is not None and (host is not None or port is not None): raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.") host = host or "localhost" port = port or 9200 import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features from elasticsearch import Elasticsearch # noqa: F811 self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}]) self.es_index_name = ( es_index_name if es_index_name is not None else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name) ) self.es_index_config = ( es_index_config if es_index_config is not None else { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}}, } ) def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None): """ Add documents to the index. If the documents are inside a certain column, you can specify it using the `column` argument. """ index_name = self.es_index_name index_config = self.es_index_config self.es_client.indices.create(index=index_name, body=index_config) number_of_docs = len(documents) progress = hf_tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): if column is not None: for i, example in enumerate(documents): yield {"text": example[column], "_id": i} else: for i, example in enumerate(documents): yield {"text": example, "_id": i} # create the ES index import elasticsearch as es for ok, action in es.helpers.streaming_bulk( client=self.es_client, index=index_name, actions=passage_generator(), ): progress.update(1) successes += ok if successes != len(documents): logger.warning( f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}" ) logger.info(f"Indexed {successes:d} documents") def search(self, query: str, k=10, **kwargs) -> SearchResults: """Find the nearest examples indices to the query. Args: query (`str`): The query as a string. k (`int`): The number of examples to retrieve. Ouput: scores (`List[List[float]`): The retrieval scores of the retrieved examples. indices (`List[List[int]]`): The indices of the retrieved examples. """ response = self.es_client.search( index=self.es_index_name, body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k}, **kwargs, ) hits = response["hits"]["hits"] return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits]) def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults: import concurrent.futures total_scores, total_indices = [None] * len(queries), [None] * len(queries) with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)} for future in concurrent.futures.as_completed(future_to_index): index = future_to_index[future] results: SearchResults = future.result() total_scores[index] = results.scores total_indices[index] = results.indices return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores) class FaissIndex(BaseIndex): """ Dense index using Faiss. It is used to index vectors. 
Faiss is a library for efficient similarity search and clustering of dense vectors. It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM. You can find more information about Faiss here: - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU """ def __init__( self, device: Optional[Union[int, List[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, ): """ Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory """ if string_factory is not None and custom_index is not None: raise ValueError("Please specify either `string_factory` or `custom_index` but not both.") if device is not None and custom_index is not None: raise ValueError( "Cannot pass both 'custom_index' and 'device'. " "Pass 'custom_index' already transferred to the target device instead." ) self.device = device self.string_factory = string_factory self.metric_type = metric_type self.faiss_index = custom_index if not _has_faiss: raise ImportError( "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. " "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. " "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available." ) def add_vectors( self, vectors: Union[np.array, "Dataset"], column: Optional[str] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: Optional[bool] = None, ): """ Add vectors to the index. If the arrays are inside a certain column, you can specify it using the `column` argument. """ import faiss # noqa: F811 if column and not isinstance(vectors.features[column], Sequence): raise ValueError( f"Wrong feature type for column '{column}'. 
Expected 1d array, got {vectors.features[column]}" ) # Create index if self.faiss_index is None: size = len(vectors[0]) if column is None else len(vectors[0][column]) if self.string_factory is not None: if self.metric_type is None: index = faiss.index_factory(size, self.string_factory) else: index = faiss.index_factory(size, self.string_factory, self.metric_type) else: if self.metric_type is None: index = faiss.IndexFlat(size) else: index = faiss.IndexFlat(size, self.metric_type) self.faiss_index = self._faiss_index_to_device(index, self.device) logger.info(f"Created faiss index of type {type(self.faiss_index)}") # Set verbosity level if faiss_verbose is not None: self.faiss_index.verbose = faiss_verbose if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None: self.faiss_index.index.verbose = faiss_verbose if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None: self.faiss_index.quantizer.verbose = faiss_verbose if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None: self.faiss_index.clustering_index.verbose = faiss_verbose # Train if train_size is not None: train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column] logger.info(f"Training the index with the first {len(train_vecs)} vectors") self.faiss_index.train(train_vecs) else: logger.info("Ignored the training step of the faiss index as `train_size` is None.") # Add vectors logger.info(f"Adding {len(vectors)} vectors to the faiss index") for i in hf_tqdm(range(0, len(vectors), batch_size)): vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column] self.faiss_index.add(vecs) @staticmethod def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index": """ Sends a faiss index to a device. A device can either be a positive integer (GPU id), a negative integer (all GPUs), or a list of positive integers (select GPUs to use), or `None` for CPU. """ # If device is not specified, then it runs on CPU. if device is None: return index import faiss # noqa: F811 # If the device id is given as an integer if isinstance(device, int): # Positive integers are directly mapped to GPU ids if device > -1: faiss_res = faiss.StandardGpuResources() index = faiss.index_cpu_to_gpu(faiss_res, device, index) # And negative integers mean using all GPUs else: index = faiss.index_cpu_to_all_gpus(index) # Device ids given as a list mean mapping to those devices specified. elif isinstance(device, (list, tuple)): index = faiss.index_cpu_to_gpus_list(index, gpus=list(device)) else: raise TypeError( f"The argument type: {type(device)} is not expected. " + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints." ) return index def search(self, query: np.array, k=10, **kwargs) -> SearchResults: """Find the nearest examples indices to the query. Args: query (`np.array`): The query as a numpy array. k (`int`): The number of examples to retrieve. Ouput: scores (`List[List[float]`): The retrieval scores of the retrieved examples. indices (`List[List[int]]`): The indices of the retrieved examples. 
""" if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1): raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)") queries = query.reshape(1, -1) if not queries.flags.c_contiguous: queries = np.asarray(queries, order="C") scores, indices = self.faiss_index.search(queries, k, **kwargs) return SearchResults(scores[0], indices[0].astype(int)) def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults: """Find the nearest examples indices to the queries. Args: queries (`np.array`): The queries as a numpy array. k (`int`): The number of examples to retrieve. Ouput: total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query. total_indices (`List[List[int]]`): The indices of the retrieved examples per query. """ if len(queries.shape) != 2: raise ValueError("Shape of query must be 2D") if not queries.flags.c_contiguous: queries = np.asarray(queries, order="C") scores, indices = self.faiss_index.search(queries, k, **kwargs) return BatchedSearchResults(scores, indices.astype(int)) def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None): """Serialize the FaissIndex on disk""" import faiss # noqa: F811 if self.device is not None and isinstance(self.device, (int, list, tuple)): index = faiss.index_gpu_to_cpu(self.faiss_index) else: index = self.faiss_index with fsspec.open(str(file), "wb", **(storage_options or {})) as f: faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write))) @classmethod def load( cls, file: Union[str, PurePath], device: Optional[Union[int, List[int]]] = None, storage_options: Optional[Dict] = None, ) -> "FaissIndex": """Deserialize the FaissIndex from disk""" import faiss # noqa: F811 # Instances of FaissIndex is essentially just a wrapper for faiss indices. faiss_index = cls(device=device) with fsspec.open(str(file), "rb", **(storage_options or {})) as f: index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read))) faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device) return faiss_index class IndexableMixin: """Add indexing features to `datasets.Dataset`""" def __init__(self): self._indexes: Dict[str, BaseIndex] = {} def __len__(self): raise NotImplementedError def __getitem__(self, key): raise NotImplementedError def is_index_initialized(self, index_name: str) -> bool: return index_name in self._indexes def _check_index_is_initialized(self, index_name: str): if not self.is_index_initialized(index_name): raise MissingIndex( f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first." ) def list_indexes(self) -> List[str]: """List the `colindex_nameumns`/identifiers of all the attached indexes.""" return list(self._indexes) def get_index(self, index_name: str) -> BaseIndex: """List the `index_name`/identifiers of all the attached indexes. Args: index_name (`str`): Index name. Returns: [`BaseIndex`] """ self._check_index_is_initialized(index_name) return self._indexes[index_name] def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[Union[int, List[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, ): """Add a dense index using Faiss for fast retrieval. 
The index is created using the vectors of the specified column. You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory Args: column (`str`): The column of the vectors to add to the index. index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. By default it corresponds to `column`. device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. """ index_name = index_name if index_name is not None else column faiss_index = FaissIndex( device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index ) faiss_index.add_vectors( self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose ) self._indexes[index_name] = faiss_index def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[Union[int, List[int]]] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, batch_size: int = 1000, train_size: Optional[int] = None, faiss_verbose: bool = False, ): """Add a dense index using Faiss for fast retrieval. The index is created using the vectors of `external_arrays`. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). You can find more information about Faiss here: - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory Args: external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. 
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. <Added version="2.4.0"/> train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. """ faiss_index = FaissIndex( device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index ) faiss_index.add_vectors( external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose ) self._indexes[index_name] = faiss_index def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None): """Save a FaissIndex on disk. Args: index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.11.0"/> """ index = self.get_index(index_name) if not isinstance(index, FaissIndex): raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'") index.save(file, storage_options=storage_options) logger.info(f"Saved FaissIndex {index_name} at {file}") def load_faiss_index( self, index_name: str, file: Union[str, PurePath], device: Optional[Union[int, List[int]]] = None, storage_options: Optional[Dict] = None, ): """Load a FaissIndex from disk. If you want to do additional configurations, you can have access to the faiss index object by doing `.get_index(index_name).faiss_index` to make it fit your needs. Args: index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.11.0"/> """ index = FaissIndex.load(file, device=device, storage_options=storage_options) if index.faiss_index.ntotal != len(self): raise ValueError( f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples." ) self._indexes[index_name] = index logger.info(f"Loaded FaissIndex {index_name} from {file}") def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_name: Optional[str] = None, es_index_config: Optional[dict] = None, ): """Add a text index using ElasticSearch for fast retrieval. Args: column (`str`): The column of the documents to add to the index. index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`. By default it corresponds to `column`. 
host (Optional `str`, defaults to localhost): host of where ElasticSearch is running port (Optional `str`, defaults to 9200): port of where ElasticSearch is running es_client (Optional `elasticsearch.Elasticsearch`): The elasticsearch client used to create the index if host and port are None. es_index_name (Optional `str`): The elasticsearch index name used to create the index. es_index_config (Optional `dict`): The configuration of the elasticsearch index. Default config is: Config:: { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } """ index_name = index_name if index_name is not None else column es_index = ElasticSearchIndex( host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config ) es_index.add_documents(self, column=column) self._indexes[index_name] = es_index def load_elasticsearch_index( self, index_name: str, es_index_name: str, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["Elasticsearch"] = None, es_index_config: Optional[dict] = None, ): """Load an existing text index using ElasticSearch for fast retrieval. Args: index_name (`str`): The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`. es_index_name (`str`): The name of elasticsearch index to load. host (`str`, *optional*, defaults to `localhost`): Host of where ElasticSearch is running. port (`str`, *optional*, defaults to `9200`): Port of where ElasticSearch is running. es_client (`elasticsearch.Elasticsearch`, *optional*): The elasticsearch client used to create the index if host and port are `None`. es_index_config (`dict`, *optional*): The configuration of the elasticsearch index. Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "text": { "type": "text", "analyzer": "standard", "similarity": "BM25" }, } }, } ``` """ self._indexes[index_name] = ElasticSearchIndex( host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config ) def drop_index(self, index_name: str): """Drop the index with the specified column. Args: index_name (`str`): The `index_name`/identifier of the index. """ del self._indexes[index_name] def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults: """Find the nearest examples indices in the dataset to the query. Args: index_name (`str`): The name/identifier of the index. query (`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve. Returns: `(scores, indices)`: A tuple of `(scores, indices)` where: - **scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples - **indices** (`List[List[int]]`): the indices of the retrieved examples """ self._check_index_is_initialized(index_name) return self._indexes[index_name].search(query, k, **kwargs) def search_batch( self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs ) -> BatchedSearchResults: """Find the nearest examples indices in the dataset to the query. 
Args: index_name (`str`): The `index_name`/identifier of the index. queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve per query. Returns: `(total_scores, total_indices)`: A tuple of `(total_scores, total_indices)` where: - **total_scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query """ self._check_index_is_initialized(index_name) return self._indexes[index_name].search_batch(queries, k, **kwargs) def get_nearest_examples( self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs ) -> NearestExamplesResults: """Find the nearest examples in the dataset to the query. Args: index_name (`str`): The index_name/identifier of the index. query (`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve. Returns: `(scores, examples)`: A tuple of `(scores, examples)` where: - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples - **examples** (`dict`): the retrieved examples """ self._check_index_is_initialized(index_name) scores, indices = self.search(index_name, query, k, **kwargs) top_indices = [i for i in indices if i >= 0] return NearestExamplesResults(scores[: len(top_indices)], self[top_indices]) def get_nearest_examples_batch( self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs ) -> BatchedNearestExamplesResults: """Find the nearest examples in the dataset to the query. Args: index_name (`str`): The `index_name`/identifier of the index. queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index. k (`int`): The number of examples to retrieve per query. Returns: `(total_scores, total_examples)`: A tuple of `(total_scores, total_examples)` where: - **total_scores** (`List[List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query - **total_examples** (`List[dict]`): the retrieved examples per query """ self._check_index_is_initialized(index_name) total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs) total_scores = [ scores_i[: len([i for i in indices_i if i >= 0])] for scores_i, indices_i in zip(total_scores, total_indices) ] total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices] return BatchedNearestExamplesResults(total_scores, total_samples)
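A hedged usage sketch of the FAISS side of `IndexableMixin`, as exposed on `datasets.Dataset` (the column name and data are illustrative; assumes `faiss` is installed):

```python
import numpy as np
from datasets import Dataset

rng = np.random.default_rng(0)
embeddings = rng.random((3, 8), dtype=np.float32)  # FAISS expects float32 vectors
ds = Dataset.from_dict({"text": ["a", "b", "c"], "embeddings": list(embeddings)})
ds = ds.with_format("numpy")  # hand numpy arrays (not python lists) to FAISS

ds.add_faiss_index(column="embeddings")  # flat L2 index by default
scores, examples = ds.get_nearest_examples("embeddings", embeddings[0], k=2)
print(examples["text"])  # nearest rows; the first should be "a" itself
```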
datasets/src/datasets/search.py/0
{ "file_path": "datasets/src/datasets/search.py", "repo_id": "datasets", "token_count": 15341 }
87
# Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logging utilities.""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from .tqdm import ( # noqa: F401 # imported for backward compatibility disable_progress_bar, enable_progress_bar, is_progress_bar_enabled, tqdm, ) log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING def _get_default_logging_level(): """ If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to ``_default_log_level`` """ env_level_str = os.getenv("DATASETS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DATASETS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(logging.StreamHandler()) library_root_logger.setLevel(_get_default_logging_level()) def _reset_library_root_logger() -> None: library_root_logger = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET) def get_logger(name: Optional[str] = None) -> logging.Logger: """Return a logger with the specified name. This function can be used in dataset scripts. """ if name is None: name = _get_library_name() return logging.getLogger(name) def get_verbosity() -> int: """Return the current level for the HuggingFace datasets library's root logger. Returns: Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. <Tip> HuggingFace datasets library has following logging levels: - `datasets.logging.CRITICAL`, `datasets.logging.FATAL` - `datasets.logging.ERROR` - `datasets.logging.WARNING`, `datasets.logging.WARN` - `datasets.logging.INFO` - `datasets.logging.DEBUG` </Tip> """ return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """Set the level for the Hugging Face Datasets library's root logger. Args: verbosity: Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. """ _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the level for the Hugging Face datasets library's root logger to `INFO`. This will display most of the logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`. 
""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the level for the Hugging Face datasets library's root logger to `WARNING`. This will display only the warning and errors logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`. """ return set_verbosity(WARNING) def set_verbosity_debug(): """Set the level for the Hugging Face datasets library's root logger to `DEBUG`. This will display all the logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`. """ return set_verbosity(DEBUG) def set_verbosity_error(): """Set the level for the Hugging Face datasets library's root logger to `ERROR`. This will display only the errors logging information and tqdm bars. Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`. """ return set_verbosity(ERROR) def disable_propagation() -> None: """Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _get_library_root_logger().propagate = False def enable_propagation() -> None: """Enable propagation of the library log outputs. Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has been configured. """ _get_library_root_logger().propagate = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger()
datasets/src/datasets/utils/logging.py/0
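A short usage sketch of the verbosity helpers defined in this module:

```python
import datasets

datasets.logging.set_verbosity_info()  # show INFO-level library logs
logger = datasets.logging.get_logger(__name__)
logger.info("visible at INFO verbosity")

datasets.logging.set_verbosity_error()  # silence everything below ERROR
# The same default can be set before import with the env var: DATASETS_VERBOSITY=error
```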
{ "file_path": "datasets/src/datasets/utils/logging.py", "repo_id": "datasets", "token_count": 1934 }
88
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Version utils.""" import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$") @total_ordering @dataclass class Version: """Dataset version `MAJOR.MINOR.PATCH`. Args: version_str (`str`): The dataset version. description (`str`): A description of what is new in this version. major (`str`): minor (`str`): patch (`str`): Example: ```py >>> VERSION = datasets.Version("1.0.0") ``` """ version_str: str description: Optional[str] = None major: Optional[Union[str, int]] = None minor: Optional[Union[str, int]] = None patch: Optional[Union[str, int]] = None def __post_init__(self): self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str) def __repr__(self): return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def tuple(self): return self.major, self.minor, self.patch def _validate_operand(self, other): if isinstance(other, str): return Version(other) elif isinstance(other, Version): return other raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.") def __eq__(self, other): try: other = self._validate_operand(other) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__(self, other): other = self._validate_operand(other) return self.tuple < other.tuple def __hash__(self): return hash(_version_tuple_to_str(self.tuple)) @classmethod def from_dict(cls, dic): field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dic.items() if k in field_names}) def _to_yaml_string(self) -> str: return self.version_str def _str_to_version_tuple(version_str): """Return the tuple (major, minor, patch) version extracted from the str.""" res = _VERSION_REG.match(version_str) if not res: raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.") return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")]) def _version_tuple_to_str(version_tuple): """Return the str version from the version tuple (major, minor, patch).""" return ".".join(str(v) for v in version_tuple)
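The comparison semantics above in action (a small illustrative snippet):

```python
from datasets.utils.version import Version

v = Version("2.14.0")
assert v.tuple == (2, 14, 0)
assert v == "2.14.0"               # __eq__ coerces string operands
assert v < Version("2.15.0")       # total_ordering derives <=, >, >= from __lt__
assert not (v == "not-a-version")  # invalid operand compares unequal, no raise
```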
datasets/src/datasets/utils/version.py/0
{ "file_path": "datasets/src/datasets/utils/version.py", "repo_id": "datasets", "token_count": 1291 }
89
import csv import os import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _check_csv_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_csv_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_csv_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = CsvDatasetReader(csv_path, features=features, cache_dir=cache_dir).read() _check_csv_dataset(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_csv_split(split, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, split=split).read() _check_csv_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path): if issubclass(path_type, str): path = csv_path elif issubclass(path_type, list): path = [csv_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = CsvDatasetReader(path, cache_dir=cache_dir).read() _check_csv_dataset(dataset, expected_features) def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_csv_datasetdict_reader_keep_in_memory(keep_in_memory, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else 
assert_arrow_memory_doesnt_increase(): dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_csv_datasetdict_reader_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = CsvDatasetReader({"train": csv_path}, features=features, cache_dir=cache_dir).read() _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_csv_datasetdict_reader_split(split, csv_path, tmp_path): if split: path = {split: csv_path} else: path = {"train": csv_path, "test": csv_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = CsvDatasetReader(path, cache_dir=cache_dir).read() _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def iter_csv_file(csv_path): with open(csv_path, encoding="utf-8") as csvfile: yield from csv.reader(csvfile) def test_dataset_to_csv(csv_path, tmp_path): cache_dir = tmp_path / "cache" output_csv = os.path.join(cache_dir, "tmp.csv") dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() CsvDatasetWriter(dataset["train"], output_csv, num_proc=1).write() original_csv = iter_csv_file(csv_path) expected_csv = iter_csv_file(output_csv) for row1, row2 in zip(original_csv, expected_csv): assert row1 == row2 def test_dataset_to_csv_multiproc(csv_path, tmp_path): cache_dir = tmp_path / "cache" output_csv = os.path.join(cache_dir, "tmp.csv") dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() CsvDatasetWriter(dataset["train"], output_csv, num_proc=2).write() original_csv = iter_csv_file(csv_path) expected_csv = iter_csv_file(output_csv) for row1, row2 in zip(original_csv, expected_csv): assert row1 == row2 def test_dataset_to_csv_invalidproc(csv_path, tmp_path): cache_dir = tmp_path / "cache" output_csv = os.path.join(cache_dir, "tmp.csv") dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() with pytest.raises(ValueError): CsvDatasetWriter(dataset["train"], output_csv, num_proc=0) def test_dataset_to_csv_fsspec(dataset, mockfs): dataset_path = "mock://my_dataset.csv" writer = CsvDatasetWriter(dataset, dataset_path, storage_options=mockfs.storage_options) assert writer.write() > 0 assert mockfs.isfile(dataset_path) with fsspec.open(dataset_path, "rb", **mockfs.storage_options) as f: assert f.read()
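These tests depend on a `csv_path` fixture from the suite's conftest. A minimal sketch of what such a fixture could look like (the exact upstream fixture may differ):

```python
import csv

import pytest


@pytest.fixture
def csv_path(tmp_path_factory):
    # 4 rows of (int, int, float), matching what _check_csv_dataset expects
    path = tmp_path_factory.mktemp("data") / "dataset.csv"
    with open(path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["col_1", "col_2", "col_3"])
        for i in range(4):
            writer.writerow([i, i, float(i)])
    return str(path)
```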
datasets/tests/io/test_csv.py/0
{ "file_path": "datasets/tests/io/test_csv.py", "repo_id": "datasets", "token_count": 2970 }
90
import os from datasets.utils._filelock import FileLock def test_long_path(tmpdir): filename = "a" * 1000 + ".lock" lock1 = FileLock(str(tmpdir / filename)) assert lock1.lock_file.endswith(".lock") assert not lock1.lock_file.endswith(filename) assert len(os.path.basename(lock1.lock_file)) <= 255
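For context, a hedged usage sketch: this `FileLock` behaves like the `filelock` package's lock, but (as the test checks) keeps over-long lock filenames under common 255-character filesystem limits.

```python
from datasets.utils._filelock import FileLock

lock = FileLock("/tmp/my_resource.lock")  # path illustrative
with lock:  # blocks until the lock is acquired, releases on exit
    pass    # work that must not run concurrently goes here
```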
datasets/tests/test_filelock.py/0
{ "file_path": "datasets/tests/test_filelock.py", "repo_id": "datasets", "token_count": 120 }
91
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected", [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]), ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]), ], ) def test_distribute_shards(kwargs, expected): out = _distribute_shards(**kwargs) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected", [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ], ) def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected): out = _split_gen_kwargs(gen_kwargs, max_num_jobs) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected", [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ], ) def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected): if expected is RuntimeError: with pytest.raises(expected): _number_of_shards_in_gen_kwargs(gen_kwargs) else: out = _number_of_shards_in_gen_kwargs(gen_kwargs) assert out == expected
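The first parametrized case set boils down to a contiguous split over jobs; for example:

```python
from datasets.utils.sharding import _distribute_shards

# 10 shards over 3 jobs: contiguous ranges of sizes 4, 3 and 3 covering range(10)
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]
```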
datasets/tests/test_sharding_utils.py/0
{ "file_path": "datasets/tests/test_sharding_utils.py", "repo_id": "datasets", "token_count": 977 }
92
<jupyter_start><jupyter_text>Unit 2: Q-Learning with FrozenLake-v1 ⛄ and Taxi-v3 🚕 In this notebook, **you'll code your first Reinforcement Learning agent from scratch** to play FrozenLake ❄️ using Q-Learning, share it with the community, and experiment with different configurations.⬇️ Here is an example of what **you will achieve in just a couple of minutes.** ⬇️ 🎮 Environments:- [FrozenLake-v1](https://gymnasium.farama.org/environments/toy_text/frozen_lake/)- [Taxi-v3](https://gymnasium.farama.org/environments/toy_text/taxi/)📚 RL-Library:- Python and NumPy- [Gymnasium](https://gymnasium.farama.org/)We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to use **Gymnasium**, the environment library.- Be able to code a Q-Learning agent from scratch.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning Course In this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments**And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course Don't forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 **Study [Q-Learning by reading Unit 2](https://huggingface.co/deep-rl-course/unit2/introduction)** 🤗 A small recap of Q-Learning *Q-Learning* **is the RL algorithm that**:- Trains a *Q-Function*, an **action-value function** encoded, in internal memory, by a *Q-table* **that contains all the state-action pair values.**- Given a state and action, our Q-Function **will search the Q-table for the corresponding value.**- When the training is done, **we have an optimal Q-Function, so an optimal Q-Table.**- And if we **have an optimal Q-Function**, we have an optimal policy, since we **know, for each state, the best action to take.**But, in the beginning, our **Q-Table is useless since it gives arbitrary values for each state-action pair (most of the time we initialize the Q-Table to 0 values)**.
But, as we explore the environment and update our Q-Table, it will give us better and better approximations. This is the Q-Learning pseudocode: Let's code our first Reinforcement Learning algorithm 🚀 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process), you need to push your trained Taxi model to the Hub and **get a result of >= 4.5**.To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward**. For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process Install dependencies and create a virtual display 🔽In the notebook, we'll need to generate a replay video. To do so, with Colab, **we need to have a virtual screen to render the environment** (and thus record the frames).Hence the following cell will install the libraries and create and run a virtual screen 🖥We'll install multiple ones:- `gymnasium`: Contains the FrozenLake-v1 ⛄ and Taxi-v3 🚕 environments.- `pygame`: Used for the FrozenLake-v1 and Taxi-v3 UI.- `numpy`: Used for handling our Q-table.The Hugging Face Hub 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations and other features that will allow you to easily collaborate with others.You can see all the Deep RL models available (if they use Q-Learning) here 👉 https://huggingface.co/models?other=q-learning<jupyter_code>!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit2/requirements-unit2.txt

!sudo apt-get update
!sudo apt-get install -y python3-opengl
!apt install ffmpeg xvfb
!pip3 install pyvirtualdisplay<jupyter_output><empty_output><jupyter_text>To make sure the newly installed libraries are used, **sometimes it's required to restart the notebook runtime**. The next cell will force the **runtime to crash, so you'll need to connect again and run the code starting from here**.
Thanks to this trick, **we will be able to run our virtual screen.**<jupyter_code>import os os.kill(os.getpid(), 9) # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Import the packages 📦In addition to the installed libraries, we also use:- `random`: To generate random numbers (that will be useful for the epsilon-greedy policy).- `imageio`: To generate a replay video.<jupyter_code>import numpy as np import gymnasium as gym import random import imageio import os import tqdm import pickle5 as pickle from tqdm.notebook import tqdm<jupyter_output><empty_output><jupyter_text>We're now ready to code our Q-Learning algorithm 🔥 Part 1: Frozen Lake ⛄ (non slippery version) Create and understand the [FrozenLake environment ⛄](https://gymnasium.farama.org/environments/toy_text/frozen_lake/)---💡 A good habit when you start to use an environment is to check its documentation👉 https://gymnasium.farama.org/environments/toy_text/frozen_lake/---We're going to train our Q-Learning agent **to navigate from the starting state (S) to the goal state (G) by walking only on frozen tiles (F) and avoiding holes (H)**.We can have two sizes of environment:- `map_name="4x4"`: a 4x4 grid version- `map_name="8x8"`: an 8x8 grid versionThe environment has two modes:- `is_slippery=False`: The agent always moves **in the intended direction** due to the non-slippery nature of the frozen lake (deterministic).- `is_slippery=True`: The agent **may not always move in the intended direction** due to the slippery nature of the frozen lake (stochastic). For now let's keep it simple with the 4x4 map and non-slippery.We add a parameter called `render_mode` that specifies how the environment should be visualised. In our case, because we **want to record a video of the environment at the end, we need to set render_mode to rgb_array**.As [explained in the documentation](https://gymnasium.farama.org/api/env/gymnasium.Env.render), “rgb_array”: Return a single frame representing the current state of the environment. A frame is a np.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image.<jupyter_code># Create the FrozenLake-v1 environment using 4x4 map and non-slippery version and render_mode="rgb_array" env = gym.make() # TODO use the correct parameters<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False, render_mode="rgb_array")<jupyter_output><empty_output><jupyter_text>You can create your own custom grid like this:```pythondesc=["SFFF", "FHFH", "FFFH", "HFFG"]gym.make('FrozenLake-v1', desc=desc, is_slippery=True)```but we'll use the default environment for now. Let's see what the Environment looks like:<jupyter_code># We create our environment with gym.make("<name_of_the_environment>")
print("_____OBSERVATION SPACE_____ \n") print("Observation Space", env.observation_space) print("Sample observation", env.observation_space.sample()) # Get a random observation<jupyter_output><empty_output><jupyter_text>We see with `Observation Space Shape Discrete(16)` that the observation is an integer representing the **agent’s current position as current_row * ncols + current_col (where both the row and col start at 0)**.For example, the goal position in the 4x4 map can be calculated as follows: 3 * 4 + 3 = 15. The number of possible observations is dependent on the size of the map. **For example, the 4x4 map has 16 possible observations.**For instance, this is what state = 0 looks like:<jupyter_code>print("\n _____ACTION SPACE_____ \n") print("Action Space Shape", env.action_space.n) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The action space (the set of possible actions the agent can take) is discrete with 4 actions available 🎮:- 0: GO LEFT- 1: GO DOWN- 2: GO RIGHT- 3: GO UPReward function 💰:- Reach goal: +1- Reach hole: 0- Reach frozen: 0 Create and Initialize the Q-table 🗄️(👀 Step 1 of the pseudocode)It's time to initialize our Q-table! To know how many rows (states) and columns (actions) to use, we need to know the action and observation space. We already know their values from before, but we'll want to obtain them programmatically so that our algorithm generalizes for different environments. Gym provides us a way to do that: `env.action_space.n` and `env.observation_space.n`<jupyter_code>state_space = print("There are ", state_space, " possible states") action_space = print("There are ", action_space, " possible actions") # Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros. np.zeros needs a tuple (a,b) def initialize_q_table(state_space, action_space): Qtable = return Qtable Qtable_frozenlake = initialize_q_table(state_space, action_space)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>state_space = env.observation_space.n print("There are ", state_space, " possible states") action_space = env.action_space.n print("There are ", action_space, " possible actions") # Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros def initialize_q_table(state_space, action_space): Qtable = np.zeros((state_space, action_space)) return Qtable Qtable_frozenlake = initialize_q_table(state_space, action_space)<jupyter_output><empty_output><jupyter_text>Define the greedy policy 🤖Remember we have two policies since Q-Learning is an **off-policy** algorithm. This means we're using a **different policy for acting and updating the value function**.- Epsilon-greedy policy (acting policy)- Greedy-policy (updating policy)The greedy policy will also be the final policy we'll have when the Q-learning agent completes training. 
The greedy policy is used to select an action using the Q-table.<jupyter_code>def greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = return action<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def greedy_policy(Qtable, state): # Exploitation: take the action with the highest state, action value action = np.argmax(Qtable[state][:]) return action<jupyter_output><empty_output><jupyter_text>Define the epsilon-greedy policy 🤖Epsilon-greedy is the training policy that handles the exploration/exploitation trade-off.The idea with epsilon-greedy:- With *probability 1 - ɛ* : **we do exploitation** (i.e. our agent selects the action with the highest state-action pair value).- With *probability ɛ*: we do **exploration** (trying a random action).As the training continues, we progressively **reduce the epsilon value since we will need less and less exploration and more exploitation.**<jupyter_code>def epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = # if random_num is greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = # else --> exploration else: action = # Take a random action return action<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def epsilon_greedy_policy(Qtable, state, epsilon): # Randomly generate a number between 0 and 1 random_num = random.uniform(0,1) # if random_num is greater than epsilon --> exploitation if random_num > epsilon: # Take the action with the highest value given a state # np.argmax can be useful here action = greedy_policy(Qtable, state) # else --> exploration else: action = env.action_space.sample() return action<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️The exploration-related hyperparameters are some of the most important ones.- We need to make sure that our agent **explores enough of the state space** to learn a good value approximation.
To do that, we need to progressively decay epsilon.- If you decrease epsilon too fast (too high a decay_rate), **you take the risk that your agent will be stuck**, since your agent didn't explore enough of the state space and hence can't solve the problem.<jupyter_code># Training parameters n_training_episodes = 10000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # Environment parameters env_id = "FrozenLake-v1" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate eval_seed = [] # The evaluation seed of the environment # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration probability decay_rate = 0.0005 # Exponential decay rate for exploration prob<jupyter_output><empty_output><jupyter_text>Create the training loop methodThe training loop goes like this:```For episode in the total of training episodes:Reduce epsilon (since we need less and less exploration)Reset the environment For step in max timesteps: Choose the action At using epsilon greedy policy Take the action (a) and observe the outcome state(s') and reward (r) Update the Q-value Q(s,a) using Bellman equation Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] If done, finish the episode Our next state is the new state```<jupyter_code>def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = # Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return Qtable<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable): for episode in tqdm(range(n_training_episodes)): # Reduce epsilon (because we need less and less exploration) epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode) # Reset the environment state, info = env.reset() step = 0 terminated = False truncated = False # repeat for step in range(max_steps): # Choose the action At using epsilon greedy policy action = epsilon_greedy_policy(Qtable, state, epsilon) # Take action At and observe Rt+1 and St+1 # Take the action (a) and observe the outcome state(s') and reward (r) new_state, reward, terminated, truncated, info = env.step(action) # Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] Qtable[state][action] = Qtable[state][action] + learning_rate * (reward + gamma * np.max(Qtable[new_state]) - Qtable[state][action]) # If terminated or truncated finish the episode if terminated or truncated: break # Our next state is the new state state = new_state return Qtable<jupyter_output><empty_output><jupyter_text>Train the Q-Learning agent 🏃<jupyter_code>Qtable_frozenlake =
train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_frozenlake)<jupyter_output><empty_output><jupyter_text>Let's see what our Q-Learning table looks like now 👀<jupyter_code>Qtable_frozenlake<jupyter_output><empty_output><jupyter_text>The evaluation method 📝- We define the evaluation method that we're going to use to test our Q-Learning agent.<jupyter_code>def evaluate_agent(env, max_steps, n_eval_episodes, Q, seed): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns the average reward and std of reward. :param env: The evaluation environment :param max_steps: Maximum number of steps per episode :param n_eval_episodes: Number of episodes to evaluate the agent :param Q: The Q-table :param seed: The evaluation seed array (for taxi-v3) """ episode_rewards = [] for episode in tqdm(range(n_eval_episodes)): if seed: state, info = env.reset(seed=seed[episode]) else: state, info = env.reset() step = 0 truncated = False terminated = False total_rewards_ep = 0 for step in range(max_steps): # Take the action (index) that has the maximum expected future reward given that state action = greedy_policy(Q, state) new_state, reward, terminated, truncated, info = env.step(action) total_rewards_ep += reward if terminated or truncated: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward<jupyter_output><empty_output><jupyter_text>Evaluate our Q-Learning agent 📈- Usually, you should have a mean reward of 1.0- The **environment is relatively easy** since the state space is really small (16). What you can try is [to replace it with the slippery version](https://gymnasium.farama.org/environments/toy_text/frozen_lake/), which introduces stochasticity, making the environment more complex.<jupyter_code># Evaluate our Agent mean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_frozenlake, eval_seed) print(f"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output><jupyter_text>Publish our trained model to the Hub 🔥Now that we've seen good results after training, **we can publish our trained model to the Hub 🤗 with one line of code**.Here's an example of a Model Card: Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent.
Do not modify this code<jupyter_code>from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import json def record_video(env, Qtable, out_directory, fps=1): """ Generate a replay video of the agent :param env :param Qtable: Qtable of our agent :param out_directory :param fps: how many frames per second (with taxi-v3 and frozenlake-v1 we use 1) """ images = [] terminated = False truncated = False state, info = env.reset(seed=random.randint(0,500)) img = env.render() images.append(img) while not terminated and not truncated: # Take the action (index) that has the maximum expected future reward given that state action = np.argmax(Qtable[state][:]) state, reward, terminated, truncated, info = env.step(action) # We directly put next_state = state for recording logic img = env.render() images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def push_to_hub( repo_id, model, env, video_fps=1, local_repo_path="hub" ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: repo_id: id of the model repository from the Hugging Face Hub :param env :param video_fps: how many frames per second to record our video replay (with taxi-v3 and frozenlake-v1 we use 1) :param local_repo_path: where the local repository is """ _, repo_name = repo_id.split("/") eval_env = env api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) # Step 2: Download files repo_local_path = Path(snapshot_download(repo_id=repo_id)) # Step 3: Save the model if env.spec.kwargs.get("map_name"): model["map_name"] = env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: model["slippery"] = False # Pickle the model with open((repo_local_path) / "q-learning.pkl", "wb") as f: pickle.dump(model, f) # Step 4: Evaluate the model and build JSON with evaluation metrics mean_reward, std_reward = evaluate_agent( eval_env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"] ) evaluate_data = { "env_id": model["env_id"], "mean_reward": mean_reward, "n_eval_episodes": model["n_eval_episodes"], "eval_datetime": datetime.datetime.now().isoformat() } # Write a JSON file called "results.json" that will contain the # evaluation results with open(repo_local_path / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = model["env_id"] if env.spec.kwargs.get("map_name"): env_name += "-" + env.spec.kwargs.get("map_name") if env.spec.kwargs.get("is_slippery", "") == False: env_name += "-" + "no_slippery" metadata = {} metadata["tags"] = [env_name, "q-learning", "reinforcement-learning", "custom-implementation"] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Q-Learning** Agent playing **{env_id}** This is a trained model of a **Q-Learning** agent playing **{env_id}**.
## Usage ```python model = load_from_hub(repo_id="{repo_id}", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ``` """ evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) readme_path = repo_local_path / "README.md" readme = "" print(readme_path.exists()) if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = repo_local_path / "replay.mp4" record_video(env, model["qtable"], video_path, video_fps) # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=repo_local_path, path_in_repo=".", ) print("Your model is pushed to the Hub. You can view your model here: ", repo_url)<jupyter_output><empty_output><jupyter_text>By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**.This way:- You can **showcase your work** 🔥- You can **visualize your agent playing** 👀- You can **share an agent with the community that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join2️⃣ Sign in, and then store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `push_to_hub()` function- Let's create **the model dictionary that contains the hyperparameters and the Q_table**.<jupyter_code>model = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_frozenlake }<jupyter_output><empty_output><jupyter_text>Let's fill the `push_to_hub` function:- `repo_id`: the name of the Hugging Face Hub Repository that will be created/updated `(repo_id = {username}/{repo_name})`💡 A good `repo_id` is `{username}/q-{env_id}`- `model`: our model dictionary containing the hyperparameters and the Qtable.- `env`: the environment.- `commit_message`: message of the commit<jupyter_code>model username = "" # FILL THIS repo_name = "q-FrozenLake-v1-4x4-noSlippery" push_to_hub( repo_id=f"{username}/{repo_name}", model=model, env=env)<jupyter_output><empty_output><jupyter_text>Congrats 🥳 you've just implemented from scratch, trained, and uploaded your first Reinforcement Learning agent.FrozenLake-v1 no_slippery is a very simple environment; let's try a harder one 🔥.
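If you'd like to experiment further before moving on, you can retrain on the stochastic (slippery) variant mentioned earlier. Here is a minimal sketch that reuses the functions defined in this notebook; note that the hyperparameters are kept as-is and are not tuned for the stochastic setting, so the mean reward will likely be below 1.0:

```python
# Hypothetical experiment: the slippery (stochastic) FrozenLake.
env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=True, render_mode="rgb_array")

# Reuse the helpers defined above: a fresh Q-table, training, then evaluation.
Qtable_slippery = initialize_q_table(env.observation_space.n, env.action_space.n)
Qtable_slippery = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_slippery)

mean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_slippery, eval_seed)
print(f"Slippery FrozenLake: {mean_reward:.2f} +/- {std_reward:.2f}")
```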
Part 2: Taxi-v3 🚖 Create and understand [Taxi-v3 🚕](https://gymnasium.farama.org/environments/toy_text/taxi/)---💡 A good habit when you start to use an environment is to check its documentation👉 https://gymnasium.farama.org/environments/toy_text/taxi/---In `Taxi-v3` 🚕, there are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue).When the episode starts, **the taxi starts off at a random square** and the passenger is at a random location. The taxi drives to the passenger’s location, **picks up the passenger**, drives to the passenger’s destination (another one of the four specified locations), and then **drops off the passenger**. Once the passenger is dropped off, the episode ends.<jupyter_code>env = gym.make("Taxi-v3", render_mode="rgb_array")<jupyter_output><empty_output><jupyter_text>There are **500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger** (including the case when the passenger is in the taxi), and **4 destination locations.**<jupyter_code>state_space = env.observation_space.n print("There are ", state_space, " possible states") action_space = env.action_space.n print("There are ", action_space, " possible actions")<jupyter_output><empty_output><jupyter_text>The action space (the set of possible actions the agent can take) is discrete with **6 actions available 🎮**:- 0: move south- 1: move north- 2: move east- 3: move west- 4: pickup passenger- 5: drop off passengerReward function 💰:- -1 per step unless other reward is triggered.- +20 delivering passenger.- -10 executing “pickup” and “drop-off” actions illegally.<jupyter_code># Create our Q table with state_size rows and action_size columns (500x6) Qtable_taxi = initialize_q_table(state_space, action_space) print(Qtable_taxi) print("Q-table shape: ", Qtable_taxi.shape)<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️⚠ DO NOT MODIFY EVAL_SEED: the eval_seed array **allows us to evaluate your agent with the same taxi starting positions for every classmate**<jupyter_code># Training parameters n_training_episodes = 25000 # Total training episodes learning_rate = 0.7 # Learning rate # Evaluation parameters n_eval_episodes = 100 # Total number of test episodes # DO NOT MODIFY EVAL_SEED eval_seed = [16,54,165,177,191,191,120,80,149,178,48,38,6,125,174,73,50,172,100,148,146,6,25,40,68,148,49,167,9,97,164,176,61,7,54,55, 161,131,184,51,170,12,120,113,95,126,51,98,36,135,54,82,45,95,89,59,95,124,9,113,58,85,51,134,121,169,105,21,30,11,50,65,12,43,82,145,152,97,106,55,31,85,38, 112,102,168,123,97,21,83,158,26,80,63,5,81,32,11,28,148] # Evaluation seed; this ensures that all classmates' agents are evaluated with the same taxi starting positions # Each seed has a specific starting state # Environment parameters env_id = "Taxi-v3" # Name of the environment max_steps = 99 # Max steps per episode gamma = 0.95 # Discounting rate # Exploration parameters max_epsilon = 1.0 # Exploration probability at start min_epsilon = 0.05 # Minimum exploration probability decay_rate = 0.005 # Exponential decay rate for exploration prob<jupyter_output><empty_output><jupyter_text>Train our Q-Learning agent 🏃<jupyter_code>Qtable_taxi = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_taxi) Qtable_taxi<jupyter_output><empty_output><jupyter_text>Create a model dictionary 💾 and publish our trained model to the Hub 🔥- We create a model dictionary that will contain all the training hyperparameters for reproducibility and the
Q-Table.<jupyter_code>model = { "env_id": env_id, "max_steps": max_steps, "n_training_episodes": n_training_episodes, "n_eval_episodes": n_eval_episodes, "eval_seed": eval_seed, "learning_rate": learning_rate, "gamma": gamma, "max_epsilon": max_epsilon, "min_epsilon": min_epsilon, "decay_rate": decay_rate, "qtable": Qtable_taxi } username = "" # FILL THIS repo_name = "" # FILL THIS push_to_hub( repo_id=f"{username}/{repo_name}", model=model, env=env)<jupyter_output><empty_output><jupyter_text>Now that it's on the Hub, you can compare the results of your Taxi-v3 with your classmates using the leaderboard 🏆 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard Part 3: Load from Hub 🔽What's amazing about the Hugging Face Hub 🤗 is that you can easily load powerful models from the community.Loading a saved model from the Hub is really easy:1. Go to https://huggingface.co/models?other=q-learning to see the list of all the q-learning saved models.2. You select one and copy its repo_id 3. Then we just need to use `load_from_hub` with:- The repo_id- The filename: the saved model inside the repo. Do not modify this code<jupyter_code>from urllib.error import HTTPError from huggingface_hub import hf_hub_download def load_from_hub(repo_id: str, filename: str) -> str: """ Download a model from Hugging Face Hub. :param repo_id: id of the model repository from the Hugging Face Hub :param filename: name of the model zip file from the repository """ # Get the model from the Hub, download and cache the model on your local disk pickle_model = hf_hub_download( repo_id=repo_id, filename=filename ) with open(pickle_model, 'rb') as f: downloaded_model_file = pickle.load(f) return downloaded_model_file<jupyter_output><empty_output><jupyter_text>Let's load a model from the Hub and evaluate it:<jupyter_code>model = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3", filename="q-learning.pkl") # Try to use another model print(model) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) model = load_from_hub(repo_id="ThomasSimonini/q-FrozenLake-v1-no-slippery", filename="q-learning.pkl") # Try to use another model env = gym.make(model["env_id"], is_slippery=False) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])<jupyter_output><empty_output>
deep-rl-class/notebooks/unit2/unit2.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit2/unit2.ipynb", "repo_id": "deep-rl-class", "token_count": 11160 }
93
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## Deep Reinforcement Learning [[deep-rl]] - [Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto Chapter 1, 2 and 3](http://incompleteideas.net/book/RLbook2020.pdf) - [Foundations of Deep RL Series, L1 MDPs, Exact Solution Methods, Max-ent RL by Pieter Abbeel](https://youtu.be/2GwBez0D20A) - [Spinning Up RL by OpenAI Part 1: Key concepts of RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html) ## Gym [[gym]] - [Getting Started With OpenAI Gym: The Basic Building Blocks](https://blog.paperspace.com/getting-started-with-openai-gym/) - [Make your own Gym custom environment](https://www.gymlibrary.dev/content/environment_creation/)
deep-rl-class/units/en/unit1/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 246 }
94
# Glossary [[glossary]] This is a community-created glossary. Contributions are welcome! ### Strategies to find the optimal policy - **Policy-based methods.** The policy is usually trained with a neural network to select what action to take given a state. In this case, it is the neural network which outputs the action that the agent should take instead of using a value function. Depending on the experience received from the environment, the neural network will be re-adjusted and will provide better actions. - **Value-based methods.** In this case, a value function is trained to output the value of a state or a state-action pair that will represent our policy. However, this value doesn't define what action the agent should take. Instead, we need to specify the behavior of the agent given the output of the value function. For example, we could decide to adopt a policy to take the action that always leads to the biggest reward (Greedy Policy). In summary, the policy is a Greedy Policy (or whatever decision the user takes) that uses the values of the value function to decide the actions to take. ### Among the value-based methods, we can find two main strategies - **The state-value function.** For each state, the state-value function is the expected return if the agent starts in that state and follows the policy until the end. - **The action-value function.** In contrast to the state-value function, the action-value function calculates, for each state-action pair, the expected return if the agent starts in that state, takes that action, and then follows the policy forever after. ### Epsilon-greedy strategy: - Common strategy used in reinforcement learning that involves balancing exploration and exploitation. - Chooses the action with the highest expected reward with a probability of 1-epsilon. - Chooses a random action with a probability of epsilon. - Epsilon is typically decreased over time to shift focus towards exploitation (a minimal code sketch is shown at the end of this glossary). ### Greedy strategy: - Involves always choosing the action that is expected to lead to the highest reward, based on the current knowledge of the environment. (Only exploitation) - Always chooses the action with the highest expected reward. - Does not include any exploration. - Can be disadvantageous in environments with uncertainty or unknown optimal actions. ### Off-policy vs on-policy algorithms - **Off-policy algorithms:** A different policy is used at training time and inference time - **On-policy algorithms:** The same policy is used during training and inference ### Monte Carlo and Temporal Difference learning strategies - **Monte Carlo (MC):** Learning at the end of the episode. With Monte Carlo, we wait until the episode ends and then we update the value function (or policy function) from a complete episode. - **Temporal Difference (TD):** Learning at each step. With Temporal Difference Learning, we update the value function (or policy function) at each step without requiring a complete episode. If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Ramón Rueda](https://github.com/ramon-rd) - [Hasarindu Perera](https://github.com/hasarinduperera/) - [Arkady Arkhangorodsky](https://github.com/arkadyark/)
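As a concrete companion to the epsilon-greedy and greedy strategy entries above, here is a minimal sketch (assuming a NumPy Q-table indexed by `[state, action]`; the function names are illustrative, not taken from any particular library):

```python
import random
import numpy as np

def greedy_action(q_table: np.ndarray, state: int) -> int:
    # Pure exploitation: pick the action with the highest estimated value.
    return int(np.argmax(q_table[state]))

def epsilon_greedy_action(q_table: np.ndarray, state: int, epsilon: float) -> int:
    # With probability epsilon, explore with a random action;
    # otherwise, exploit the current value estimates.
    n_actions = q_table.shape[1]
    if random.random() < epsilon:
        return random.randrange(n_actions)
    return greedy_action(q_table, state)
```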
deep-rl-class/units/en/unit2/glossary.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/glossary.mdx", "repo_id": "deep-rl-class", "token_count": 760 }
95
# From Q-Learning to Deep Q-Learning [[from-q-to-dqn]] We learned that **Q-Learning is an algorithm we use to train our Q-Function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/> </figure> The **Q comes from "the Quality" of that action at that state.** Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.** The problem is that Q-Learning is a *tabular method*. This becomes a problem if the state and action spaces **are not small enough to be represented efficiently by arrays and tables**. In other words: it is **not scalable**. Q-Learning worked well with small state space environments like: - FrozenLake, where we had 16 states. - Taxi-v3, where we had 500 states. But think of what we're going to do today: we will train an agent to learn to play Space Invaders, a more complex game, using the frames as input. As **[Nikita Melkozerov mentioned](https://twitter.com/meln1k), Atari environments** have an observation space with a shape of (210, 160, 3)*, containing values ranging from 0 to 255, so that gives us \\(256^{210 \times 160 \times 3} = 256^{100800}\\) possible observations (for comparison, we have approximately \\(10^{80}\\) atoms in the observable universe). * A single frame in Atari is composed of an image of 210x160 pixels. Given that the images are in color (RGB), there are 3 channels. This is why the shape is (210, 160, 3). For each pixel, the value can go from 0 to 255. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari.jpg" alt="Atari State Space"/> Therefore, the state space is gigantic; due to this, creating and updating a Q-table for that environment would not be efficient. In this case, the best idea is to approximate the Q-values using a parametrized Q-function \\(Q_{\theta}(s,a)\\) . This neural network will approximate, given a state, the different Q-values for each possible action at that state. And that's exactly what Deep Q-Learning does. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/deep.jpg" alt="Deep Q Learning"/> Now that we understand Deep Q-Learning, let's dive deeper into the Deep Q-Network.
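To make the idea of a parametrized Q-function \\(Q_{\theta}(s,a)\\) concrete before we get there, here is a minimal sketch of such a network (assuming PyTorch; the architecture and sizes are illustrative, not the one used later in this unit):

```python
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Approximates Q(s, a): takes a state, outputs one Q-value per action."""

    def __init__(self, state_dim: int, n_actions: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, n_actions),  # one Q-value per possible action
        )

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        return self.net(state)

# A batch containing one 8-dimensional state -> Q-values for 4 actions.
q_net = QNetwork(state_dim=8, n_actions=4)
q_values = q_net(torch.randn(1, 8))   # shape (1, 4)
best_action = q_values.argmax(dim=1)  # greedy action under the current network
```

For image inputs like Atari frames, the linear layers would typically be replaced by convolutional layers, but the principle is the same: one forward pass produces a Q-value estimate for every action.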
deep-rl-class/units/en/unit3/from-q-to-dqn.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/from-q-to-dqn.mdx", "repo_id": "deep-rl-class", "token_count": 733 }
96
# Conclusion Congrats on finishing this unit! You’ve just trained your first agents with ML-Agents and shared them on the Hub 🥳. The best way to learn is to **practice and try stuff**. Why not try another environment? [ML-Agents has 18 different environments](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Learning-Environment-Examples.md). For instance: - [Worm](https://singularite.itch.io/worm), where you teach a worm to crawl. - [Walker](https://singularite.itch.io/walker), where you teach an agent to walk towards a goal. Check the documentation to find out how to train them and to see the list of already integrated ML-Agents environments on the Hub: https://github.com/huggingface/ml-agents#getting-started <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/envs-unity.jpeg" alt="Example envs"/> In the next unit, we're going to learn about multi-agents. You're going to train your first multi-agents to compete in Soccer and Snowball fight against other classmates' agents. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight.gif" alt="Snowball fight"/> Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit5/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 430 }
97
# Conclusion That’s all for today. Congrats on finishing this unit and the tutorial! The best way to learn is to practice and try stuff. **Why not train another agent with a different configuration?** And don’t hesitate to check the [leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos) from time to time. See you in Unit 8 🔥 ## Keep Learning, Stay awesome 🤗
deep-rl-class/units/en/unit7/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 117 }
98
# Visualize the Clipped Surrogate Objective Function Don't worry. **It's normal if this seems complex to handle right now**. But we're going to see what this Clipped Surrogate Objective Function looks like, and this will help you to visualize better what's going on. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> We have six different situations. Remember first that we take the minimum between the clipped and unclipped objectives. ## Case 1 and 2: the ratio is within the range In situations 1 and 2, **the clipping does not apply since the ratio is within the range** \\( [1 - \epsilon, 1 + \epsilon] \\) In situation 1, we have a positive advantage: the **action is better than the average** of all the actions in that state. Therefore, we should encourage our current policy to increase the probability of taking that action in that state. Since the ratio is within the interval, **we can increase our policy's probability of taking that action at that state.** In situation 2, we have a negative advantage: the action is worse than the average of all actions at that state. Therefore, we should discourage our current policy from taking that action in that state. Since the ratio is within the interval, **we can decrease the probability that our policy takes that action at that state.** ## Case 3 and 4: the ratio is below the range <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> If the probability ratio is lower than \\( 1 - \epsilon \\), the probability of taking that action at that state is much lower than with the old policy. If, like in situation 3, the advantage estimate is positive (A>0), then **you want to increase the probability of taking that action at that state.** But if, like in situation 4, the advantage estimate is negative, **we don't want to decrease further** the probability of taking that action at that state. Therefore, the gradient is = 0 (since we're on a flat line), so we don't update our weights. ## Case 5 and 6: the ratio is above the range <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/recap.jpg" alt="PPO"/> <figcaption><a href="https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf">Table from "Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick</a></figcaption> </figure> If the probability ratio is higher than \\( 1 + \epsilon \\), the probability of taking that action at that state in the current policy is **much higher than in the former policy.** If, like in situation 5, the advantage is positive, **we don't want to get too greedy**. We already have a higher probability of taking that action at that state than the former policy.
Therefore, the gradient is = 0 (since we're on a flat line), so we don't update our weights. If, like in situation 6, the advantage is negative, we want to decrease the probability of taking that action at that state. So if we recap, **we only update the policy with the unclipped objective part**. When the minimum is the clipped objective part, we don't update our policy weights since the gradient will equal 0. So we update our policy only if: - Our ratio is in the range \\( [1 - \epsilon, 1 + \epsilon] \\) - Our ratio is outside the range, but **the advantage leads to getting closer to the range** - The ratio is below the range but the advantage is > 0 - The ratio is above the range but the advantage is < 0 **You might wonder why, when the minimum is the clipped ratio, the gradient is 0.** When the ratio is clipped, the derivative in this case will not be the derivative of \\( r_t(\theta) * A_t \\) but the derivative of either \\( (1 - \epsilon) * A_t\\) or the derivative of \\( (1 + \epsilon) * A_t\\), which both = 0. To summarize, thanks to this clipped surrogate objective, **we restrict the range that the current policy can vary from the old one.** This works because we remove the incentive for the probability ratio to move outside of the interval, since the clip forces the gradient to be zero. If the ratio is > \\( 1 + \epsilon \\) or < \\( 1 - \epsilon \\), the gradient will be equal to 0. The final Clipped Surrogate Objective Loss for PPO Actor-Critic style looks like this; it's a combination of the Clipped Surrogate Objective function, the Value Loss Function and an Entropy bonus: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ppo-objective.jpg" alt="PPO objective"/> That was quite complex. Take time to understand these situations by looking at the table and the graph. **You must understand why this makes sense.** If you want to go deeper, the best resource is the article ["Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization" by Daniel Bick, especially part 3.4](https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf).
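As a minimal sketch of how this objective is typically computed in code (assuming PyTorch tensors `log_probs`, `old_log_probs`, and `advantages`; this is an illustration, not the exact implementation of any particular library):

```python
import torch

def clipped_surrogate_loss(log_probs, old_log_probs, advantages, epsilon=0.2):
    # Probability ratio r_t(theta) = pi_theta(a|s) / pi_theta_old(a|s),
    # computed in log-space for numerical stability.
    ratio = torch.exp(log_probs - old_log_probs)

    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantages

    # Take the minimum of the two objectives, then negate (we minimize the loss).
    return -torch.min(unclipped, clipped).mean()
```

Because `torch.clamp` has zero gradient with respect to `ratio` outside the \\( [1 - \epsilon, 1 + \epsilon] \\) interval, the six cases discussed above all fall out of this single expression.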
deep-rl-class/units/en/unit8/visualize.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/visualize.mdx", "repo_id": "deep-rl-class", "token_count": 1594 }
99
# An Introduction to Unreal Learning Agents [Learning Agents](https://dev.epicgames.com/community/learning/tutorials/8OWY/unreal-engine-learning-agents-introduction) is an Unreal Engine (UE) plugin that allows you **to train AI characters using machine learning (ML) in Unreal**. It's an exciting new plugin where you can create unique environments using Unreal Engine and train your agents. Let's see how you can **get started and train a car to drive in an Unreal Engine Environment**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/learning-agents-car.png" alt="Learning Agents"/> <figcaption>Source: [Learning Agents Driving Car Tutorial](https://dev.epicgames.com/community/learning/tutorials/qj2O/unreal-engine-learning-to-drive)</figcaption> </figure> ## Case 1: I don't know anything about Unreal Engine and Beginners in Unreal Engine If you're new to Unreal Engine, don't be scared! We listed two courses you need to follow to be able to use Learning Agents: 1. Master the Basics: Begin by watching this course [your first hour in Unreal Engine 5](https://dev.epicgames.com/community/learning/courses/ZpX/your-first-hour-in-unreal-engine-5/E7L/introduction-to-your-first-hour-in-unreal-engine-5). This comprehensive course will **lay down the foundational knowledge you need to use Unreal**. 2. Dive into Blueprints: Explore the world of Blueprints, the visual scripting component of Unreal Engine. [This video course](https://youtu.be/W0brCeJNMqk?si=zy4t4t1l6FMIzbpz) will familiarize you with this essential tool. Armed with the basics, **you're now prepared to play with Learning Agents**: 3. Get the Big Picture of Learning Agents by [reading this informative overview](https://dev.epicgames.com/community/learning/tutorials/8OWY/unreal-engine-learning-agents-introduction). 4. [Teach a Car to Drive using Reinforcement Learning in Learning Agents](https://dev.epicgames.com/community/learning/tutorials/qj2O/unreal-engine-learning-to-drive). 5. [Check Imitation Learning with the Unreal Engine 5.3 Learning Agents Plugin](https://www.youtube.com/watch?v=NwYUNlFvajQ) ## Case 2: I'm familiar with Unreal For those already acquainted with Unreal Engine, you can jump straight into Learning Agents with these two tutorials: 1. Get the Big Picture of Learning Agents by [reading this informative overview](https://dev.epicgames.com/community/learning/tutorials/8OWY/unreal-engine-learning-agents-introduction). 2. [Teach a Car to Drive using Reinforcement Learning in Learning Agents](https://dev.epicgames.com/community/learning/tutorials/qj2O/unreal-engine-learning-to-drive). . 3. [Check Imitation Learning with the Unreal Engine 5.3 Learning Agents Plugin](https://www.youtube.com/watch?v=NwYUNlFvajQ)
deep-rl-class/units/en/unitbonus3/learning-agents.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/learning-agents.mdx", "repo_id": "deep-rl-class", "token_count": 804 }
100
<!--- Copyright 2022 - The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <p align="center"> <br> <img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/> <br> <p> <p align="center"> <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a> <a href="https://github.com/huggingface/diffusers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg"></a> <a href="https://pepy.tech/project/diffusers"><img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month"></a> <a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg"></a> <a href="https://twitter.com/diffuserslib"><img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib"></a> </p> 🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction). 🤗 Diffusers offers three core components: - State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code. - Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality. - Pretrained [models](https://huggingface.co/docs/diffusers/api/models/overview) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems. ## Installation We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation. 
### PyTorch With `pip` (official package): ```bash pip install --upgrade diffusers[torch] ``` With `conda` (maintained by the community): ```sh conda install -c conda-forge diffusers ``` ### Flax With `pip` (official package): ```bash pip install --upgrade diffusers[flax] ``` ### Apple Silicon (M1/M2) support Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide. ## Quickstart Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 30,000+ checkpoints): ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipeline.to("cuda") pipeline("An image of a squirrel in Picasso style").images[0] ``` You can also dig into the models and schedulers toolbox to build your own diffusion system: ```python from diffusers import DDPMScheduler, UNet2DModel from PIL import Image import torch scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda") scheduler.set_timesteps(50) sample_size = model.config.sample_size noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") input = noise for t in scheduler.timesteps: with torch.no_grad(): noisy_residual = model(input, t).sample prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample input = prev_noisy_sample image = (input / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy()[0] image = Image.fromarray((image * 255).round().astype("uint8")) image ``` Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today! ## How to navigate the documentation | **Documentation** | **What can I learn?** | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. | | [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. | | [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. | | [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. | | [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. | ## Contribution We ❤️ contributions from the open-source community! 
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md). You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library. - See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute - See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines - See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or just hang out ☕. ## Popular Tasks & Pipelines <table> <tr> <th>Task</th> <th>Pipeline</th> <th>🤗 Hub</th> </tr> <tr style="border-top: 2px solid black"> <td>Unconditional Image Generation</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td> <td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unCLIP</a></td> <td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/deepfloyd_if">DeepFloyd IF</a></td> <td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td> <td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/controlnet">ControlNet</a></td> <td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">InstructPix2Pix</a></td> <td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image Inpainting</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td> 
<td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Image Variation</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td> <td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td> </tr> <tr> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td> </tr> </table> ## Popular libraries using 🧨 Diffusers - https://github.com/microsoft/TaskMatrix - https://github.com/invoke-ai/InvokeAI - https://github.com/InstantID/InstantID - https://github.com/apple/ml-stable-diffusion - https://github.com/Sanster/lama-cleaner - https://github.com/IDEA-Research/Grounded-Segment-Anything - https://github.com/ashawkey/stable-dreamfusion - https://github.com/deep-floyd/IF - https://github.com/bentoml/BentoML - https://github.com/bmaltais/kohya_ss - +14,000 other amazing GitHub repositories 💪 Thank you for using us ❤️. ## Credits This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today: - @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion) - @hojonathanho original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion) as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion) - @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim) - @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch) We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models) as well as @crowsonkb and @rromb for useful discussions and insights. ## Citation ```bibtex @misc{von-platen-etal-2022-diffusers, author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Dhruv Nair and Sayak Paul and William Berman and Yiyi Xu and Steven Liu and Thomas Wolf}, title = {Diffusers: State-of-the-art diffusion models}, year = {2022}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/diffusers}} } ```
diffusers/README.md/0
{ "file_path": "diffusers/README.md", "repo_id": "diffusers", "token_count": 5361 }
101
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# IP-Adapter

[IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers for the image and text features. The image features are generated from an image encoder.

<Tip>

Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and see how to use it in the [usage](../../using-diffusers/ip_adapter) guide.

</Tip>

## IPAdapterMixin

[[autodoc]] loaders.ip_adapter.IPAdapterMixin

## IPAdapterMaskProcessor

[[autodoc]] image_processor.IPAdapterMaskProcessor
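## Usage sketch

To make the flow concrete, here is a minimal usage sketch. The adapter repository and weight file are a commonly used public checkpoint, while the scale value and the reference-image URL are illustrative assumptions rather than recommendations:

```py
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image

# load a base pipeline, then attach IP-Adapter weights to it
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.set_ip_adapter_scale(0.6)  # how strongly the image prompt steers generation (assumed value)

# hypothetical reference image used as the image prompt
image_prompt = load_image("https://example.com/reference.png")

image = pipeline(
    prompt="best quality, high quality",
    ip_adapter_image=image_prompt,
).images[0]
```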
diffusers/docs/source/en/api/loaders/ip_adapter.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/ip_adapter.md", "repo_id": "diffusers", "token_count": 339 }
102
<!--Copyright 2024 The HuggingFace Team and The InstantX Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# FluxControlNetModel

FluxControlNetModel is an implementation of ControlNet for Flux.1.

The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, e.g., edges, depth, segmentation, human pose, etc., with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

## Loading from the original format

By default, the [`FluxControlNetModel`] should be loaded with [`~ModelMixin.from_pretrained`].

```py
from diffusers import FluxControlNetPipeline
from diffusers.models import FluxControlNetModel, FluxMultiControlNetModel

# load a single ControlNet
controlnet = FluxControlNetModel.from_pretrained("InstantX/FLUX.1-dev-Controlnet-Canny")
pipe = FluxControlNetPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", controlnet=controlnet)

# or wrap one or more ControlNets in a FluxMultiControlNetModel to combine them
controlnet = FluxControlNetModel.from_pretrained("InstantX/FLUX.1-dev-Controlnet-Canny")
controlnet = FluxMultiControlNetModel([controlnet])
pipe = FluxControlNetPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", controlnet=controlnet)
```

## FluxControlNetModel

[[autodoc]] FluxControlNetModel

## FluxControlNetOutput

[[autodoc]] models.controlnet_flux.FluxControlNetOutput
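## Usage sketch

As a rough end-to-end sketch of running the loaded pipeline: the prompt, control-image URL, and parameter values below are illustrative assumptions rather than recommended settings, and the control image is expected to match the conditioning the ControlNet was trained on (a Canny edge map for the checkpoint above):

```py
import torch
from diffusers import FluxControlNetPipeline
from diffusers.models import FluxControlNetModel
from diffusers.utils import load_image

controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

# hypothetical Canny edge map used as the conditioning image
control_image = load_image("https://example.com/canny_edge_map.png")

image = pipe(
    prompt="a futuristic city at dusk",       # illustrative prompt
    control_image=control_image,
    controlnet_conditioning_scale=0.6,        # assumed value; tune per checkpoint
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
```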
diffusers/docs/source/en/api/models/controlnet_flux.md/0
{ "file_path": "diffusers/docs/source/en/api/models/controlnet_flux.md", "repo_id": "diffusers", "token_count": 740 }
103
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoPipeline The `AutoPipeline` is designed to make it easy to load a checkpoint for a task without needing to know the specific pipeline class. Based on the task, the `AutoPipeline` automatically retrieves the correct pipeline class from the checkpoint `model_index.json` file. > [!TIP] > Check out the [AutoPipeline](../../tutorials/autopipeline) tutorial to learn how to use this API! ## AutoPipelineForText2Image [[autodoc]] AutoPipelineForText2Image - all - from_pretrained - from_pipe ## AutoPipelineForImage2Image [[autodoc]] AutoPipelineForImage2Image - all - from_pretrained - from_pipe ## AutoPipelineForInpainting [[autodoc]] AutoPipelineForInpainting - all - from_pretrained - from_pipe
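## Usage sketch

As a minimal illustration of the task-first API, the following sketch loads a checkpoint through [`AutoPipelineForText2Image`]; the checkpoint name is just an example, and any checkpoint whose `model_index.json` maps to a supported pipeline class works the same way:

```py
import torch
from diffusers import AutoPipelineForText2Image

# the concrete pipeline class is resolved from the checkpoint's model_index.json
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```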
diffusers/docs/source/en/api/pipelines/auto_pipeline.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/auto_pipeline.md", "repo_id": "diffusers", "token_count": 378 }
104
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DiT [Scalable Diffusion Models with Transformers](https://huggingface.co/papers/2212.09748) (DiT) is by William Peebles and Saining Xie. The abstract from the paper is: *We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops -- through increased transformer depth/width or increased number of input tokens -- consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512x512 and 256x256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.* The original codebase can be found at [facebookresearch/dit](https://github.com/facebookresearch/dit). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## DiTPipeline [[autodoc]] DiTPipeline - all - __call__ ## ImagePipelineOutput [[autodoc]] pipelines.ImagePipelineOutput
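## Usage sketch

Since DiT is class-conditional on ImageNet labels, sampling starts from label names that are mapped to class ids with [`~DiTPipeline.get_label_ids`]. The sketch below mirrors the commonly published example; the checkpoint, labels, and seed are illustrative:

```py
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# map human-readable ImageNet labels to class ids
words = ["white shark", "umbrella"]
class_ids = pipe.get_label_ids(words)

generator = torch.manual_seed(33)
output = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator)
image = output.images[0]  # image for "white shark"
```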
diffusers/docs/source/en/api/pipelines/dit.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/dit.md", "repo_id": "diffusers", "token_count": 532 }
105
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Text2Video-Zero

[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com).

Text2Video-Zero enables zero-shot video generation using:
1. A textual prompt
2. A prompt combined with guidance from poses or edges
3. Video Instruct-Pix2Pix (instruction-guided video editing)

Results are temporally consistent and closely follow the guidance and textual prompts.

![teaser-img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/t2v_zero_teaser.png)

The abstract from the paper is:

*Recent text-to-video generation approaches rely on computationally heavy training and require large-scale video datasets. In this paper, we introduce a new task of zero-shot text-to-video generation and propose a low-cost approach (without any training or optimization) by leveraging the power of existing text-to-image synthesis methods (e.g., Stable Diffusion), making them suitable for the video domain. Our key modifications include (i) enriching the latent codes of the generated frames with motion dynamics to keep the global scene and the background time consistent; and (ii) reprogramming frame-level self-attention using a new cross-frame attention of each frame on the first frame, to preserve the context, appearance, and identity of the foreground object. Experiments show that this leads to low overhead, yet high-quality and remarkably consistent video generation. Moreover, our approach is not limited to text-to-video synthesis but is also applicable to other tasks such as conditional and content-specialized video generation, and Video Instruct-Pix2Pix, i.e., instruction-guided video editing. As experiments show, our method performs comparably or sometimes better than recent approaches, despite not being trained on additional video data.*

You can find additional information about Text2Video-Zero on the [project page](https://text2video-zero.github.io/), [paper](https://arxiv.org/abs/2303.13439), and [original codebase](https://github.com/Picsart-AI-Research/Text2Video-Zero).

## Usage example

### Text-To-Video

To generate a video from a prompt, run the following Python code:

```python
import torch
import imageio
from diffusers import TextToVideoZeroPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A panda is playing guitar on times square"
result = pipe(prompt=prompt).images
result = [(r * 255).astype("uint8") for r in result]
imageio.mimsave("video.mp4", result, fps=4)
```

You can change these parameters in the pipeline call:
* Motion field strength (see the [paper](https://arxiv.org/abs/2303.13439), Sect.
3.3.1):
    * `motion_field_strength_x` and `motion_field_strength_y`. Default: `motion_field_strength_x=12`, `motion_field_strength_y=12`
* `T` and `T'` (see the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1)
    * `t0` and `t1` in the range `{0, ..., num_inference_steps}`. Default: `t0=45`, `t1=48`
* Video length:
    * `video_length`, the number of frames to be generated. Default: `video_length=8`

We can also generate longer videos by doing the processing in a chunk-by-chunk manner:

```python
import torch
import imageio
from diffusers import TextToVideoZeroPipeline
import numpy as np

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
seed = 0
video_length = 24  #24 ÷ 4fps = 6 seconds
chunk_size = 8
prompt = "A panda is playing guitar on times square"

# Generate the video chunk-by-chunk
result = []
chunk_ids = np.arange(0, video_length, chunk_size - 1)
generator = torch.Generator(device="cuda")
for i in range(len(chunk_ids)):
    print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
    ch_start = chunk_ids[i]
    ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
    # Attach the first frame for Cross Frame Attention
    frame_ids = [0] + list(range(ch_start, ch_end))
    # Fix the seed for the temporal consistency
    generator.manual_seed(seed)
    output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids)
    result.append(output.images[1:])

# Concatenate chunks and save
result = np.concatenate(result)
result = [(r * 255).astype("uint8") for r in result]
imageio.mimsave("video.mp4", result, fps=4)
```

- #### SDXL Support
In order to use the SDXL model when generating a video from a prompt, use the `TextToVideoZeroSDXLPipeline` pipeline:

```python
import torch
from diffusers import TextToVideoZeroSDXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")
```

### Text-To-Video with Pose Control

To generate a video from a prompt with additional pose control

1. Download a demo video

```python
from huggingface_hub import hf_hub_download

filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4"
repo_id = "PAIR/Text2Video-Zero"
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
```

2. Read video containing extracted pose images

```python
from PIL import Image
import imageio

reader = imageio.get_reader(video_path, "ffmpeg")
frame_count = 8
pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
```

To extract pose from actual video, read [ControlNet documentation](controlnet).

3.
Run `StableDiffusionControlNetPipeline` with our custom attention processor

```python
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor

model_id = "runwayml/stable-diffusion-v1-5"
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    model_id, controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Set the attention processor
pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))

# fix latents for all frames
latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)

prompt = "Darth Vader dancing in a desert"
result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
imageio.mimsave("video.mp4", result, fps=4)
```

- #### SDXL Support

Since our attention processor also works with SDXL, it can be utilized to generate a video from a prompt using ControlNet models powered by SDXL:

```python
import torch
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor

controlnet_model_id = 'thibaud/controlnet-openpose-sdxl-1.0'
model_id = 'stabilityai/stable-diffusion-xl-base-1.0'

controlnet = ControlNetModel.from_pretrained(controlnet_model_id, torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    model_id, controlnet=controlnet, torch_dtype=torch.float16
).to('cuda')

# Set the attention processor
pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))

# fix latents for all frames
latents = torch.randn((1, 4, 128, 128), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)

prompt = "Darth Vader dancing in a desert"
result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
imageio.mimsave("video.mp4", result, fps=4)
```

### Text-To-Video with Edge Control

To generate a video from a prompt with additional Canny edge control, follow the same steps described above for pose-guided generation using the [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny).

### Video Instruct-Pix2Pix

To perform text-guided video editing (with [InstructPix2Pix](pix2pix)):

1. Download a demo video

```python
from huggingface_hub import hf_hub_download

filename = "__assets__/pix2pix video/camel.mp4"
repo_id = "PAIR/Text2Video-Zero"
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
```

2. Read video from path

```python
from PIL import Image
import imageio

reader = imageio.get_reader(video_path, "ffmpeg")
frame_count = 8
video = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
```

3.
Run `StableDiffusionInstructPix2PixPipeline` with our custom attention processor ```python import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=3)) prompt = "make it Van Gogh Starry Night style" result = pipe(prompt=[prompt] * len(video), image=video).images imageio.mimsave("edited_video.mp4", result, fps=4) ``` ### DreamBooth specialization Methods **Text-To-Video**, **Text-To-Video with Pose Control** and **Text-To-Video with Edge Control** can run with custom [DreamBooth](../../training/dreambooth) models, as shown below for [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny) and [Avatar style DreamBooth](https://huggingface.co/PAIR/text2video-zero-controlnet-canny-avatar) model: 1. Download a demo video ```python from huggingface_hub import hf_hub_download filename = "__assets__/canny_videos_mp4/girl_turning.mp4" repo_id = "PAIR/Text2Video-Zero" video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) ``` 2. Read video from path ```python from PIL import Image import imageio reader = imageio.get_reader(video_path, "ffmpeg") frame_count = 8 canny_edges = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] ``` 3. Run `StableDiffusionControlNetPipeline` with custom trained DreamBooth model ```python import torch from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor # set model id to custom model model_id = "PAIR/text2video-zero-controlnet-canny-avatar" controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda") # Set the attention processor pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) # fix latents for all frames latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(canny_edges), 1, 1, 1) prompt = "oil painting of a beautiful girl avatar style" result = pipe(prompt=[prompt] * len(canny_edges), image=canny_edges, latents=latents).images imageio.mimsave("video.mp4", result, fps=4) ``` You can filter out some available DreamBooth-trained models with [this link](https://huggingface.co/models?search=dreambooth). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## TextToVideoZeroPipeline [[autodoc]] TextToVideoZeroPipeline - all - __call__ ## TextToVideoZeroSDXLPipeline [[autodoc]] TextToVideoZeroSDXLPipeline - all - __call__ ## TextToVideoPipelineOutput [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput
diffusers/docs/source/en/api/pipelines/text_to_video_zero.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/text_to_video_zero.md", "repo_id": "diffusers", "token_count": 4474 }
106
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# EDMDPMSolverMultistepScheduler

`EDMDPMSolverMultistepScheduler` is a [Karras formulation](https://huggingface.co/papers/2206.00364) of `DPMSolverMultistepScheduler`, a multistep scheduler from [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.

DPMSolver (and the improved version DPMSolver++) is a fast, dedicated high-order solver for diffusion ODEs with a convergence order guarantee. Empirically, DPMSolver sampling with only 20 steps can generate high-quality samples, and it can generate quite good samples even in 10 steps.

## EDMDPMSolverMultistepScheduler
[[autodoc]] EDMDPMSolverMultistepScheduler

## SchedulerOutput
[[autodoc]] schedulers.scheduling_utils.SchedulerOutput
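## Usage sketch

As a rough usage sketch, the scheduler can be swapped into a pipeline whose model was trained with the EDM formulation using the standard `from_config` pattern. The checkpoint below is one publicly available EDM-style model; the prompt and step count are illustrative:

```py
import torch
from diffusers import DiffusionPipeline, EDMDPMSolverMultistepScheduler

# Playground v2.5 is trained with the EDM formulation, so it pairs naturally with this scheduler
pipe = DiffusionPipeline.from_pretrained(
    "playgroundai/playground-v2.5-1024px-aesthetic", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = EDMDPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# few-step sampling is where DPMSolver++ shines
image = pipe("a serene mountain lake at sunrise", num_inference_steps=20).images[0]
```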
diffusers/docs/source/en/api/schedulers/edm_multistep_dpm_solver.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/edm_multistep_dpm_solver.md", "repo_id": "diffusers", "token_count": 450 }
107
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to run Stable Diffusion with Core ML [Core ML](https://developer.apple.com/documentation/coreml) is the model format and machine learning library supported by Apple frameworks. If you are interested in running Stable Diffusion models inside your macOS or iOS/iPadOS apps, this guide will show you how to convert existing PyTorch checkpoints into the Core ML format and use them for inference with Python or Swift. Core ML models can leverage all the compute engines available in Apple devices: the CPU, the GPU, and the Apple Neural Engine (or ANE, a tensor-optimized accelerator available in Apple Silicon Macs and modern iPhones/iPads). Depending on the model and the device it's running on, Core ML can mix and match compute engines too, so some portions of the model may run on the CPU while others run on GPU, for example. <Tip> You can also run the `diffusers` Python codebase on Apple Silicon Macs using the `mps` accelerator built into PyTorch. This approach is explained in depth in [the mps guide](mps), but it is not compatible with native apps. </Tip> ## Stable Diffusion Core ML Checkpoints Stable Diffusion weights (or checkpoints) are stored in the PyTorch format, so you need to convert them to the Core ML format before we can use them inside native apps. Thankfully, Apple engineers developed [a conversion tool](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml) based on `diffusers` to convert the PyTorch checkpoints to Core ML. Before you convert a model, though, take a moment to explore the Hugging Face Hub – chances are the model you're interested in is already available in Core ML format: - the [Apple](https://huggingface.co/apple) organization includes Stable Diffusion versions 1.4, 1.5, 2.0 base, and 2.1 base - [coreml community](https://huggingface.co/coreml-community) includes custom finetuned models - use this [filter](https://huggingface.co/models?pipeline_tag=text-to-image&library=coreml&p=2&sort=likes) to return all available Core ML checkpoints If you can't find the model you're interested in, we recommend you follow the instructions for [Converting Models to Core ML](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml) by Apple. ## Selecting the Core ML Variant to Use Stable Diffusion models can be converted to different Core ML variants intended for different purposes: - The type of attention blocks used. The attention operation is used to "pay attention" to the relationship between different areas in the image representations and to understand how the image and text representations are related. Attention is compute- and memory-intensive, so different implementations exist that consider the hardware characteristics of different devices. 
For Core ML Stable Diffusion models, there are two attention variants: * `split_einsum` ([introduced by Apple](https://machinelearning.apple.com/research/neural-engine-transformers)) is optimized for ANE devices, which is available in modern iPhones, iPads and M-series computers. * The "original" attention (the base implementation used in `diffusers`) is only compatible with CPU/GPU and not ANE. It can be *faster* to run your model on CPU + GPU using `original` attention than ANE. See [this performance benchmark](https://huggingface.co/blog/fast-mac-diffusers#performance-benchmarks) as well as some [additional measures provided by the community](https://github.com/huggingface/swift-coreml-diffusers/issues/31) for additional details. - The supported inference framework. * `packages` are suitable for Python inference. This can be used to test converted Core ML models before attempting to integrate them inside native apps, or if you want to explore Core ML performance but don't need to support native apps. For example, an application with a web UI could perfectly use a Python Core ML backend. * `compiled` models are required for Swift code. The `compiled` models in the Hub split the large UNet model weights into several files for compatibility with iOS and iPadOS devices. This corresponds to the [`--chunk-unet` conversion option](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). If you want to support native apps, then you need to select the `compiled` variant. The official Core ML Stable Diffusion [models](https://huggingface.co/apple/coreml-stable-diffusion-v1-4/tree/main) include these variants, but the community ones may vary: ``` coreml-stable-diffusion-v1-4 ├── README.md ├── original │ ├── compiled │ └── packages └── split_einsum ├── compiled └── packages ``` You can download and use the variant you need as shown below. ## Core ML Inference in Python Install the following libraries to run Core ML inference in Python: ```bash pip install huggingface_hub pip install git+https://github.com/apple/ml-stable-diffusion ``` ### Download the Model Checkpoints To run inference in Python, use one of the versions stored in the `packages` folders because the `compiled` ones are only compatible with Swift. You may choose whether you want to use `original` or `split_einsum` attention. This is how you'd download the `original` attention variant from the Hub to a directory called `models`: ```Python from huggingface_hub import snapshot_download from pathlib import Path repo_id = "apple/coreml-stable-diffusion-v1-4" variant = "original/packages" model_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) print(f"Model downloaded at {model_path}") ``` ### Inference[[python-inference]] Once you have downloaded a snapshot of the model, you can test it using Apple's Python script. ```shell python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" -i models/coreml-stable-diffusion-v1-4_original_packages -o </path/to/output/image> --compute-unit CPU_AND_GPU --seed 93 ``` Pass the path of the downloaded checkpoint with `-i` flag to the script. `--compute-unit` indicates the hardware you want to allow for inference. It must be one of the following options: `ALL`, `CPU_AND_GPU`, `CPU_ONLY`, `CPU_AND_NE`. You may also provide an optional output path, and a seed for reproducibility. 
The inference script assumes you're using the original version of the Stable Diffusion model, `CompVis/stable-diffusion-v1-4`. If you use another model, you *have* to specify its Hub id in the inference command line, using the `--model-version` option. This works for models already supported and custom models you trained or fine-tuned yourself. For example, if you want to use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5): ```shell python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" --compute-unit ALL -o output --seed 93 -i models/coreml-stable-diffusion-v1-5_original_packages --model-version runwayml/stable-diffusion-v1-5 ``` ## Core ML inference in Swift Running inference in Swift is slightly faster than in Python because the models are already compiled in the `mlmodelc` format. This is noticeable on app startup when the model is loaded but shouldn’t be noticeable if you run several generations afterward. ### Download To run inference in Swift on your Mac, you need one of the `compiled` checkpoint versions. We recommend you download them locally using Python code similar to the previous example, but with one of the `compiled` variants: ```Python from huggingface_hub import snapshot_download from pathlib import Path repo_id = "apple/coreml-stable-diffusion-v1-4" variant = "original/compiled" model_path = Path("./models") / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_")) snapshot_download(repo_id, allow_patterns=f"{variant}/*", local_dir=model_path, local_dir_use_symlinks=False) print(f"Model downloaded at {model_path}") ``` ### Inference[[swift-inference]] To run inference, please clone Apple's repo: ```bash git clone https://github.com/apple/ml-stable-diffusion cd ml-stable-diffusion ``` And then use Apple's command line tool, [Swift Package Manager](https://www.swift.org/package-manager/#): ```bash swift run StableDiffusionSample --resource-path models/coreml-stable-diffusion-v1-4_original_compiled --compute-units all "a photo of an astronaut riding a horse on mars" ``` You have to specify in `--resource-path` one of the checkpoints downloaded in the previous step, so please make sure it contains compiled Core ML bundles with the extension `.mlmodelc`. The `--compute-units` has to be one of these values: `all`, `cpuOnly`, `cpuAndGPU`, `cpuAndNeuralEngine`. For more details, please refer to the [instructions in Apple's repo](https://github.com/apple/ml-stable-diffusion). ## Supported Diffusers Features The Core ML models and inference code don't support many of the features, options, and flexibility of 🧨 Diffusers. These are some of the limitations to keep in mind: - Core ML models are only suitable for inference. They can't be used for training or fine-tuning. - Only two schedulers have been ported to Swift, the default one used by Stable Diffusion and `DPMSolverMultistepScheduler`, which we ported to Swift from our `diffusers` implementation. We recommend you use `DPMSolverMultistepScheduler`, since it produces the same quality in about half the steps. - Negative prompts, classifier-free guidance scale, and image-to-image tasks are available in the inference code. Advanced features such as depth guidance, ControlNet, and latent upscalers are not available yet. 
Apple's [conversion and inference repo](https://github.com/apple/ml-stable-diffusion) and our own [swift-coreml-diffusers](https://github.com/huggingface/swift-coreml-diffusers) repos are intended as technology demonstrators to enable other developers to build upon. If you feel strongly about any missing features, please feel free to open a feature request or, better yet, a contribution PR 🙂. ## Native Diffusers Swift app One easy way to run Stable Diffusion on your own Apple hardware is to use [our open-source Swift repo](https://github.com/huggingface/swift-coreml-diffusers), based on `diffusers` and Apple's conversion and inference repo. You can study the code, compile it with [Xcode](https://developer.apple.com/xcode/) and adapt it for your own needs. For your convenience, there's also a [standalone Mac app in the App Store](https://apps.apple.com/app/diffusers/id1666309574), so you can play with it without having to deal with the code or IDE. If you are a developer and have determined that Core ML is the best solution to build your Stable Diffusion app, then you can use the rest of this guide to get started with your project. We can't wait to see what you'll build 🙂.
diffusers/docs/source/en/optimization/coreml.md/0
{ "file_path": "diffusers/docs/source/en/optimization/coreml.md", "repo_id": "diffusers", "token_count": 3088 }
108
# Create a dataset for training There are many datasets on the [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) to train a model on, but if you can't find one you're interested in or want to use your own, you can create a dataset with the 🤗 [Datasets](hf.co/docs/datasets) library. The dataset structure depends on the task you want to train your model on. The most basic dataset structure is a directory of images for tasks like unconditional image generation. Another dataset structure may be a directory of images and a text file containing their corresponding text captions for tasks like text-to-image generation. This guide will show you two ways to create a dataset to finetune on: - provide a folder of images to the `--train_data_dir` argument - upload a dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument <Tip> 💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide. </Tip> ## Provide a dataset as a folder For unconditional generation, you can provide your own dataset as a folder of images. The training script uses the [`ImageFolder`](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) builder from 🤗 Datasets to automatically build a dataset from the folder. Your directory structure should look like: ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` Pass the path to the dataset directory to the `--train_data_dir` argument, and then you can start training: ```bash accelerate launch train_unconditional.py \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` ## Upload your data to the Hub <Tip> 💡 For more details and context about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post. </Tip> Start by creating a dataset with the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images. You can use the `data_dir` or `data_files` parameters to specify the location of the dataset. 
The `data_files` parameter supports mapping specific files to dataset splits like `train` or `test`:

```python
from datasets import load_dataset

# example 1: local folder
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")

# example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")

# example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset(
    "imagefolder",
    data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip",
)

# example 4: providing several splits
dataset = load_dataset(
    "imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}
)
```

Then use the [`~datasets.Dataset.push_to_hub`] method to upload the dataset to the Hub:

```python
# assuming you have run the huggingface-cli login command in a terminal
dataset.push_to_hub("name_of_your_dataset")

# if you want to push to a private repo, simply pass private=True:
dataset.push_to_hub("name_of_your_dataset", private=True)
```

Now the dataset is available for training by passing the dataset name to the `--dataset_name` argument:

```bash
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
  --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
  --dataset_name="name_of_your_dataset" \
  <other-arguments>
```

## Next steps

Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) arguments of a training script.

For your next steps, feel free to try and use your dataset to train a model for [unconditional generation](unconditional_training) or [text-to-image generation](text2image)!
diffusers/docs/source/en/training/create_dataset.md/0
{ "file_path": "diffusers/docs/source/en/training/create_dataset.md", "repo_id": "diffusers", "token_count": 1299 }
109
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoPipeline Diffusers provides many pipelines for basic tasks like generating images, videos, audio, and inpainting. On top of these, there are specialized pipelines for adapters and features like upscaling, super-resolution, and more. Different pipeline classes can even use the same checkpoint because they share the same pretrained model! With so many different pipelines, it can be overwhelming to know which pipeline class to use. The [AutoPipeline](../api/pipelines/auto_pipeline) class is designed to simplify the variety of pipelines in Diffusers. It is a generic *task-first* pipeline that lets you focus on a task ([`AutoPipelineForText2Image`], [`AutoPipelineForImage2Image`], and [`AutoPipelineForInpainting`]) without needing to know the specific pipeline class. The [AutoPipeline](../api/pipelines/auto_pipeline) automatically detects the correct pipeline class to use. For example, let's use the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint. Under the hood, [AutoPipeline](../api/pipelines/auto_pipeline): 1. Detects a `"stable-diffusion"` class from the [model_index.json](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/model_index.json) file. 2. Depending on the task you're interested in, it loads the [`StableDiffusionPipeline`], [`StableDiffusionImg2ImgPipeline`], or [`StableDiffusionInpaintPipeline`]. Any parameter (`strength`, `num_inference_steps`, etc.) you would pass to these specific pipelines can also be passed to the [AutoPipeline](../api/pipelines/auto_pipeline). 
<hfoptions id="autopipeline"> <hfoption id="text-to-image"> ```py from diffusers import AutoPipelineForText2Image import torch pipe_txt2img = AutoPipelineForText2Image.from_pretrained( "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") prompt = "cinematic photo of Godzilla eating sushi with a cat in a izakaya, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(37) image = pipe_txt2img(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png"/> </div> </hfoption> <hfoption id="image-to-image"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import load_image import torch pipe_img2img = AutoPipelineForImage2Image.from_pretrained( "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png") prompt = "cinematic photo of Godzilla eating burgers with a cat in a fast food restaurant, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(53) image = pipe_img2img(prompt, image=init_image, generator=generator).images[0] image ``` Notice how the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint is used for both text-to-image and image-to-image tasks? To save memory and avoid loading the checkpoint twice, use the [`~DiffusionPipeline.from_pipe`] method. ```py pipe_img2img = AutoPipelineForImage2Image.from_pipe(pipe_txt2img).to("cuda") image = pipeline(prompt, image=init_image, generator=generator).images[0] image ``` You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Reuse a pipeline](../using-diffusers/loading#reuse-a-pipeline) guide. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png"/> </div> </hfoption> <hfoption id="inpainting"> ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-mask.png") prompt = "cinematic photo of a owl, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(38) image = pipeline(prompt, image=init_image, mask_image=mask_image, generator=generator, strength=0.4).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png"/> </div> </hfoption> </hfoptions> ## Unsupported checkpoints The [AutoPipeline](../api/pipelines/auto_pipeline) supports [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [Stable Diffusion XL](../api/pipelines/stable_diffusion/stable_diffusion_xl), [ControlNet](../api/pipelines/controlnet), [Kandinsky 2.1](../api/pipelines/kandinsky.md), [Kandinsky 2.2](../api/pipelines/kandinsky_v22), and [DeepFloyd IF](../api/pipelines/deepfloyd_if) checkpoints. If you try to load an unsupported checkpoint, you'll get an error. ```py from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( "openai/shap-e-img2img", torch_dtype=torch.float16, use_safetensors=True ) "ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None" ```
diffusers/docs/source/en/tutorials/autopipeline.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/autopipeline.md", "repo_id": "diffusers", "token_count": 2061 }
110
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

[[open-in-colab]]

# Trajectory Consistency Distillation-LoRA

Trajectory Consistency Distillation (TCD) enables a model to generate higher quality and more detailed images with fewer steps. Moreover, owing to the effective error mitigation during the distillation process, TCD demonstrates superior performance even under conditions of large inference steps.

The major advantages of TCD are:

- Better than Teacher: TCD demonstrates superior generative quality at both small and large inference steps and exceeds the performance of [DPM-Solver++(2S)](../../api/schedulers/multistep_dpm_solver) with Stable Diffusion XL (SDXL). There is no additional discriminator or LPIPS supervision included during TCD training.

- Flexible Inference Steps: The inference steps for TCD sampling can be freely adjusted without adversely affecting the image quality.

- Freely change detail level: During inference, the level of detail in the image can be adjusted with a single hyperparameter, *gamma*.

> [!TIP]
> For more technical details of TCD, please refer to the [paper](https://arxiv.org/abs/2402.19159) or the official [project page](https://mhh0318.github.io/tcd/).

For large models like SDXL, TCD is trained with [LoRA](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) to reduce memory usage. This is also useful because you can reuse LoRAs between different finetuned models, as long as they share the same base model, without further training.

This guide will show you how to perform inference with TCD-LoRAs for a variety of tasks like text-to-image and inpainting, as well as how you can easily combine TCD-LoRAs with other adapters. Choose one of the supported base models and its corresponding TCD-LoRA checkpoint from the table below to get started.

| Base model                                                                                      | TCD-LoRA checkpoint                                            |
|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------|
| [stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)                  | [TCD-SD15](https://huggingface.co/h1t/TCD-SD15-LoRA)           |
| [stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base)       | [TCD-SD21-base](https://huggingface.co/h1t/TCD-SD21-base-LoRA) |
| [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) | [TCD-SDXL](https://huggingface.co/h1t/TCD-SDXL-LoRA)           |

Make sure you have [PEFT](https://github.com/huggingface/peft) installed for better LoRA support.

```bash
pip install -U peft
```

## General tasks

In this guide, let's use the [`StableDiffusionXLPipeline`] and the [`TCDScheduler`]. Use the [`~StableDiffusionPipeline.load_lora_weights`] method to load the SDXL-compatible TCD-LoRA weights.

A few tips to keep in mind for TCD-LoRA inference are to:

- Keep the `num_inference_steps` between 4 and 50
- Set `eta` (used to control stochasticity at each step) between 0 and 1.
You should use a higher `eta` when increasing the number of inference steps, but the downside is that a larger `eta` in [`TCDScheduler`] leads to blurrier images. A value of 0.3 is recommended to produce good results. <hfoptions id="tasks"> <hfoption id="text-to-image"> ```python import torch from diffusers import StableDiffusionXLPipeline, TCDScheduler device = "cuda" base_model_id = "stabilityai/stable-diffusion-xl-base-1.0" tcd_lora_id = "h1t/TCD-SDXL-LoRA" pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device) pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config) pipe.load_lora_weights(tcd_lora_id) pipe.fuse_lora() prompt = "Painting of the orange cat Otto von Garfield, Count of Bismarck-Schönhausen, Duke of Lauenburg, Minister-President of Prussia. Depicted wearing a Prussian Pickelhaube and eating his favorite meal - lasagna." image = pipe( prompt=prompt, num_inference_steps=4, guidance_scale=0, eta=0.3, generator=torch.Generator(device=device).manual_seed(0), ).images[0] ``` ![](https://github.com/jabir-zheng/TCD/raw/main/assets/demo_image.png) </hfoption> <hfoption id="inpainting"> ```python import torch from diffusers import AutoPipelineForInpainting, TCDScheduler from diffusers.utils import load_image, make_image_grid device = "cuda" base_model_id = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" tcd_lora_id = "h1t/TCD-SDXL-LoRA" pipe = AutoPipelineForInpainting.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device) pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config) pipe.load_lora_weights(tcd_lora_id) pipe.fuse_lora() img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).resize((1024, 1024)) mask_image = load_image(mask_url).resize((1024, 1024)) prompt = "a tiger sitting on a park bench" image = pipe( prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=8, guidance_scale=0, eta=0.3, strength=0.99, # make sure to use `strength` below 1.0 generator=torch.Generator(device=device).manual_seed(0), ).images[0] grid_image = make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ![](https://github.com/jabir-zheng/TCD/raw/main/assets/inpainting_tcd.png) </hfoption> </hfoptions> ## Community models TCD-LoRA also works with many community finetuned models and plugins. For example, load the [animagine-xl-3.0](https://huggingface.co/cagliostrolab/animagine-xl-3.0) checkpoint which is a community finetuned version of SDXL for generating anime images. ```python import torch from diffusers import StableDiffusionXLPipeline, TCDScheduler device = "cuda" base_model_id = "cagliostrolab/animagine-xl-3.0" tcd_lora_id = "h1t/TCD-SDXL-LoRA" pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device) pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config) pipe.load_lora_weights(tcd_lora_id) pipe.fuse_lora() prompt = "A man, clad in a meticulously tailored military uniform, stands with unwavering resolve. The uniform boasts intricate details, and his eyes gleam with determination. Strands of vibrant, windswept hair peek out from beneath the brim of his cap." 
image = pipe(
    prompt=prompt,
    num_inference_steps=8,
    guidance_scale=0,
    eta=0.3,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/animagine_xl.png)

TCD-LoRA also supports other LoRAs trained on different styles. For example, let's load the [TheLastBen/Papercut_SDXL](https://huggingface.co/TheLastBen/Papercut_SDXL) LoRA and fuse it with the TCD-LoRA with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method.

> [!TIP]
> Check out the [Merge LoRAs](merge_loras) guide to learn more about efficient merging methods.

```python
import torch
from diffusers import StableDiffusionXLPipeline
from scheduling_tcd import TCDScheduler

device = "cuda"
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"
styled_lora_id = "TheLastBen/Papercut_SDXL"

pipe = StableDiffusionXLPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to(device)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights(tcd_lora_id, adapter_name="tcd")
pipe.load_lora_weights(styled_lora_id, adapter_name="style")
pipe.set_adapters(["tcd", "style"], adapter_weights=[1.0, 1.0])

prompt = "papercut of a winter mountain, snow"

image = pipe(
    prompt=prompt,
    num_inference_steps=4,
    guidance_scale=0,
    eta=0.3,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/styled_lora.png)

## Adapters

TCD-LoRA is very versatile, and it can be combined with other adapter types like ControlNets, IP-Adapter, and AnimateDiff.

<hfoptions id="adapters">
<hfoption id="ControlNet">

### Depth ControlNet

```python
import torch
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor, DPTForDepthEstimation
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image, make_image_grid
from scheduling_tcd import TCDScheduler

device = "cuda"
depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(device)
feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")

def get_depth_map(image):
    image = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
    with torch.no_grad(), torch.autocast(device):
        depth_map = depth_estimator(image).predicted_depth

    depth_map = torch.nn.functional.interpolate(
        depth_map.unsqueeze(1),
        size=(1024, 1024),
        mode="bicubic",
        align_corners=False,
    )
    depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_map = (depth_map - depth_min) / (depth_max - depth_min)
    image = torch.cat([depth_map] * 3, dim=1)

    image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
    image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
    return image

base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_id = "diffusers/controlnet-depth-sdxl-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"

controlnet = ControlNetModel.from_pretrained(
    controlnet_id,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    base_model_id,
    controlnet=controlnet,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.enable_model_cpu_offload()

pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()

prompt = "stormtrooper lecture, photorealistic"

image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-depth/resolve/main/images/stormtrooper.png")
depth_image = get_depth_map(image)

controlnet_conditioning_scale = 0.5  # recommended for good generalization

image = pipe(
    prompt,
    image=depth_image,
    num_inference_steps=4,
    guidance_scale=0,
    eta=0.3,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]

grid_image = make_image_grid([depth_image, image], rows=1, cols=2)
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/controlnet_depth_tcd.png)

### Canny ControlNet

```python
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image, make_image_grid
from scheduling_tcd import TCDScheduler

device = "cuda"
base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
controlnet_id = "diffusers/controlnet-canny-sdxl-1.0"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"

controlnet = ControlNetModel.from_pretrained(
    controlnet_id,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    base_model_id,
    controlnet=controlnet,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.enable_model_cpu_offload()

pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()

prompt = "ultrarealistic shot of a furry blue bird"

canny_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")

controlnet_conditioning_scale = 0.5  # recommended for good generalization

image = pipe(
    prompt,
    image=canny_image,
    num_inference_steps=4,
    guidance_scale=0,
    eta=0.3,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]

grid_image = make_image_grid([canny_image, image], rows=1, cols=2)
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/controlnet_canny_tcd.png)

<Tip>
The inference parameters in this example might not work for all examples, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the best combination.
</Tip>

</hfoption>
<hfoption id="IP-Adapter">

This example shows how to use the TCD-LoRA with the [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter/tree/main) and SDXL.

```python
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers.utils import load_image, make_image_grid

from ip_adapter import IPAdapterXL
from scheduling_tcd import TCDScheduler

device = "cuda"
base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
image_encoder_path = "sdxl_models/image_encoder"
ip_ckpt = "sdxl_models/ip-adapter_sdxl.bin"
tcd_lora_id = "h1t/TCD-SDXL-LoRA"

pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    variant="fp16"
)
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)

pipe.load_lora_weights(tcd_lora_id)
pipe.fuse_lora()

ip_model = IPAdapterXL(pipe, image_encoder_path, ip_ckpt, device)

ref_image = load_image("https://raw.githubusercontent.com/tencent-ailab/IP-Adapter/main/assets/images/woman.png").resize((512, 512))

prompt = "best quality, high quality, wearing sunglasses"

image = ip_model.generate(
    pil_image=ref_image,
    prompt=prompt,
    scale=0.5,
    num_samples=1,
    num_inference_steps=4,
    guidance_scale=0,
    eta=0.3,
    seed=0,
)[0]

grid_image = make_image_grid([ref_image, image], rows=1, cols=2)
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/ip_adapter.png)

</hfoption>
<hfoption id="AnimateDiff">

[`AnimateDiff`] allows animating images using Stable Diffusion models. TCD-LoRA can substantially accelerate the process without degrading image quality. Animations generated with TCD-LoRA and AnimateDiff have a noticeably more lucid outcome.

```python
import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from scheduling_tcd import TCDScheduler

from diffusers.utils import export_to_gif

adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5")
pipe = AnimateDiffPipeline.from_pretrained(
    "frankjoshua/toonyou_beta6",
    motion_adapter=adapter,
).to("cuda")

# set TCDScheduler
pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)

# load TCD LoRA
pipe.load_lora_weights("h1t/TCD-SD15-LoRA", adapter_name="tcd")
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora")

pipe.set_adapters(["tcd", "motion-lora"], adapter_weights=[1.0, 1.2])

prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress"
generator = torch.manual_seed(0)
frames = pipe(
    prompt=prompt,
    num_inference_steps=5,
    guidance_scale=0,
    cross_attention_kwargs={"scale": 1},
    num_frames=24,
    eta=0.3,
    generator=generator
).frames[0]
export_to_gif(frames, "animation.gif")
```

![](https://github.com/jabir-zheng/TCD/raw/main/assets/animation_example.gif)

</hfoption>
</hfoptions>
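
If you want to verify the speedup from TCD-LoRA on your own hardware, a minimal timing sketch like the one below can help. It assumes one of the `pipe` objects and prompts from the examples above is already set up; `torch.cuda.synchronize` is only needed when running on a CUDA device.

```python
import time

import torch

torch.cuda.synchronize()
start = time.perf_counter()
image = pipe(
    prompt=prompt,
    num_inference_steps=4,
    guidance_scale=0,
    eta=0.3,
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
torch.cuda.synchronize()
print(f"4-step TCD generation took {time.perf_counter() - start:.2f}s")
```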
diffusers/docs/source/en/using-diffusers/inference_with_tcd_lora.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/inference_with_tcd_lora.md", "repo_id": "diffusers", "token_count": 6005 }
111
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Diffusion XL Turbo

[[open-in-colab]]

SDXL Turbo is an adversarial time-distilled [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (SDXL) model capable of running inference in as little as 1 step.

This guide will show you how to use SDXL-Turbo for text-to-image and image-to-image.

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install -q diffusers transformers accelerate
```

## Load model checkpoints

Model weights may be stored in separate subfolders on the Hub or locally, in which case, you should use the [`~StableDiffusionXLPipeline.from_pretrained`] method:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline = pipeline.to("cuda")
```

You can also use the [`~StableDiffusionXLPipeline.from_single_file`] method to load a model checkpoint stored in a single file format (`.ckpt` or `.safetensors`) from the Hub or locally. For this loading method, you need to set `timestep_spacing="trailing"` (feel free to experiment with the other scheduler config values to get better results):

```py
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/sdxl-turbo/blob/main/sd_xl_turbo_1.0_fp16.safetensors",
    torch_dtype=torch.float16, variant="fp16")
pipeline = pipeline.to("cuda")
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config, timestep_spacing="trailing")
```

## Text-to-image

For text-to-image, pass a text prompt. By default, SDXL Turbo generates a 512x512 image, and that resolution gives the best results. You can try setting the `height` and `width` parameters to 768x768 or 1024x1024, but you should expect quality degradations when doing so.

Make sure to set `guidance_scale` to 0.0 to disable it, as the model was trained without it. A single inference step is enough to generate high quality images. Increasing the number of steps to 2, 3 or 4 should improve image quality.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline_text2image = pipeline_text2image.to("cuda")

prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."

image = pipeline_text2image(prompt=prompt, guidance_scale=0.0, num_inference_steps=1).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-text2img.png" alt="generated image of a racoon in a robe"/>
</div>

## Image-to-image

For image-to-image generation, make sure that `num_inference_steps * strength` is larger than or equal to 1. The image-to-image pipeline will run for `int(num_inference_steps * strength)` steps, e.g. `0.5 * 2.0 = 1` step in our example below.

```py
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, make_image_grid

# use from_pipe to avoid consuming additional memory when loading a checkpoint
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
init_image = init_image.resize((512, 512))

prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"

image = pipeline_image2image(prompt, image=init_image, strength=0.5, guidance_scale=0.0, num_inference_steps=2).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-img2img.png" alt="Image-to-image generation sample using SDXL Turbo"/>
</div>

## Speed-up SDXL Turbo even more

- Compile the UNet if you are using PyTorch version 2.0 or higher. The first inference run will be very slow, but subsequent ones will be much faster.

```py
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

- When using the default VAE, keep it in `float32` to avoid costly `dtype` conversions before and after each generation. You only need to do this once before your first generation:

```py
pipe.upcast_vae()
```

As an alternative, you can also use a [16-bit VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) created by community member [`@madebyollin`](https://huggingface.co/madebyollin) that does not need to be upcast to `float32`.
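
As a rough sketch, swapping in that 16-bit VAE could look like this (it assumes the `pipe` object from the snippets above, and loads the VAE directly in `float16`):

```py
from diffusers import AutoencoderKL
import torch

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe.vae = vae.to("cuda")
```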
diffusers/docs/source/en/using-diffusers/sdxl_turbo.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/sdxl_turbo.md", "repo_id": "diffusers", "token_count": 1714 }
112
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Token Merging

Token Merging (introduced in [Token Merging: Your ViT But Faster](https://arxiv.org/abs/2210.09461)) works by progressively merging redundant tokens or patches in the forward pass of a Transformer-based network, which can reduce the inference latency of the underlying network.

After Token Merging (ToMe) was released, the authors published [Token Merging for Fast Stable Diffusion](https://arxiv.org/abs/2303.17604), which introduced a version of ToMe that is better compatible with Stable Diffusion. With ToMe, you can smoothly reduce the inference latency of a [`DiffusionPipeline`]. This document covers how to apply ToMe to the [`StableDiffusionPipeline`], the expected speedups, and the qualitative aspects of using ToMe with the [`StableDiffusionPipeline`].

## Using ToMe

The authors of ToMe released a convenient Python library called [`tomesd`](https://github.com/dbolya/tomesd) that lets you apply ToMe to a [`DiffusionPipeline`] like so:

```diff
from diffusers import StableDiffusionPipeline
import tomesd

pipeline = StableDiffusionPipeline.from_pretrained(
      "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
+ tomesd.apply_patch(pipeline, ratio=0.5)

image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```

And that's it!

`tomesd.apply_patch()` exposes [a number of arguments](https://github.com/dbolya/tomesd#usage) to let you strike a balance between the pipeline inference speed and the quality of the generated tokens. The most important of these arguments is `ratio`, which controls the number of tokens that are merged during the forward pass. For more details on `tomesd`, please refer to its repository (https://github.com/dbolya/tomesd) and [the paper](https://arxiv.org/abs/2303.17604).

## Benchmarking `tomesd` with `StableDiffusionPipeline`

We benchmarked the impact of using `tomesd` on [`StableDiffusionPipeline`] along with [xformers](https://huggingface.co/docs/diffusers/optimization/xformers) across different image resolutions. We used A100 and V100 as our test GPU devices with the following development environment (with Python 3.8.5):

```bash
- `diffusers` version: 0.15.1
- Python version: 3.8.16
- PyTorch version (GPU?): 1.13.1+cu116 (True)
- Huggingface_hub version: 0.13.2
- Transformers version: 4.27.2
- Accelerate version: 0.18.0
- xFormers version: 0.0.16
- tomesd version: 0.1.2
```

We used this script for benchmarking: [https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335).

Here are the results:

### A100

| Resolution | Batch size | Vanilla | ToMe | ToMe + xFormers | ToMe speedup (%) | ToMe + xFormers speedup (%) |
| --- | --- | --- | --- | --- | --- | --- |
| 512 | 10 | 6.88 | 5.26 | 4.69 | 23.54651163 | 31.83139535 |
| | | | | | | |
| 768 | 10 | OOM | 14.71 | 11 | | |
| | 8 | OOM | 11.56 | 8.84 | | |
| | 4 | OOM | 5.98 | 4.66 | | |
| | 2 | 4.99 | 3.24 | 3.1 | 35.07014028 | 37.8757515 |
| | 1 | 3.29 | 2.24 | 2.03 | 31.91489362 | 38.29787234 |
| | | | | | | |
| 1024 | 10 | OOM | OOM | OOM | | |
| | 8 | OOM | OOM | OOM | | |
| | 4 | OOM | 12.51 | 9.09 | | |
| | 2 | OOM | 6.52 | 4.96 | | |
| | 1 | 6.4 | 3.61 | 2.81 | 43.59375 | 56.09375 |

***Results are in seconds. Speedups are computed relative to `Vanilla`.***

### V100

| Resolution | Batch size | Vanilla | ToMe | ToMe + xFormers | ToMe speedup (%) | ToMe + xFormers speedup (%) |
| --- | --- | --- | --- | --- | --- | --- |
| 512 | 10 | OOM | 10.03 | 9.29 | | |
| | 8 | OOM | 8.05 | 7.47 | | |
| | 4 | 5.7 | 4.3 | 3.98 | 24.56140351 | 30.1754386 |
| | 2 | 3.14 | 2.43 | 2.27 | 22.61146497 | 27.70700637 |
| | 1 | 1.88 | 1.57 | 1.57 | 16.4893617 | 16.4893617 |
| | | | | | | |
| 768 | 10 | OOM | OOM | 23.67 | | |
| | 8 | OOM | OOM | 18.81 | | |
| | 4 | OOM | 11.81 | 9.7 | | |
| | 2 | OOM | 6.27 | 5.2 | | |
| | 1 | 5.43 | 3.38 | 2.82 | 37.75322284 | 48.06629834 |
| | | | | | | |
| 1024 | 10 | OOM | OOM | OOM | | |
| | 8 | OOM | OOM | OOM | | |
| | 4 | OOM | OOM | 19.35 | | |
| | 2 | OOM | 13 | 10.78 | | |
| | 1 | OOM | 6.66 | 5.54 | | |

As seen in the tables above, the speedup from `tomesd` becomes more pronounced at higher image resolutions. It is also interesting to note that with `tomesd`, the pipeline can run at higher resolutions such as 1024x1024. You may be able to speed up inference even further with [`torch.compile()`](https://huggingface.co/docs/diffusers/optimization/torch2.0).

## Quality

As reported in [the paper](https://arxiv.org/abs/2303.17604), ToMe can preserve the quality of the generated images to a great extent while speeding up inference. By increasing the `ratio`, it is possible to further speed up inference, but that might come at the cost of a deterioration in the image quality.

To test the quality of the generated samples using our setup, we sampled a few prompts from the "Parti Prompts" (introduced in [Parti](https://parti.research.google/)) and performed inference with the [`StableDiffusionPipeline`] in the following settings:

- Vanilla [`StableDiffusionPipeline`]
- [`StableDiffusionPipeline`] + ToMe
- [`StableDiffusionPipeline`] + ToMe + xformers

We didn't notice any significant degradation in the quality of the generated samples. Here are some samples:

![tome-samples](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png)

You can check out the generated samples [here](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=). We used [this script](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd) for conducting this experiment.
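
If you want to try `tomesd` together with the `torch.compile()` speedup mentioned above, a rough sketch could look like this (it assumes PyTorch 2.x; the `ratio` value is only illustrative):

```python
import torch
import tomesd
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
tomesd.apply_patch(pipeline, ratio=0.5)  # merge 50% of the redundant tokens
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```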
diffusers/docs/source/ko/optimization/tome.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/tome.md", "repo_id": "diffusers", "token_count": 4361 }
113
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Unconditional image generation

Unlike text-to-image or image-to-image models, unconditional image generation is not conditioned on text or images; it only generates images that resemble the distribution of its training data.

<iframe
	src="https://stevhliu-ddpm-butterflies-128.hf.space"
	frameborder="0"
	width="850"
	height="550"
></iframe>

This guide explains how to train an unconditional image generation model on existing datasets as well as on your own custom dataset. All of the training scripts for unconditional image generation can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) if you're interested in learning more about the training details.

Before running the scripts, make sure to install the dependencies first:

```bash
pip install diffusers[training] accelerate datasets
```

Next, initialize a 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment:

```bash
accelerate config
```

To set up a default 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell such as a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

## Uploading the model to the Hub

You can upload your model to the Hub by adding the following argument to the training script:

```bash
--push_to_hub
```

## Saving and loading checkpoints

It is a good idea to save checkpoints regularly in case anything goes wrong during training. To save a checkpoint, pass the following argument to the training script:

```bash
--checkpointing_steps=500
```

The full training state is saved in a subfolder of `output_dir` every 500 steps, and you can load a checkpoint and resume training by passing the `--resume_from_checkpoint` argument to the training script:

```bash
--resume_from_checkpoint="checkpoint-1500"
```

## Finetuning

You're ready to launch the training script now! Specify the dataset name to finetune on with the `--dataset_name` argument, and then save the model to the path specified by the `--output_dir` argument. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.

The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.

<Tip>

💡 A full training run takes 2 hours on 4xV100 GPUs.

</Tip>

For example, to finetune on the [Oxford Flowers](https://huggingface.co/datasets/huggan/flowers-102-categories) dataset:

```bash
accelerate launch train_unconditional.py \
  --dataset_name="huggan/flowers-102-categories" \
  --resolution=64 \
  --output_dir="ddpm-ema-flowers-64" \
  --train_batch_size=16 \
  --num_epochs=100 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-4 \
  --lr_warmup_steps=500 \
  --mixed_precision=no \
  --push_to_hub
```

<div class="flex justify-center">
    <img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png"/>
</div>

Or to finetune on the [Naruto](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset:

```bash
accelerate launch train_unconditional.py \
  --dataset_name="lambdalabs/naruto-blip-captions" \
  --resolution=64 \
  --output_dir="ddpm-ema-naruto-64" \
  --train_batch_size=16 \
  --num_epochs=100 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-4 \
  --lr_warmup_steps=500 \
  --mixed_precision=no \
  --push_to_hub
```

<div class="flex justify-center">
    <img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png"/>
</div>

### Training with multiple GPUs

`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:

```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \
  --dataset_name="lambdalabs/naruto-blip-captions" \
  --resolution=64 --center_crop --random_flip \
  --output_dir="ddpm-ema-naruto-64" \
  --train_batch_size=16 \
  --num_epochs=100 \
  --gradient_accumulation_steps=1 \
  --use_ema \
  --learning_rate=1e-4 \
  --lr_warmup_steps=500 \
  --mixed_precision="fp16" \
  --logger="wandb" \
  --push_to_hub
```
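
Once training finishes, a minimal sketch for sampling from the finetuned model could look like the following (it assumes the `ddpm-ema-naruto-64` output directory from the command above; any of the output directories in this guide work the same way):

```py
from diffusers import DDPMPipeline

# load the finetuned unconditional pipeline from the training output directory
pipeline = DDPMPipeline.from_pretrained("ddpm-ema-naruto-64").to("cuda")
image = pipeline().images[0]
image.save("naruto_sample.png")
```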
diffusers/docs/source/ko/training/unconditional_training.md/0
{ "file_path": "diffusers/docs/source/ko/training/unconditional_training.md", "repo_id": "diffusers", "token_count": 3117 }
114
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Diffusion XL Turbo

[[open-in-colab]]

SDXL Turbo is an adversarial time-distilled [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (SDXL) model capable of running inference in as little as a single step.

This guide will show you how to use SDXL-Turbo for text-to-image and image-to-image.

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install -q diffusers transformers accelerate
```

## Load model checkpoints

Model weights may be stored in separate subfolders on the Hub or locally, in which case you should use the [`~StableDiffusionXLPipeline.from_pretrained`] method:

```py
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline = pipeline.to("cuda")
```

You can also use the [`~StableDiffusionXLPipeline.from_single_file`] method to load a model checkpoint stored in a single file format (`.ckpt` or `.safetensors`) from the Hub or locally:

```py
from diffusers import StableDiffusionXLPipeline
import torch

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/sdxl-turbo/blob/main/sd_xl_turbo_1.0_fp16.safetensors", torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
```

## Text-to-image

For text-to-image, pass a text prompt. By default, SDXL Turbo generates a 512x512 image, and that resolution gives the best results. You can try setting the `height` and `width` parameters to 768x768 or 1024x1024, but you should expect quality degradations when doing so.

Make sure to set `guidance_scale` to 0.0 to disable it, as the model was trained without it. A single inference step is enough to generate high quality images. Increasing the number of steps to 2, 3 or 4 should improve image quality.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline_text2image = pipeline_text2image.to("cuda")

prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."

image = pipeline_text2image(prompt=prompt, guidance_scale=0.0, num_inference_steps=1).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-text2img.png" alt="generated image of a racoon in a robe"/>
</div>

## Image-to-image

For image-to-image generation, make sure that `num_inference_steps * strength` is larger than or equal to 1. The image-to-image pipeline will run for `int(num_inference_steps * strength)` steps, e.g. `0.5 * 2.0 = 1` step in the example below.

```py
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, make_image_grid

# use from_pipe to avoid consuming additional memory when loading a checkpoint
pipeline = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
init_image = init_image.resize((512, 512))

prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"

image = pipeline(prompt, image=init_image, strength=0.5, guidance_scale=0.0, num_inference_steps=2).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-img2img.png" alt="Image-to-image generation sample using SDXL Turbo"/>
</div>

## Speed up SDXL Turbo even more

- Compile the UNet if you are using PyTorch version 2.0 or higher. The first inference run will be very slow, but subsequent ones will be much faster.

```py
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

- When using the default VAE, keep it in `float32` to avoid costly `dtype` conversions before and after each generation. You only need to do this once before your first generation:

```py
pipe.upcast_vae()
```

Alternatively, you can also use a [16-bit VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) created by community member [`@madebyollin`](https://huggingface.co/madebyollin) that does not need to be upcast to `float32`.
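
If you need reproducible outputs from the one-step generations above, you can additionally pass a seeded `torch.Generator` to any of the pipeline calls (a minimal sketch; the seed value is arbitrary):

```py
generator = torch.Generator("cuda").manual_seed(42)
image = pipeline_text2image(prompt=prompt, guidance_scale=0.0, num_inference_steps=1, generator=generator).images[0]
```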
diffusers/docs/source/ko/using-diffusers/sdxl_turbo.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/sdxl_turbo.md", "repo_id": "diffusers", "token_count": 2976 }
115
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Effective and efficient diffusion

[[open-in-colab]]

Getting a [`DiffusionPipeline`] to generate images in a certain style or to include exactly what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again.

This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline, to reduce the time between inference cycles so you can iterate faster.

This tutorial walks you through how to generate images faster and better with the [`DiffusionPipeline`].

Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:

```python
from diffusers import DiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True)
```

The example prompt used in this tutorial is `portrait photo of a old warrior chief`, but feel free to imagine and construct a prompt of your own:

```python
prompt = "portrait photo of a old warrior chief"
```

## Speed

<Tip>

💡 If you don't have a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!

</Tip>

One of the simplest ways to speed up inference is to place the pipeline on a GPU, the same way you would with any PyTorch module:

```python
pipeline = pipeline.to("cuda")
```

To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reusing_seeds):

```python
import torch

generator = torch.Generator("cuda").manual_seed(0)
```

Now you can generate an image:

```python
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>

This process took about 30 seconds on a T4 GPU (it might be faster if your GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16`, or by running fewer inference steps.

Let's lower the model precision to `float16` and generate an image:

```python
import torch

pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>

This time, it only took about 11 seconds to generate the image, which is nearly 3x faster than before!

<Tip>

💡 We strongly recommend always running the pipeline in `float16`; so far, we have rarely seen any degradation in output quality.

</Tip>

Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method:

```python
pipeline.scheduler.compatibles
[
    diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
    diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
    diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
    diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
    diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
    diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
    diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
    diffusers.schedulers.scheduling_pndm.PNDMScheduler,
    diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```

The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires about 50 inference steps, but more performant schedulers like the [`DPMSolverMultistepScheduler`] only require about 20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler:

```python
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```

Now set `num_inference_steps` to 20:

```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>

Great, you've managed to cut the inference time down to 4 seconds! ⚡️

## Memory

The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM).

Create a function that assigns prompts and `Generators` to each batch of images to generate. Make sure to assign each `Generator` a seed so you can reproduce good results.

```python
def get_inputs(batch_size=1):
    generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
    prompts = batch_size * [prompt]
    num_inference_steps = 20

    return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```

Set `batch_size=4` and see how much memory you've consumed:

```python
from diffusers.utils import make_image_grid

images = pipeline(**get_inputs(batch_size=4)).images
make_image_grid(images, 2, 2)
```

Unless you have a GPU with more memory, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline with the [`~DiffusionPipeline.enable_attention_slicing`] function:

```python
pipeline.enable_attention_slicing()
```

Now try increasing the `batch_size` to 8!

```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png">
</div>

Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images in about 3.5 seconds! This is probably the fastest you can go on a T4 GPU without sacrificing quality.

## Quality

In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reducing the number of inference steps with a more performant scheduler, and enabling attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of the generated images.

### Better checkpoints

The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and since its official launch, several improved versions have also been released. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.

As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Have fun exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and the [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!

### Better pipeline components

You can also try replacing the current pipeline components with a newer version. Let's load the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generate some images:

```python
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png">
</div>

### Better prompt engineering

The text prompt you use to generate an image is super important, so much so that it is called *prompt engineering*. Some considerations to keep in mind during prompt engineering are:

- How is the image, or images similar to the one I want to generate, stored on the internet?
- What additional detail can I give that steers the model towards the style I want?

With this in mind, let's improve the prompt to include color and higher quality details:

```python
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
```

Generate a batch of images with the new prompt:

```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png">
</div>

Pretty impressive! Let's tweak the second image, corresponding to the `Generator` with a seed of `1`, by adding some text about the age of the subject:

```python
prompts = [
    "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
]

generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
make_image_grid(images, 2, 2)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png">
</div>

## Next steps

In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency, as well as how to improve the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:

- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference speed. On an A100 GPU, inference can be up to 50% faster!
- If you can't use PyTorch 2, we recommend installing [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16). A rough sketch of these options is shown below.
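
The sketch below summarizes those options (the `torch.compile` line assumes PyTorch 2.x, and the xFormers line assumes the `xformers` package is installed):

```python
# PyTorch 2.x: compile the UNet to speed up repeated inference
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

# PyTorch 1.13.x alternative: memory-efficient attention via xFormers
# pipeline.enable_xformers_memory_efficient_attention()

# Offload model components to the CPU when GPU memory is tight
# pipeline.enable_model_cpu_offload()
```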
diffusers/docs/source/zh/stable_diffusion.md/0
{ "file_path": "diffusers/docs/source/zh/stable_diffusion.md", "repo_id": "diffusers", "token_count": 6124 }
116
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Tuple, Union

import PIL.Image
import torch
from torchvision import transforms

from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils.torch_utils import randn_tensor


# Resize, convert to a tensor, and normalize pixel values to [-1, 1]
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            image (`torch.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            use_clipped_model_output (`bool`, *optional*, defaults to `None`):
                if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
                downstream to the scheduler. So use `None` for schedulers which don't support this argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict`
            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
            images.
        """
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
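

# Example usage (a rough sketch, not part of the original file; it assumes an
# unconditional DDPM/DDIM checkpoint such as "google/ddpm-ema-celebahq-256"
# and a PIL input image whose content you want to re-noise and then denoise):
#
#   from PIL import Image
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256").to("cuda")
#   init_image = Image.open("face.png")
#   images, noising_timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)
#   images[0].save("renoised_then_denoised.png")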
diffusers/examples/community/ddim_noise_comparative_analysis.py/0
{ "file_path": "diffusers/examples/community/ddim_noise_comparative_analysis.py", "repo_id": "diffusers", "token_count": 3415 }
117
# Copyright 2024 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a single file implementation of LMD+. See README.md for examples. import ast import gc import inspect import math import warnings from collections.abc import Iterable from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.attention import Attention, GatedSelfAttentionDense from diffusers.models.attention_processor import AttnProcessor2_0 from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines import DiffusionPipeline from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained( ... "longlian/lmd_plus", ... custom_pipeline="llm_grounded_diffusion", ... custom_revision="main", ... variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_model_cpu_offload() >>> # Generate an image described by the prompt and >>> # insert objects described by text at the region defined by bounding boxes >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]] >>> phrases = ["a waterfall", "a modern high speed train"] >>> images = pipe( ... prompt=prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") >>> # Generate directly from a text prompt and an LLM response >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\""" [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])] Background prompt: A beautiful forest with fall foliage Negative prompt: \""") >> images = pipe( ... prompt=prompt, ... 
negative_prompt=neg_prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") images[0] ``` """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)] # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`. DEFAULT_GUIDANCE_ATTN_KEYS = [ ("mid", 0, 0, 0), ("up", 1, 0, 0), ("up", 1, 1, 0), ("up", 1, 2, 0), ] def convert_attn_keys(key): """Convert the attention key from tuple format to the torch state format""" if key[0] == "mid": assert key[1] == 0, f"mid block only has one block but the index is {key[1]}" return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS] def scale_proportion(obj_box, H, W): # Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5". x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H) box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H) x_max, y_max = x_min + box_w, y_min + box_h x_min, y_min = max(x_min, 0), max(y_min, 0) x_max, y_max = min(x_max, W), min(y_max, H) return x_min, y_min, x_max, y_max # Adapted from the parent class `AttnProcessor2_0` class AttnProcessorWithHook(AttnProcessor2_0): def __init__( self, attn_processor_key, hidden_size, cross_attention_dim, hook=None, fast_attn=True, enabled=True, ): super().__init__() self.attn_processor_key = attn_processor_key self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.hook = hook self.fast_attn = fast_attn self.enabled = enabled def __call__( self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, scale: float = 1.0, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) args = () if USE_PEFT_BACKEND else (scale,) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) 
inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads if (self.hook is not None and self.enabled) or not self.fast_attn: query_batch_dim = attn.head_to_batch_dim(query) key_batch_dim = attn.head_to_batch_dim(key) value_batch_dim = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask) if self.hook is not None and self.enabled: # Call the hook with query, key, value, and attention maps self.hook( self.attn_processor_key, query_batch_dim, key_batch_dim, value_batch_dim, attention_probs, ) if self.fast_attn: query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attention_mask is not None: # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) else: hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LLMGroundedDiffusionPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf. This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control. Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. requires_safety_checker (bool): Whether a safety checker is needed for this pipeline. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] objects_text = "Objects: " bg_prompt_text = "Background prompt: " bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip() neg_prompt_text = "Negative prompt: " neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip() def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): # This is copied from StableDiffusionPipeline, with hook initizations for LMD+. super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Initialize the attention hooks for LLM-grounded Diffusion self.register_attn_hooks(unet) self._saved_attn = None def attn_hook(self, name, query, key, value, attention_probs): if name in DEFAULT_GUIDANCE_ATTN_KEYS: self._saved_attn[name] = attention_probs @classmethod def convert_box(cls, box, height, width): # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max x_min, y_min = box[0] / width, box[1] / height w_box, h_box = box[2] / width, box[3] / height x_max, y_max = x_min + w_box, y_min + h_box return x_min, y_min, x_max, y_max @classmethod def _parse_response_with_negative(cls, text): if not text: raise ValueError("LLM response is empty") if cls.objects_text in text: text = text.split(cls.objects_text)[1] text_split = text.split(cls.bg_prompt_text_no_trailing_space) if len(text_split) == 2: gen_boxes, text_rem = text_split else: raise ValueError(f"LLM response is incomplete: {text}") text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space) if len(text_split) == 2: bg_prompt, neg_prompt = text_split else: raise ValueError(f"LLM response is incomplete: {text}") try: gen_boxes = ast.literal_eval(gen_boxes) except SyntaxError as e: # Sometimes the response is in plain text if "No objects" in gen_boxes or gen_boxes.strip() == "": gen_boxes = [] else: raise e bg_prompt = bg_prompt.strip() neg_prompt = neg_prompt.strip() # LLM may return "None" to mean no negative prompt provided. 
if neg_prompt == "None": neg_prompt = "" return gen_boxes, bg_prompt, neg_prompt @classmethod def parse_llm_response(cls, response, canvas_height=512, canvas_width=512): # Infer from spec gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response) gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0]) phrases = [name for name, _ in gen_boxes] boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes] return phrases, boxes, bg_prompt, neg_prompt def check_inputs( self, prompt, height, width, callback_steps, phrases, boxes, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, phrase_indices=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt is None and phrase_indices is None: raise ValueError("If the prompt is None, the phrase_indices cannot be None") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
                )

        if len(phrases) != len(boxes):
            raise ValueError(
                "length of `phrases` and `boxes` must be the same, but"
                f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
            )

    def register_attn_hooks(self, unet):
        """Registering hooks to obtain the attention maps for guidance"""

        attn_procs = {}

        for name in unet.attn_processors.keys():
            # Only obtain the queries and keys from cross-attention
            if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
                # Keep the same attn_processors for self-attention (no hooks for self-attention)
                attn_procs[name] = unet.attn_processors[name]
                continue

            cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim

            if name.startswith("mid_block"):
                hidden_size = unet.config.block_out_channels[-1]
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = unet.config.block_out_channels[block_id]

            attn_procs[name] = AttnProcessorWithHook(
                attn_processor_key=name,
                hidden_size=hidden_size,
                cross_attention_dim=cross_attention_dim,
                hook=self.attn_hook,
                fast_attn=True,
                # Not enabled by default
                enabled=False,
            )

        unet.set_attn_processor(attn_procs)

    def enable_fuser(self, enabled=True):
        for module in self.unet.modules():
            if isinstance(module, GatedSelfAttentionDense):
                module.enabled = enabled

    def enable_attn_hook(self, enabled=True):
        for module in self.unet.attn_processors.values():
            if isinstance(module, AttnProcessorWithHook):
                module.enabled = enabled

    def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
        """Get a list of mapping: prompt index to str (prompt in a list of token str)"""
        fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
        input_ids = fg_prompt_tokens["input_ids"][0]

        token_map = []
        for ind, item in enumerate(input_ids.tolist()):
            token = self.tokenizer._convert_id_to_token(item)

            if verbose:
                logger.info(f"{ind}, {token} ({item})")

            token_map.append(token)

        return token_map

    def get_phrase_indices(
        self,
        prompt,
        phrases,
        token_map=None,
        add_suffix_if_not_found=False,
        verbose=False,
    ):
        for obj in phrases:
            # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
            if obj not in prompt:
                prompt += "| " + obj

        if token_map is None:
            # We allow using a pre-computed token map.
            token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
        token_map_str = " ".join(token_map)

        phrase_indices = []

        for obj in phrases:
            phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
            # Remove <bos> and <eos> in substr
            phrase_token_map = phrase_token_map[1:-1]

            phrase_token_map_len = len(phrase_token_map)
            phrase_token_map_str = " ".join(phrase_token_map)

            if verbose:
                logger.info(f"Full str: {token_map_str}, Substr: {phrase_token_map_str}, Phrase: {phrases}")

            # Count the number of tokens before the substring
            # The substring comes with a trailing space that needs to be removed by minus one in the index.
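            # Hypothetical walk-through (added for illustration, not in the original file):
            # if token_map_str is "<|startoftext|> a gray</w> cat</w> <|endoftext|>" and the
            # phrase is "gray cat", then phrase_token_map_str is "gray</w> cat</w>"; the
            # character-index search below locates it, and splitting the preceding text on
            # spaces counts the tokens in front of it, yielding the phrase's token positions.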
            obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))

            obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
            phrase_indices.append(obj_position)

        if add_suffix_if_not_found:
            return phrase_indices, prompt

        return phrase_indices

    def add_ca_loss_per_attn_map_to_loss(
        self,
        loss,
        attn_map,
        object_number,
        bboxes,
        phrase_indices,
        fg_top_p=0.2,
        bg_top_p=0.2,
        fg_weight=1.0,
        bg_weight=1.0,
    ):
        # b is the number of heads, not batch
        b, i, j = attn_map.shape
        H = W = int(math.sqrt(i))
        for obj_idx in range(object_number):
            obj_loss = 0
            mask = torch.zeros(size=(H, W), device="cuda")
            obj_boxes = bboxes[obj_idx]

            # We support two levels (one box per phrase) and three levels (multiple boxes per phrase)
            if not isinstance(obj_boxes[0], Iterable):
                obj_boxes = [obj_boxes]

            for obj_box in obj_boxes:
                # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
                x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
                mask[y_min:y_max, x_min:x_max] = 1

            for obj_position in phrase_indices[obj_idx]:
                # Could potentially optimize to compute this for loop in batch.
                # Could crop the ref cross attention before saving to save memory.

                # shape: (b, H * W)
                ca_map_obj = attn_map[:, :, obj_position]  # .reshape(b, H, W)

                k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
                k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)

                mask_1d = mask.view(1, -1)

                # Max-based loss function

                # Take the topk over spatial dimension, and then take the sum over heads dim
                # The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
                obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
                obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight

            loss += obj_loss / len(phrase_indices[obj_idx])

        return loss

    def compute_ca_loss(
        self,
        saved_attn,
        bboxes,
        phrase_indices,
        guidance_attn_keys,
        verbose=False,
        **kwargs,
    ):
        """
        The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to
        computing this loss. `AttnProcessor` will put attention maps into the `save_attn_to_dict`.

        `index` is the timestep.
        `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we
        take the token from word rather than the last token).
        `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase
        in single object generation, so we apply it only to the last token of the phrase in overall generation if
        this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
        """
        loss = torch.tensor(0).float().cuda()
        object_number = len(bboxes)
        if object_number == 0:
            return loss

        for attn_key in guidance_attn_keys:
            # We only have 1 cross attention for mid.
attn_map_integrated = saved_attn[attn_key] if not attn_map_integrated.is_cuda: attn_map_integrated = attn_map_integrated.cuda() # Example dimension: [20, 64, 77] attn_map = attn_map_integrated.squeeze(dim=0) loss = self.add_ca_loss_per_attn_map_to_loss( loss, attn_map, object_number, bboxes, phrase_indices, **kwargs ) num_attn = len(guidance_attn_keys) if num_attn > 0: loss = loss / (object_number * num_attn) return loss @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, gligen_scheduled_sampling_beta: float = 0.3, phrases: List[str] = None, boxes: List[List[float]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, lmd_guidance_kwargs: Optional[Dict[str, Any]] = {}, phrase_indices: Optional[List[int]] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. phrases (`List[str]`): The phrases to guide what to include in each of the regions defined by the corresponding `boxes`. There should only be one phrase per bounding box. boxes (`List[List[float]]`): The bounding boxes that identify rectangular regions of the image that are going to be filled with the content described by the corresponding `phrases`. Each rectangular box is defined as a `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. gligen_scheduled_sampling_beta (`float`, defaults to 0.3): Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for scheduled sampling during inference for improved quality and controllability. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting).
                If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            lmd_guidance_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `latent_lmd_guidance` function. Useful
                keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this
                value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for
                each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See
                `latent_lmd_guidance` for implementation details.
            phrase_indices (`list` of `list`, *optional*):
                The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match
                the first token subsequence.
                The pipeline will append the missing phrases to the end of the prompt by default.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            phrases,
            boxes,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            phrase_indices,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            if phrase_indices is None:
                phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
            if phrase_indices is None:
                phrase_indices = []
                prompt_parsed = []
                for prompt_item in prompt:
                    (
                        phrase_indices_parsed_item,
                        prompt_parsed_item,
                    ) = self.get_phrase_indices(prompt_item, phrases, add_suffix_if_not_found=True)
                    phrase_indices.append(phrase_indices_parsed_item)
                    prompt_parsed.append(prompt_parsed_item)
                prompt = prompt_parsed
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            clip_skip=clip_skip,
        )

        cond_prompt_embeds = prompt_embeds

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        if ip_adapter_image is not None:
            image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
            if do_classifier_free_guidance:
                image_embeds = torch.cat([negative_image_embeds, image_embeds])

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 5.1 Prepare GLIGEN variables
        max_objs = 30
        if len(boxes) > max_objs:
            warnings.warn(
                f"More than {max_objs} objects found. Only the first {max_objs} objects will be processed.",
                FutureWarning,
            )
            phrases = phrases[:max_objs]
            boxes = boxes[:max_objs]

        n_objs = len(boxes)
        if n_objs:
            # prepare batched input to the PositionNet (boxes, phrases, mask)
            # Get tokens for phrases from pre-trained CLIPTokenizer
            tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
            # For the token, we use the same pre-trained text encoder
            # to obtain its text feature
            _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output

        # Each entity described in `phrases` is denoted with a bounding box;
        # we represent the location information as (xmin, ymin, xmax, ymax)
        cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
        if n_objs:
            cond_boxes[:n_objs] = torch.tensor(boxes)
        text_embeddings = torch.zeros(
            max_objs,
            self.unet.config.cross_attention_dim,
            device=device,
            dtype=self.text_encoder.dtype,
        )
        if n_objs:
            text_embeddings[:n_objs] = _text_embeddings
        # Generate a mask for each entity described in `phrases`
        masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
        masks[:n_objs] = 1

        repeat_batch = batch_size * num_images_per_prompt
        cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
        text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
        masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
        if do_classifier_free_guidance:
            repeat_batch = repeat_batch * 2
            cond_boxes = torch.cat([cond_boxes] * 2)
            text_embeddings = torch.cat([text_embeddings] * 2)
            masks = torch.cat([masks] * 2)
            masks[: repeat_batch // 2] = 0
        if cross_attention_kwargs is None:
            cross_attention_kwargs = {}
        cross_attention_kwargs["gligen"] = {
            "boxes": cond_boxes,
            "positive_embeddings": text_embeddings,
            "masks": masks,
        }

        num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
        self.enable_fuser(True)

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Add image embeds for IP-Adapter
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        loss_attn = torch.tensor(10000.0)

        # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Scheduled sampling if i == num_grounding_steps: self.enable_fuser(False) if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) # 7.1 Perform LMD guidance if boxes: latents, loss_attn = self.latent_lmd_guidance( cond_prompt_embeds, index=i, boxes=boxes, phrase_indices=phrase_indices, t=t, latents=latents, loss=loss_attn, **lmd_guidance_kwargs, ) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.set_grad_enabled(True) def latent_lmd_guidance( self, cond_embeddings, index, boxes, phrase_indices, t, latents, loss, *, loss_scale=20, loss_threshold=5.0, max_iter=[3] * 5 + [2] * 5 + [1] * 5, guidance_timesteps=15, cross_attention_kwargs=None, guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS, verbose=False, clear_cache=False, unet_additional_kwargs={}, guidance_callback=None, **kwargs, ): scheduler, unet = self.scheduler, self.unet iteration = 0 if index < guidance_timesteps: if isinstance(max_iter, list): max_iter = max_iter[index] if verbose: logger.info( f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}" ) try: self.enable_attn_hook(enabled=True) while ( loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps ): self._saved_attn = {} latents.requires_grad_(True) latent_model_input = latents latent_model_input = scheduler.scale_model_input(latent_model_input, t) unet( latent_model_input, t, encoder_hidden_states=cond_embeddings, cross_attention_kwargs=cross_attention_kwargs, **unet_additional_kwargs, ) 
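                    # Note (clarifying comment, not in the original file): the forward pass
                    # above is executed only for its side effect. The hooks enabled via
                    # `enable_attn_hook` populate `self._saved_attn` with cross-attention
                    # maps, and the returned noise prediction is intentionally discarded.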
# update latents with guidance loss = ( self.compute_ca_loss( saved_attn=self._saved_attn, bboxes=boxes, phrase_indices=phrase_indices, guidance_attn_keys=guidance_attn_keys, verbose=verbose, **kwargs, ) * loss_scale ) if torch.isnan(loss): raise RuntimeError("**Loss is NaN**") # This callback allows visualizations. if guidance_callback is not None: guidance_callback(self, latents, loss, iteration, index) self._saved_attn = None grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0] latents.requires_grad_(False) # Scaling with classifier guidance alpha_prod_t = scheduler.alphas_cumprod[t] # Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf # DDIM: https://arxiv.org/pdf/2010.02502.pdf scale = (1 - alpha_prod_t) ** (0.5) latents = latents - scale * grad_cond iteration += 1 if clear_cache: gc.collect() torch.cuda.empty_cache() if verbose: logger.info( f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}" ) finally: self.enable_attn_hook(enabled=False) return latents, loss # Below are methods copied from StableDiffusionPipeline # The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True, ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def 
run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Guidance scale values at which to generate embedding vectors.
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
    def guidance_scale(self):
        return self._guidance_scale

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
    def num_timesteps(self):
        return self._num_timesteps
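# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original file). The
# checkpoint id "longlian/lmd_plus" and the exact LLM response text are
# assumptions; the response format follows `parse_llm_response` above, with
# boxes given as [x, y, w, h] on a 512x512 canvas.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "longlian/lmd_plus",  # assumed LMD+ checkpoint
#       custom_pipeline="llm_grounded_diffusion",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#
#   llm_response = (
#       "Objects: [('a waterfall', [71, 105, 148, 258]), ('a deer', [300, 291, 120, 194])]\n"
#       "Background prompt: A dense forest\n"
#       "Negative prompt: None"
#   )
#   phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(llm_response)
#   image = pipe(
#       prompt="a waterfall and a deer in a dense forest",
#       negative_prompt=neg_prompt,
#       phrases=phrases,
#       boxes=boxes,
#       gligen_scheduled_sampling_beta=0.4,
#   ).images[0]
# ---------------------------------------------------------------------------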
diffusers/examples/community/llm_grounded_diffusion.py/0
{ "file_path": "diffusers/examples/community/llm_grounded_diffusion.py", "repo_id": "diffusers", "token_count": 32773 }
118
# Copyright 2024 HunyuanDiT Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import ( BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel, ) from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.models import AutoencoderKL, HunyuanDiT2DModel from diffusers.models.embeddings import get_2d_rotary_pos_embed from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) from diffusers.schedulers import DDPMScheduler from diffusers.utils import ( deprecate, is_torch_xla_available, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import FlowMatchEulerDiscreteScheduler >>> from diffusers.utils import load_image >>> from PIL import Image >>> from torchvision import transforms >>> from pipeline_hunyuandit_differential_img2img import HunyuanDiTDifferentialImg2ImgPipeline >>> pipe = HunyuanDiTDifferentialImg2ImgPipeline.from_pretrained( >>> "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16 >>> ).to("cuda") >>> source_image = load_image( >>> "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/20240329211129_4024911930.png" >>> ) >>> map = load_image( >>> "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/gradient_mask_2.png" >>> ) >>> prompt = "a green pear" >>> negative_prompt = "blurry" >>> image = pipe( >>> prompt=prompt, >>> negative_prompt=negative_prompt, >>> image=source_image, >>> num_inference_steps=28, >>> guidance_scale=4.5, >>> strength=1.0, >>> map=map, >>> ).images[0] ``` """ STANDARD_RATIO = np.array( [ 1.0, # 1:1 4.0 / 3.0, # 4:3 3.0 / 4.0, # 3:4 16.0 / 9.0, # 16:9 9.0 / 16.0, # 9:16 ] ) STANDARD_SHAPE = [ [(1024, 1024), (1280, 1280)], # 1:1 [(1024, 768), (1152, 864), (1280, 960)], # 4:3 [(768, 1024), (864, 1152), (960, 1280)], # 3:4 [(1280, 768)], # 16:9 [(768, 1280)], # 9:16 ] STANDARD_AREA = [np.array([w * h for w, h in shapes]) for shapes in STANDARD_SHAPE] SUPPORTED_SHAPE = [ (1024, 1024), (1280, 1280), # 1:1 (1024, 768), (1152, 864), (1280, 960), # 4:3 (768, 1024), (864, 1152), (960, 1280), # 3:4 (1280, 768), # 16:9 (768, 1280), # 9:16 ] def map_to_standard_shapes(target_width, target_height): target_ratio = target_width / target_height closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) closest_area_idx = 
np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) width, height = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] return width, height def get_resize_crop_region_for_grid(src, tgt_size): th = tw = tgt_size h, w = src r = h / w # resize if r > 1: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample", ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class HunyuanDiTDifferentialImg2ImgPipeline(DiffusionPipeline): r""" Differential Pipeline for English/Chinese-to-image generation using HunyuanDiT. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and [bilingual CLIP](fine-tuned by ourselves) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. We use `sdxl-vae-fp16-fix`. text_encoder (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). HunyuanDiT uses a fine-tuned [bilingual CLIP]. tokenizer (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]): A `BertTokenizer` or `CLIPTokenizer` to tokenize text. transformer ([`HunyuanDiT2DModel`]): The HunyuanDiT model designed by Tencent Hunyuan. text_encoder_2 (`T5EncoderModel`): The mT5 embedder. Specifically, it is 't5-v1_1-xxl'. tokenizer_2 (`MT5Tokenizer`): The tokenizer for the mT5 embedder. scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _optional_components = [ "safety_checker", "feature_extractor", "text_encoder_2", "tokenizer_2", "text_encoder", "tokenizer", ] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "prompt_embeds_2", "negative_prompt_embeds_2", ] def __init__( self, vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, text_encoder_2=T5EncoderModel, tokenizer_2=MT5Tokenizer, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, text_encoder_2=text_encoder_2, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_convert_grayscale=True, ) self.register_to_config(requires_safety_checker=requires_safety_checker) self.default_sample_size = ( self.transformer.config.sample_size if hasattr(self, "transformer") and self.transformer is not None else 128 ) # copied from diffusers.pipelines.huanyuandit.pipeline_huanyuandit.HunyuanDiTPipeline.encode_prompt def encode_prompt( self, prompt: str, device: torch.device = None, dtype: torch.dtype = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: Optional[int] = None, text_encoder_index: int = 0, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. text_encoder_index (`int`, *optional*): Index of the text encoder to use. `0` for clip and `1` for T5. """ if dtype is None: if self.text_encoder_2 is not None: dtype = self.text_encoder_2.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None if device is None: device = self._execution_device tokenizers = [self.tokenizer, self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = tokenizers[text_encoder_index] text_encoder = text_encoders[text_encoder_index] if max_sequence_length is None: if text_encoder_index == 0: max_length = 77 if text_encoder_index == 1: max_length = 256 else: max_length = max_sequence_length if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = text_encoder( text_input_ids.to(device), attention_mask=prompt_attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * 
num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, prompt_embeds_2=None, negative_prompt_embeds_2=None, prompt_attention_mask_2=None, negative_prompt_attention_mask_2=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is None and prompt_embeds_2 is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: raise ValueError("Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: raise ValueError( "Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: raise ValueError( "`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but" f" got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2`" f" {negative_prompt_embeds_2.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, image, timestep, dtype, device, generator=None, ): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) image = image.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = init_latents * self.vae.config.scaling_factor if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate( "len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False, ) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, strength: float = 0.8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: Optional[int] = 50, timesteps: List[int] = None, sigmas: List[float] = None, guidance_scale: Optional[float] = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_embeds_2: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds_2: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, prompt_attention_mask_2: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[ Union[ Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks, ] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = (1024, 1024), target_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), use_resolution_binning: bool = True, map: PipelineImageInput = None, denoising_start: Optional[float] = None, ): r""" The call function to the pipeline for generation with HunyuanDiT. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. 
The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. height (`int`): The height in pixels of the generated image. width (`int`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. negative_prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. prompt_attention_mask_2 (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. 
negative_prompt_attention_mask_2 (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A callback function or a list of callback functions to be called at the end of each denoising step. callback_on_step_end_tensor_inputs (`List[str]`, *optional*): A list of tensor inputs that should be passed to the callback function. If not defined, all tensor inputs will be passed. guidance_rescale (`float`, *optional*, defaults to 0.0): Rescale the noise_cfg according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): The original size of the image. Used to calculate the time ids. target_size (`Tuple[int, int]`, *optional*): The target size of the image. Used to calculate the time ids. crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): The top left coordinates of the crop. Used to calculate the time ids. use_resolution_binning (`bool`, *optional*, defaults to `True`): Whether to use resolution binning or not. If `True`, the input resolution will be mapped to the closest standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. 
default height and width height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor height = int((height // 16) * 16) width = int((width // 16) * 16) if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: width, height = map_to_standard_shapes(width, height) height = int(height) width = int(width) logger.warning(f"Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}") # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=77, text_encoder_index=0, ) ( prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, ) = self.encode_prompt( prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds_2, negative_prompt_embeds=negative_prompt_embeds_2, prompt_attention_mask=prompt_attention_mask_2, negative_prompt_attention_mask=negative_prompt_attention_mask_2, max_sequence_length=256, text_encoder_index=1, ) # 4. Preprocess image init_image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) map = self.mask_processor.preprocess( map, height=height // self.vae_scale_factor, width=width // self.vae_scale_factor, ).to(device) # 5. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) # begin diff diff change total_time_steps = num_inference_steps # end diff diff change timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, init_image, latent_timestep, prompt_embeds.dtype, device, generator, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
create image_rotary_emb, style embedding & time ids grid_height = height // 8 // self.transformer.config.patch_size grid_width = width // 8 // self.transformer.config.patch_size base_size = 512 // 8 // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) image_rotary_emb = get_2d_rotary_pos_embed( self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width), ) style = torch.tensor([0], device=device) target_size = target_size or (height, width) add_time_ids = list(original_size + target_size + crops_coords_top_left) add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) add_time_ids = torch.cat([add_time_ids] * 2, dim=0) style = torch.cat([style] * 2, dim=0) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) prompt_embeds_2 = prompt_embeds_2.to(device=device) prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat( batch_size * num_images_per_prompt, 1 ) style = style.to(device=device).repeat(batch_size * num_images_per_prompt) # 9. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # preparations for diff diff original_with_noise = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, init_image, timesteps, prompt_embeds.dtype, device, generator, ) thresholds = torch.arange(total_time_steps, dtype=map.dtype) / total_time_steps thresholds = thresholds.unsqueeze(1).unsqueeze(1).to(device) masks = map.squeeze() > (thresholds + (denoising_start or 0)) # end diff diff preparations self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # diff diff if i == 0 and denoising_start is None: latents = original_with_noise[:1] else: mask = masks[i].unsqueeze(0).to(latents.dtype) mask = mask.unsqueeze(1) # fit shape latents = original_with_noise[i] * mask + latents * (1 - mask) # end diff diff # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( dtype=latent_model_input.dtype ) # predict the noise residual noise_pred = self.transformer( latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False, )[0] noise_pred, _ = noise_pred.chunk(2, dim=1) # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * 
(noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) prompt_embeds_2 = callback_outputs.pop("prompt_embeds_2", prompt_embeds_2) negative_prompt_embeds_2 = callback_outputs.pop( "negative_prompt_embeds_2", negative_prompt_embeds_2 ) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/examples/community/pipeline_hunyuandit_differential_img2img.py/0
{ "file_path": "diffusers/examples/community/pipeline_hunyuandit_differential_img2img.py", "repo_id": "diffusers", "token_count": 24586 }
119
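For orientation, a minimal invocation sketch for the differential img2img pipeline above. The `custom_pipeline` name matches the file path recorded for this row; the checkpoint id and the input file names are illustrative assumptions, not taken from the file itself:

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Load the community pipeline defined above (checkpoint id is an assumption).
pipe = DiffusionPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    custom_pipeline="pipeline_hunyuandit_differential_img2img",
    torch_dtype=torch.float16,
).to("cuda")

source = load_image("input.png")    # image to be edited (assumed local file)
change_map = load_image("map.png")  # grayscale change map; see the `thresholds`
                                    # logic in `__call__` for the exact convention

result = pipe(
    prompt="a fantasy castle at golden hour",
    image=source,
    map=change_map,                 # drives the per-pixel denoising schedule
    strength=1.0,
    num_inference_steps=50,
    guidance_scale=5.0,
).images[0]
result.save("edited.png")
```

The `map` argument is what distinguishes this differential variant from plain img2img: instead of one global `strength`, each pixel is re-noised from the source image for a number of steps determined by its map value, per the `thresholds`/`masks` comparison in the denoising loop above.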
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F import torchvision.transforms as T from gmflow.gmflow import GMFlow from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.image_processor import VaeImageProcessor from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from diffusers.models.attention_processor import Attention, AttnProcessor from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import BaseOutput, deprecate, logging from diffusers.utils.torch_utils import is_compiled_module, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def coords_grid(b, h, w, homogeneous=False, device=None): y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W] stacks = [x, y] if homogeneous: ones = torch.ones_like(x) # [H, W] stacks.append(ones) grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W] grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W] if device is not None: grid = grid.to(device) return grid def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False): # img: [B, C, H, W] # sample_coords: [B, 2, H, W] in image scale if sample_coords.size(1) != 2: # [B, H, W, 2] sample_coords = sample_coords.permute(0, 3, 1, 2) b, _, h, w = sample_coords.shape # Normalize to [-1, 1] x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1 y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1 grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2] img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True) if return_mask: mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W] return img, mask return img def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"): b, c, h, w = feature.size() assert flow.size(1) == 2 grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W] grid = grid.to(feature.dtype) return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask) def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5): # fwd_flow, bwd_flow: [B, 2, H, W] # alpha and beta values are following UnFlow # (https://arxiv.org/abs/1711.07837) assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4 assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2 flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W] warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W] 
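    # Round-trip check: for a pixel visible in both frames, following the forward
    # flow and then the backward flow (warped into frame 1) should return to the
    # starting point, so the residual |fwd_flow + warp(bwd_flow)| is close to zero.
    # Pixels whose residual exceeds the magnitude-dependent threshold below are
    # flagged as occluded.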
warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W] diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W] diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1) threshold = alpha * flow_mag + beta fwd_occ = (diff_fwd > threshold).float() # [B, H, W] bwd_occ = (diff_bwd > threshold).float() return fwd_occ, bwd_occ @torch.no_grad() def get_warped_and_mask(flow_model, image1, image2, image3=None, pixel_consistency=False, device=None): if image3 is None: image3 = image1 padder = InputPadder(image1.shape, padding_factor=8) image1, image2 = padder.pad(image1[None].to(device), image2[None].to(device)) results_dict = flow_model( image1, image2, attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True ) flow_pr = results_dict["flow_preds"][-1] # [B, 2, H, W] fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0) # [1, 2, H, W] bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0) # [1, 2, H, W] fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow) # [1, H, W] float if pixel_consistency: warped_image1 = flow_warp(image1, bwd_flow) bwd_occ = torch.clamp( bwd_occ + (abs(image2 - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1 ).unsqueeze(0) warped_results = flow_warp(image3, bwd_flow) return warped_results, bwd_occ, bwd_flow blur = T.GaussianBlur(kernel_size=(9, 9), sigma=(18, 18)) @dataclass class TextToVideoSDPipelineOutput(BaseOutput): """ Output class for text-to-video pipelines. Args: frames (`List[np.ndarray]` or `torch.Tensor`) List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as a `torch` tensor. The length of the list denotes the video length (the number of frames). """ frames: Union[List[np.ndarray], torch.Tensor] @torch.no_grad() def find_flat_region(mask): device = mask.device kernel_x = torch.Tensor([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0).to(device) kernel_y = torch.Tensor([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]).unsqueeze(0).unsqueeze(0).to(device) mask_ = F.pad(mask.unsqueeze(0), (1, 1, 1, 1), mode="replicate") grad_x = torch.nn.functional.conv2d(mask_, kernel_x) grad_y = torch.nn.functional.conv2d(mask_, kernel_y) return ((abs(grad_x) + abs(grad_y)) == 0).float()[0] class AttnState: STORE = 0 LOAD = 1 LOAD_AND_STORE_PREV = 2 def __init__(self): self.reset() @property def state(self): return self.__state @property def timestep(self): return self.__timestep def set_timestep(self, t): self.__timestep = t def reset(self): self.__state = AttnState.STORE self.__timestep = 0 def to_load(self): self.__state = AttnState.LOAD def to_load_and_store_prev(self): self.__state = AttnState.LOAD_AND_STORE_PREV class CrossFrameAttnProcessor(AttnProcessor): """ Cross frame attention processor. Each frame attends the first frame and previous frame. 
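    In self-attention layers, the hidden states stored for the first and the previous frame are concatenated and
    used as the key/value context of the current frame, which keeps appearance consistent across the video.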
Args: attn_state: Whether the model is processing the first frame or an intermediate frame """ def __init__(self, attn_state: AttnState): super().__init__() self.attn_state = attn_state self.first_maps = {} self.prev_maps = {} def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): # Is self attention if encoder_hidden_states is None: t = self.attn_state.timestep if self.attn_state.state == AttnState.STORE: self.first_maps[t] = hidden_states.detach() self.prev_maps[t] = hidden_states.detach() res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb) else: if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV: tmp = hidden_states.detach() cross_map = torch.cat((self.first_maps[t], self.prev_maps[t]), dim=1) res = super().__call__(attn, hidden_states, cross_map, attention_mask, temb) if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV: self.prev_maps[t] = tmp else: res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb) return res def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline): r""" Pipeline for video-to-video translation using Stable Diffusion with Rerender Algorithm. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) In addition the pipeline inherits the following loading methods: - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder=None, requires_safety_checker: bool = True, device=None, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, image_encoder, requires_safety_checker, ) self.to(device) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.register_to_config(requires_safety_checker=requires_safety_checker) self.attn_state = AttnState() attn_processor_dict = {} for k in unet.attn_processors.keys(): if k.startswith("up"): attn_processor_dict[k] = CrossFrameAttnProcessor(self.attn_state) else: attn_processor_dict[k] = AttnProcessor() self.unet.set_attn_processor(attn_processor_dict) flow_model = GMFlow( feature_channels=128, num_scales=1, upsample_factor=8, num_head=1, attention_type="swin", ffn_dim_expansion=4, num_transformer_layers=6, ).to(self.device) checkpoint = torch.utils.model_zoo.load_url( "https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth", map_location=lambda storage, loc: storage, ) weights = checkpoint["model"] if "model" in checkpoint else checkpoint flow_model.load_state_dict(weights, strict=False) flow_model.eval() self.flow_model = flow_model # Modified from src/diffusers/pipelines/controlnet/pipeline_controlnet.StableDiffusionControlNetImg2ImgPipeline.check_inputs def check_inputs( self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # `prompt` needs more sophisticated handling when there are multiple # conditionings. if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning( f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" " prompts. 
The conditionings will be fixed across the prompts." ) is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." 
) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, frames: Union[List[np.ndarray], torch.Tensor] = None, control_frames: Union[List[np.ndarray], torch.Tensor] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 0.8, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, warp_start: Union[float, List[float]] = 0.0, warp_end: Union[float, List[float]] = 0.3, mask_start: Union[float, List[float]] = 0.5, mask_end: Union[float, List[float]] = 0.8, smooth_boundary: bool = True, mask_strength: Union[float, List[float]] = 0.5, inner_strength: Union[float, List[float]] = 0.9, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. frames (`List[np.ndarray]` or `torch.Tensor`): The input images to be used as the starting point for the image generation process. control_frames (`List[np.ndarray]` or `torch.Tensor`): The ControlNet input images condition to provide guidance to the `unet` for generation. strength ('float'): SDEdit strength. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
                corresponding scale as a list. Note that by default, we use a smaller conditioning scale for
                inpainting than for [`~StableDiffusionControlNetPipeline.__call__`].
            guess_mode (`bool`, *optional*, defaults to `False`):
                In this mode, the ControlNet encoder will try its best to recognize the content of the input image
                even if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
                The percentage of total steps at which the controlnet starts applying.
            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
                The percentage of total steps at which the controlnet stops applying.
            warp_start (`float`): Shape-aware fusion start timestep.
            warp_end (`float`): Shape-aware fusion end timestep.
            mask_start (`float`): Pixel-aware fusion start timestep.
            mask_end (`float`): Pixel-aware fusion end timestep.
            smooth_boundary (`bool`): Smooth fusion boundary.
Set `True` to prevent artifacts at boundary. mask_strength (`float`): Pixel-aware fusion strength. inner_strength (`float`): Pixel-aware fusion detail level. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters # Currently we only support 1 prompt if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): assert False else: assert False num_images_per_prompt = 1 device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. 
Process the first frame height, width = None, None output_frames = [] self.attn_state.reset() # 4.1 prepare frames image = self.image_processor.preprocess(frames[0]).to(dtype=torch.float32) first_image = image[0] # C, H, W # 4.2 Prepare controlnet_conditioning_image # Currently we only support single control if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_frames[0], width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) else: assert False # 4.3 Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) # 4.4 Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, ) # 4.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 4.6 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) first_x0_list = [] # 4.7 Denoising loop num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order with self.progress_bar(total=cur_num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): self.attn_state.set_timestep(t.item()) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
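                    # The zeroed half comes first because `_encode_prompt` packs the
                    # embeddings as [uncond, cond], so `noise_pred.chunk(2)` later
                    # yields (uncond, text) in the same order.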
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) alpha_prod_t = self.scheduler.alphas_cumprod[t] beta_prod_t = 1 - alpha_prod_t pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) first_x0 = pred_x0.detach() first_x0_list.append(first_x0) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents first_result = image prev_result = image do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) output_frames.append(image[0]) # 5. Process each frame for idx in range(1, len(frames)): image = frames[idx] prev_image = frames[idx - 1] control_image = control_frames[idx] # 5.1 prepare frames image = self.image_processor.preprocess(image).to(dtype=torch.float32) prev_image = self.image_processor.preprocess(prev_image).to(dtype=torch.float32) warped_0, bwd_occ_0, bwd_flow_0 = get_warped_and_mask( self.flow_model, first_image, image[0], first_result, False, self.device ) blend_mask_0 = blur(F.max_pool2d(bwd_occ_0, kernel_size=9, stride=1, padding=4)) blend_mask_0 = torch.clamp(blend_mask_0 + bwd_occ_0, 0, 1) warped_pre, bwd_occ_pre, bwd_flow_pre = get_warped_and_mask( self.flow_model, prev_image[0], image[0], prev_result, False, self.device ) blend_mask_pre = blur(F.max_pool2d(bwd_occ_pre, kernel_size=9, stride=1, padding=4)) blend_mask_pre = torch.clamp(blend_mask_pre + bwd_occ_pre, 0, 1) warp_mask = 1 - F.max_pool2d(blend_mask_0, kernel_size=8) warp_flow = F.interpolate(bwd_flow_0 / 8.0, scale_factor=1.0 / 8, mode="bilinear") # 5.2 Prepare controlnet_conditioning_image # Currently we only support single control if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) else: assert False # 5.3 Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) skip_t = int(num_inference_steps * (1 - strength)) warp_start_t = int(warp_start * num_inference_steps) warp_end_t = int(warp_end * num_inference_steps) mask_start_t = int(mask_start * num_inference_steps) mask_end_t = int(mask_end * num_inference_steps) # 
5.4 Prepare latent variables init_latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, ) # 5.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 5.6 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 5.7 Denoising loop num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order def denoising_loop(latents, mask=None, xtrg=None, noise_rescale=None): dir_xt = 0 latents_dtype = latents.dtype with self.progress_bar(total=cur_num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): self.attn_state.set_timestep(t.item()) if i + skip_t >= mask_start_t and i + skip_t <= mask_end_t and xtrg is not None: rescale = torch.maximum(1.0 - mask, (1 - mask**2) ** 0.5 * inner_strength) if noise_rescale is not None: rescale = (1.0 - mask) * (1 - noise_rescale) + rescale * noise_rescale noise = randn_tensor(xtrg.shape, generator=generator, device=device, dtype=xtrg.dtype) latents_ref = self.scheduler.add_noise(xtrg, noise, t) latents = latents_ref * mask + (1.0 - mask) * (latents - dir_xt) + rescale * dir_xt latents = latents.to(latents_dtype) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [ torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples ] mid_block_res_sample = torch.cat( [torch.zeros_like(mid_block_res_sample), mid_block_res_sample] ) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # Get pred_x0 from scheduler alpha_prod_t = self.scheduler.alphas_cumprod[t] beta_prod_t = 1 - alpha_prod_t pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) if i + skip_t >= warp_start_t and i + skip_t <= warp_end_t: # warp x_0 pred_x0 = ( flow_warp(first_x0_list[i], warp_flow, mode="nearest") * warp_mask + (1 - warp_mask) * pred_x0 ) # get x_t from x_0 latents = self.scheduler.add_noise(pred_x0, noise_pred, t).to(latents_dtype) prev_t = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps if i == len(timesteps) - 1: alpha_t_prev = 1.0 else: alpha_t_prev = self.scheduler.alphas_cumprod[prev_t] dir_xt = (1.0 - alpha_t_prev) ** 0.5 * noise_pred # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[ 0 ] # call the callback, if provided if i == len(timesteps) - 1 or ( (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 ): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) return latents if mask_start_t <= mask_end_t: self.attn_state.to_load() else: self.attn_state.to_load_and_store_prev() latents = denoising_loop(init_latents) if mask_start_t <= mask_end_t: direct_result = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] blend_results = (1 - blend_mask_pre) * warped_pre + blend_mask_pre * direct_result blend_results = (1 - blend_mask_0) * warped_0 + blend_mask_0 * blend_results bwd_occ = 1 - torch.clamp(1 - bwd_occ_pre + 1 - bwd_occ_0, 0, 1) blend_mask = blur(F.max_pool2d(bwd_occ, kernel_size=9, stride=1, padding=4)) blend_mask = 1 - torch.clamp(blend_mask + bwd_occ, 0, 1) blend_results = blend_results.to(latents.dtype) xtrg = self.vae.encode(blend_results).latent_dist.sample(generator) xtrg = self.vae.config.scaling_factor * xtrg blend_results_rec = self.vae.decode(xtrg / self.vae.config.scaling_factor, return_dict=False)[0] xtrg_rec = self.vae.encode(blend_results_rec).latent_dist.sample(generator) xtrg_rec = self.vae.config.scaling_factor * xtrg_rec xtrg_ = xtrg + (xtrg - xtrg_rec) blend_results_rec_new = self.vae.decode(xtrg_ / self.vae.config.scaling_factor, return_dict=False)[0] tmp = (abs(blend_results_rec_new - blend_results).mean(dim=1, keepdims=True) > 0.25).float() mask_x = F.max_pool2d( (F.interpolate(tmp, scale_factor=1 / 8.0, mode="bilinear") > 0).float(), kernel_size=3, stride=1, padding=1, ) mask = 1 - F.max_pool2d(1 - blend_mask, kernel_size=8) # * (1-mask_x) if smooth_boundary: noise_rescale = find_flat_region(mask) else: noise_rescale = torch.ones_like(mask) xtrg = (xtrg + (1 - mask_x) * (xtrg - xtrg_rec)) * mask xtrg = xtrg.to(latents.dtype) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, cur_num_inference_steps = 
self.get_timesteps(num_inference_steps, strength, device) self.attn_state.to_load_and_store_prev() latents = denoising_loop(init_latents, mask * mask_strength, xtrg, noise_rescale) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents prev_result = image do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) output_frames.append(image[0]) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return output_frames return TextToVideoSDPipelineOutput(frames=output_frames) class InputPadder: """Pads images such that dimensions are divisible by 8""" def __init__(self, dims, mode="sintel", padding_factor=8): self.ht, self.wd = dims[-2:] pad_ht = (((self.ht // padding_factor) + 1) * padding_factor - self.ht) % padding_factor pad_wd = (((self.wd // padding_factor) + 1) * padding_factor - self.wd) % padding_factor if mode == "sintel": self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2] else: self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht] def pad(self, *inputs): return [F.pad(x, self._pad, mode="replicate") for x in inputs] def unpad(self, x): ht, wd = x.shape[-2:] c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]] return x[..., c[0] : c[1], c[2] : c[3]]
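

# A minimal usage sketch for `InputPadder` (illustration only, appended here for
# clarity; it is not part of the upstream file, and `flow_model` below is a
# hypothetical stand-in for the optical-flow estimator used by the pipeline).
# The flow model expects spatial dims divisible by `padding_factor`, so both
# frames are padded together and the result is unpadded back afterwards:
#
#   frame_a = torch.randn(1, 3, 270, 480)  # (B, C, H, W); 270 is not a multiple of 8
#   frame_b = torch.randn(1, 3, 270, 480)
#   padder = InputPadder(frame_a.shape, padding_factor=8)
#   frame_a_p, frame_b_p = padder.pad(frame_a, frame_b)  # both padded to 272 x 480
#   # flow = flow_model(frame_a_p, frame_b_p)
#   # flow = padder.unpad(flow)  # crop back to 270 x 480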
diffusers/examples/community/rerender_a_video.py/0
{ "file_path": "diffusers/examples/community/rerender_a_video.py", "repo_id": "diffusers", "token_count": 27065 }
120
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import PIL.Image
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
from diffusers.configuration_utils import FrozenDict, deprecate
from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    logging,
)
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def prepare_mask_and_masked_image(image, mask):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will
    be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for
    the ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.

    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range.
        ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range.
        ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x
        height x width``.
""" if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) # Batch and add channel dim for single mask if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) # Batch single mask or add channel dim if mask.ndim == 3: # Single batched mask, no channel dim or single mask not batched but channel dim if mask.shape[0] == 1: mask = mask.unsqueeze(0) # Batched masks no channel dim else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") # Check mask is in [0, 1] if mask.min() < 0 or mask.max() > 1: raise ValueError("Mask should be in [0, 1] range") # Binarize mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 # Image as float32 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 # preprocess mask if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) # masked_image = image * (mask >= 0.5) masked_image = image return mask, masked_image class StableDiffusionRepaintPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin ): r""" Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) In addition the pipeline inherits the following loading methods: - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] as well as the following saving methods: - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
            Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly, as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate(
                "skip_prk_steps not set",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)
        # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
        if unet.config.in_channels != 4:
            logger.warning(
                f"You have loaded a UNet with {unet.config.in_channels} input channels, whereas by default,"
                f" {self.__class__} assumes that `pipeline.unet` has 4 input channels: 4 for `num_channels_latents`."
                " If you did not intend to modify"
                " this behavior, please check whether you have loaded the right checkpoint."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation.
If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance, ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) masked_image = masked_image.to(device=device, dtype=dtype) # encode the mask image into latents space so we can concatenate it to the latents if isinstance(generator, list): masked_image_latents = [ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i]) for i in range(batch_size) ] masked_image_latents = torch.cat(masked_image_latents, dim=0) else: masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator) masked_image_latents = self.vae.config.scaling_factor * masked_image_latents # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, mask_image: Union[torch.Tensor, PIL.Image.Image] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, jump_length: Optional[int] = 10, jump_n_sample: Optional[int] = 10, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. mask_image (`PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. jump_length (`int`, *optional*, defaults to 10): The number of steps taken forward in time before going backward in time for a single jump ("j" in RePaint paper). Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf. jump_n_sample (`int`, *optional*, defaults to 10): The number of times we will make forward time jump for a given chosen time sample. Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:
        ```py
        >>> import PIL
        >>> import requests
        >>> import torch
        >>> from io import BytesIO

        >>> from diffusers import DiffusionPipeline, RePaintScheduler


        >>> def download_image(url):
        ...     response = requests.get(url)
        ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


        >>> base_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/"
        >>> img_url = base_url + "overture-creations-5sI6fQgYIuo.png"
        >>> mask_url = base_url + "overture-creations-5sI6fQgYIuo_mask.png"

        >>> init_image = download_image(img_url).resize((512, 512))
        >>> mask_image = download_image(mask_url).resize((512, 512))

        >>> pipe = DiffusionPipeline.from_pretrained(
        ...     "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
        ... )
        >>> pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        if image is None:
            raise ValueError("`image` input cannot be undefined.")

        if mask_image is None:
            raise ValueError("`mask_image` input cannot be undefined.")

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )
        # 4. Preprocess mask and image
        mask, masked_image = prepare_mask_and_masked_image(image, mask_image)

        # 5. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, device)
        self.scheduler.eta = eta

        timesteps = self.scheduler.timesteps
        # latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 6. Prepare latent variables
        num_channels_latents = self.vae.config.latent_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare mask latent variables
        mask, masked_image_latents = self.prepare_mask_latents(
            mask,
            masked_image,
            batch_size * num_images_per_prompt,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            do_classifier_free_guidance=False,  # We do not need to duplicate mask and image
        )

        # 8. Check that sizes of mask, masked image and latents match
        # num_channels_mask = mask.shape[1]
        # num_channels_masked_image = masked_image_latents.shape[1]
        if num_channels_latents != self.unet.config.in_channels:
            raise ValueError(
                f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                f" {self.unet.config.in_channels} input channels but received `num_channels_latents`:"
                f" {num_channels_latents}. Please verify the config of"
                " `pipeline.unet` or your `mask_image` or `image` input."
            )

        # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        t_last = timesteps[0] + 1

        # 10.
Denoising loop with self.progress_bar(total=len(timesteps)) as progress_bar: for i, t in enumerate(timesteps): if t >= t_last: # compute the reverse: x_t-1 -> x_t latents = self.scheduler.undo_step(latents, t_last, generator) progress_bar.update() t_last = t continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, masked_image_latents, mask, **extra_step_kwargs, ).prev_sample # call the callback, if provided progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) t_last = t # 11. Post-processing image = self.decode_latents(latents) # 12. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 13. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
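

# Hedged sketch of the RePaint resampling loop implemented above (illustration
# only, not part of the upstream file; `denoise_step` is a hypothetical
# shorthand for the UNet forward pass plus `scheduler.step`). The scheduler
# emits a non-monotonic timestep sequence; whenever time jumps forward again
# (t >= t_last), the latents are re-noised with `scheduler.undo_step` instead
# of denoised, which realizes the jump_length/jump_n_sample schedule from the
# RePaint paper (https://arxiv.org/abs/2201.09865):
#
#   t_last = timesteps[0] + 1
#   for t in timesteps:
#       if t >= t_last:  # jump back in time: x_{t-1} -> x_t
#           latents = scheduler.undo_step(latents, t_last, generator)
#       else:            # regular denoising step: x_t -> x_{t-1}
#           latents = denoise_step(latents, t)
#       t_last = t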
diffusers/examples/community/stable_diffusion_repaint.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_repaint.py", "repo_id": "diffusers", "token_count": 19599 }
121
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import functools import gc import itertools import json import logging import math import os import random import shutil from contextlib import nullcontext from pathlib import Path from typing import List, Union import accelerate import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms.functional as TF import transformers import webdataset as wds from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from braceexpand import braceexpand from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig, get_peft_model, get_peft_model_state_dict from torch.utils.data import default_collate from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, CLIPTextModel, PretrainedConfig from webdataset.tariterators import ( base_plus_ext, tar_file_expander, url_opener, valid_sample, ) import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, LCMScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import resolve_interpolation_mode from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available MAX_SEQ_LENGTH = 77 if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.31.0.dev0") logger = get_logger(__name__) def get_module_kohya_state_dict(module, prefix: str, dtype: torch.dtype, adapter_name: str = "default"): kohya_ss_state_dict = {} for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items(): kohya_key = peft_key.replace("base_model.model", prefix) kohya_key = kohya_key.replace("lora_A", "lora_down") kohya_key = kohya_key.replace("lora_B", "lora_up") kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) kohya_ss_state_dict[kohya_key] = weight.to(dtype) # Set alpha parameter if "lora_down" in kohya_key: alpha_key = f'{kohya_key.split(".")[0]}.alpha' kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype) return kohya_ss_state_dict def filter_keys(key_set): def _f(dictionary): return {k: v for k, v in dictionary.items() if k in key_set} return _f def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): """Return function over iterator that groups key, value pairs into samples. 
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to lower case (Default value = True) """ current_sample = None for filesample in data: assert isinstance(filesample, dict) fname, value = filesample["fname"], filesample["data"] prefix, suffix = keys(fname) if prefix is None: continue if lcase: suffix = suffix.lower() # FIXME webdataset version throws if suffix in current_sample, but we have a potential for # this happening in the current LAION400m dataset if a tar ends with same prefix as the next # begins, rare, but can happen since prefix aren't unique across tar files in that dataset if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample: if valid_sample(current_sample): yield current_sample current_sample = {"__key__": prefix, "__url__": filesample["__url__"]} if suffixes is None or suffix in suffixes: current_sample[suffix] = value if valid_sample(current_sample): yield current_sample def tarfile_to_samples_nothrow(src, handler=wds.warn_and_continue): # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw streams = url_opener(src, handler=handler) files = tar_file_expander(streams, handler=handler) samples = group_by_keys_nothrow(files, handler=handler) return samples class WebdatasetFilter: def __init__(self, min_size=1024, max_pwatermark=0.5): self.min_size = min_size self.max_pwatermark = max_pwatermark def __call__(self, x): try: if "json" in x: x_json = json.loads(x["json"]) filter_size = (x_json.get("original_width", 0.0) or 0.0) >= self.min_size and x_json.get( "original_height", 0 ) >= self.min_size filter_watermark = (x_json.get("pwatermark", 1.0) or 1.0) <= self.max_pwatermark return filter_size and filter_watermark else: return False except Exception: return False class SDText2ImageDataset: def __init__( self, train_shards_path_or_url: Union[str, List[str]], num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int = 512, interpolation_type: str = "bilinear", shuffle_buffer_size: int = 1000, pin_memory: bool = False, persistent_workers: bool = False, ): if not isinstance(train_shards_path_or_url, str): train_shards_path_or_url = [list(braceexpand(urls)) for urls in train_shards_path_or_url] # flatten list using itertools train_shards_path_or_url = list(itertools.chain.from_iterable(train_shards_path_or_url)) interpolation_mode = resolve_interpolation_mode(interpolation_type) def transform(example): # resize image image = example["image"] image = TF.resize(image, resolution, interpolation=interpolation_mode) # get crop coordinates and crop image c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = TF.crop(image, c_top, c_left, resolution, resolution) image = TF.to_tensor(image) image = TF.normalize(image, [0.5], [0.5]) example["image"] = image return example processing_pipeline = [ wds.decode("pil", handler=wds.ignore_and_continue), wds.rename(image="jpg;png;jpeg;webp", text="text;txt;caption", handler=wds.warn_and_continue), wds.map(filter_keys({"image", "text"})), wds.map(transform), wds.to_tuple("image", "text"), ] # Create train dataset and loader pipeline = [ wds.ResampledShards(train_shards_path_or_url), tarfile_to_samples_nothrow, wds.shuffle(shuffle_buffer_size), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate), ] num_worker_batches = math.ceil(num_train_examples / (global_batch_size * 
num_workers)) # per dataloader worker num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size # each worker is iterating over this self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches) self._train_dataloader = wds.WebLoader( self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, ) # add meta-data to dataloader instance for convenience self._train_dataloader.num_batches = num_batches self._train_dataloader.num_samples = num_samples @property def train_dataset(self): return self._train_dataset @property def train_dataloader(self): return self._train_dataloader def log_validation(vae, unet, args, accelerator, weight_dtype, step): logger.info("Running validation... ") if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type, dtype=weight_dtype) unet = accelerator.unwrap_model(unet) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_teacher_model, vae=vae, scheduler=LCMScheduler.from_pretrained(args.pretrained_teacher_model, subfolder="scheduler"), revision=args.revision, torch_dtype=weight_dtype, safety_checker=None, ) pipeline.set_progress_bar_config(disable=True) lora_state_dict = get_module_kohya_state_dict(unet, "lora_unet", weight_dtype) pipeline.load_lora_weights(lora_state_dict) pipeline.fuse_lora() pipeline = pipeline.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) validation_prompts = [ "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography", "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k", "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece", ] image_logs = [] for _, prompt in enumerate(validation_prompts): images = [] with autocast_ctx: images = pipeline( prompt=prompt, num_inference_steps=4, num_images_per_prompt=4, generator=generator, guidance_scale=1.0, ).images image_logs.append({"validation_prompt": prompt, "images": images}) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] formatted_images = [] for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) tracker.log({"validation": formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs # From LatentConsistencyModel.get_guidance_scale_embedding def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32): """ See 
https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): scaled_timestep = timestep_scaling * timestep c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 return c_skip, c_out # Compare LCMScheduler.step, Step 4 def get_predicted_original_sample(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_x_0 = (sample - sigmas * model_output) / alphas elif prediction_type == "sample": pred_x_0 = model_output elif prediction_type == "v_prediction": pred_x_0 = alphas * sample - sigmas * model_output else: raise ValueError( f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." ) return pred_x_0 # Based on step 4 in DDIMScheduler.step def get_predicted_noise(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_epsilon = model_output elif prediction_type == "sample": pred_epsilon = (sample - alphas * model_output) / sigmas elif prediction_type == "v_prediction": pred_epsilon = alphas * model_output + sigmas * sample else: raise ValueError( f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." 
) return pred_epsilon def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def update_ema(target_params, source_params, rate=0.99): """ Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). """ for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src, alpha=1 - rate) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") # ----------Model Checkpoint Loading Arguments---------- parser.add_argument( "--pretrained_teacher_model", type=str, default=None, required=True, help="Path to pretrained LDM teacher model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. 
More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--teacher_revision", type=str, default=None, required=False, help="Revision of pretrained LDM teacher model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained LDM model identifier from huggingface.co/models.", ) # ----------Training Arguments---------- # ----General Training Arguments---- parser.add_argument( "--output_dir", type=str, default="lcm-xl-distilled", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Logging---- parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) # ----Checkpointing---- parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) # ----Image Processing---- parser.add_argument( "--train_shards_path_or_url", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--interpolation_type", type=str, default="bilinear", help=( "The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`," " `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) # ----Dataloader---- parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) # ----Batch Size and Training Steps---- parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) # ----Learning Rate---- parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) # ----Optimizer (Adam)---- parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") # ----Diffusion Training Arguments---- parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) # ----Latent Consistency Distillation (LCD) Specific Arguments---- parser.add_argument( "--w_min", type=float, default=5.0, required=False, help=( "The minimum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--w_max", type=float, default=15.0, required=False, help=( "The maximum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--num_ddim_timesteps", type=int, default=50, help="The number of timesteps to use for DDIM sampling.", ) parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber"], help="The type of loss to use for the LCD loss.", ) parser.add_argument( "--huber_c", type=float, default=0.001, help="The huber loss parameter. 
Only used if `--loss_type=huber`.", ) parser.add_argument( "--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) parser.add_argument( "--lora_alpha", type=int, default=64, help=( "The value of the LoRA alpha parameter, which controls the scaling factor in front of the LoRA weight" " update delta_W. No scaling will be performed if this value is equal to `lora_rank`." ), ) parser.add_argument( "--lora_dropout", type=float, default=0.0, help="The dropout probability for the dropout layer added before applying the LoRA to each layer input.", ) parser.add_argument( "--lora_target_modules", type=str, default=None, help=( "A comma-separated string of target module keys to add LoRA to. If not set, a default list of modules will" " be used. By default, LoRA will be applied to all conv and linear layers." ), ) parser.add_argument( "--vae_encode_batch_size", type=int, default=32, required=False, help=( "The batch size used when encoding (and decoding) images to latents (and vice versa) using the VAE." " Encoding or decoding the whole batch at once may run into OOM issues." ), ) parser.add_argument( "--timestep_scaling_factor", type=float, default=10.0, help=( "The multiplicative timestep scaling factor used when calculating the boundary scalings for LCM. The" " higher the scaling is, the lower the approximation error, but the default value of 10.0 should typically" " suffice." ), ) # ----Mixed Precision---- parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--cast_teacher_unet", action="store_true", help="Whether to cast the teacher U-Net to the precision specified by `--mixed_precision`.", ) # ----Training Optimizations---- parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) # ----Distributed Training---- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") # ----------Validation Arguments---------- parser.add_argument( "--validation_steps", type=int, default=200, help="Run validation every X steps.", ) # ----------Huggingface Hub Arguments----------- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) # ----------Accelerate Arguments---------- parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") return args # Adapted from pipelines.StableDiffusionPipeline.encode_prompt def encode_prompt(prompt_batch, text_encoder, tokenizer, proportion_empty_prompts, is_train=True): captions = [] for caption in prompt_batch: if random.random() < proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(text_encoder.device))[0] return prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, split_batches=True, # It's important to set this to True when using webdataset to get the right number of steps for lr scheduling. If set to False, the number of steps will be devide by the number of processes assuming batches are multiplied by the number of processes ) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token, private=True, ).repo_id # 1. Create the noise scheduler and the desired noise schedule. noise_scheduler = DDPMScheduler.from_pretrained( args.pretrained_teacher_model, subfolder="scheduler", revision=args.teacher_revision ) # DDPMScheduler calculates the alpha and sigma noise schedules (based on the alpha bars) for us alpha_schedule = torch.sqrt(noise_scheduler.alphas_cumprod) sigma_schedule = torch.sqrt(1 - noise_scheduler.alphas_cumprod) # Initialize the DDIM ODE solver for distillation. solver = DDIMSolver( noise_scheduler.alphas_cumprod.numpy(), timesteps=noise_scheduler.config.num_train_timesteps, ddim_timesteps=args.num_ddim_timesteps, ) # 2. Load tokenizers from SD 1.X/2.X checkpoint. tokenizer = AutoTokenizer.from_pretrained( args.pretrained_teacher_model, subfolder="tokenizer", revision=args.teacher_revision, use_fast=False ) # 3. Load text encoders from SD 1.X/2.X checkpoint. # import correct text encoder classes text_encoder = CLIPTextModel.from_pretrained( args.pretrained_teacher_model, subfolder="text_encoder", revision=args.teacher_revision ) # 4. Load VAE from SD 1.X/2.X checkpoint vae = AutoencoderKL.from_pretrained( args.pretrained_teacher_model, subfolder="vae", revision=args.teacher_revision, ) # 5. Load teacher U-Net from SD 1.X/2.X checkpoint teacher_unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) # 6. Freeze teacher vae, text_encoder, and teacher_unet vae.requires_grad_(False) text_encoder.requires_grad_(False) teacher_unet.requires_grad_(False) # 7. Create online student U-Net. unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) unet.train() # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if accelerator.unwrap_model(unet).dtype != torch.float32: raise ValueError( f"Controlnet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}" ) # 8. Add LoRA to the student U-Net, only the LoRA projection matrix will be updated by the optimizer. 
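# For reference: with PEFT's LoraConfig below, every targeted weight W is reparameterized
# as W' = W + (lora_alpha / lora_rank) * (B @ A), where A (rank x d_in) and B (d_out x rank)
# are the only trainable tensors. The base weights stay frozen, so although the optimizer
# later receives `unet.parameters()`, gradients only ever flow into the low-rank factors.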
if args.lora_target_modules is not None: lora_target_modules = [module_key.strip() for module_key in args.lora_target_modules.split(",")] else: lora_target_modules = [ "to_q", "to_k", "to_v", "to_out.0", "proj_in", "proj_out", "ff.net.0.proj", "ff.net.2", "conv1", "conv2", "conv_shortcut", "downsamplers.0.conv", "upsamplers.0.conv", "time_emb_proj", ] lora_config = LoraConfig( r=args.lora_rank, target_modules=lora_target_modules, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, ) unet = get_peft_model(unet, lora_config) # 9. Handle mixed precision and device placement # For mixed precision training we cast all non-trainable weigths to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. vae.to(accelerator.device) if args.pretrained_vae_model_name_or_path is not None: vae.to(dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) # Move teacher_unet to device, optionally cast to weight_dtype teacher_unet.to(accelerator.device) if args.cast_teacher_unet: teacher_unet.to(dtype=weight_dtype) # Also move the alpha and sigma noise schedules to accelerator.device. alpha_schedule = alpha_schedule.to(accelerator.device) sigma_schedule = sigma_schedule.to(accelerator.device) # Move the ODE solver to accelerator.device. solver = solver.to(accelerator.device) # 10. Handle saving and loading of checkpoints # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: unet_ = accelerator.unwrap_model(unet) lora_state_dict = get_peft_model_state_dict(unet_, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(output_dir, "unet_lora"), lora_state_dict) # save weights in peft format to be able to load them back unet_.save_pretrained(output_dir) for _, model in enumerate(models): # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): # load the LoRA into the model unet_ = accelerator.unwrap_model(unet) unet_.load_adapter(input_dir, "default", is_trainable=True) for _ in range(len(models)): # pop models so that they are not loaded again models.pop() accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # 11. Enable optimizations if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() teacher_unet.enable_xformers_memory_efficient_attention() # target_unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # 12. Optimizer creation optimizer = optimizer_class( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # 13. Dataset creation and data processing # Here, we compute not just the text embeddings but also the additional embeddings # needed for the SD XL UNet to operate. def compute_embeddings(prompt_batch, proportion_empty_prompts, text_encoder, tokenizer, is_train=True): prompt_embeds = encode_prompt(prompt_batch, text_encoder, tokenizer, proportion_empty_prompts, is_train) return {"prompt_embeds": prompt_embeds} dataset = SDText2ImageDataset( train_shards_path_or_url=args.train_shards_path_or_url, num_train_examples=args.max_train_samples, per_gpu_batch_size=args.train_batch_size, global_batch_size=args.train_batch_size * accelerator.num_processes, num_workers=args.dataloader_num_workers, resolution=args.resolution, interpolation_type=args.interpolation_type, shuffle_buffer_size=1000, pin_memory=True, persistent_workers=True, ) train_dataloader = dataset.train_dataloader compute_embeddings_fn = functools.partial( compute_embeddings, proportion_empty_prompts=0, text_encoder=text_encoder, tokenizer=tokenizer, ) # 14. LR Scheduler creation # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) # 15. Prepare for training # Prepare everything with our `accelerator`. unet, optimizer, lr_scheduler = accelerator.prepare(unet, optimizer, lr_scheduler) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) uncond_input_ids = tokenizer( [""] * args.train_batch_size, return_tensors="pt", padding="max_length", max_length=77 ).input_ids.to(accelerator.device) uncond_prompt_embeds = text_encoder(uncond_input_ids)[0] if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type) # 16. Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num batches each epoch = {train_dataloader.num_batches}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # 1. Load and process the image and text conditioning image, text = batch image = image.to(accelerator.device, non_blocking=True) encoded_text = compute_embeddings_fn(text) pixel_values = image.to(dtype=weight_dtype) if vae.dtype != weight_dtype: vae.to(dtype=weight_dtype) # encode pixel values with batch size of at most args.vae_encode_batch_size latents = [] for i in range(0, pixel_values.shape[0], args.vae_encode_batch_size): latents.append(vae.encode(pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()) latents = torch.cat(latents, dim=0) latents = latents * vae.config.scaling_factor latents = latents.to(weight_dtype) bsz = latents.shape[0] # 2. Sample a random timestep for each image t_n from the ODE solver timesteps without bias. # For the DDIM solver, the timestep schedule is [T - 1, T - k - 1, T - 2 * k - 1, ...] topk = noise_scheduler.config.num_train_timesteps // args.num_ddim_timesteps index = torch.randint(0, args.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # 3. 
Get boundary scalings for start_timesteps and (end) timesteps. c_skip_start, c_out_start = scalings_for_boundary_conditions( start_timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions( timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # 4. Sample noise from the prior and add it to the latents according to the noise magnitude at each # timestep (this is the forward diffusion process) [z_{t_{n + k}} in Algorithm 1] noise = torch.randn_like(latents) noisy_model_input = noise_scheduler.add_noise(latents, noise, start_timesteps) # 5. Sample a random guidance scale w from U[w_min, w_max] # Note that for LCM-LoRA distillation it is not necessary to use a guidance scale embedding w = (args.w_max - args.w_min) * torch.rand((bsz,)) + args.w_min w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # 6. Prepare prompt embeds and unet_added_conditions prompt_embeds = encoded_text.pop("prompt_embeds") # 7. Get online LCM prediction on z_{t_{n + k}} (noisy_model_input), w, c, t_{n + k} (start_timesteps) noise_pred = unet( noisy_model_input, start_timesteps, timestep_cond=None, encoder_hidden_states=prompt_embeds.float(), added_cond_kwargs=encoded_text, ).sample pred_x_0 = get_predicted_original_sample( noise_pred, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # 8. Compute the conditional and unconditional teacher model predictions to get CFG estimates of the # predicted noise eps_0 and predicted original sample x_0, then run the ODE solver using these # estimates to predict the data point in the augmented PF-ODE trajectory corresponding to the next ODE # solver timestep. with torch.no_grad(): with autocast_ctx: # 1. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and conditional embedding c cond_teacher_output = teacher_unet( noisy_model_input.to(weight_dtype), start_timesteps, encoder_hidden_states=prompt_embeds.to(weight_dtype), ).sample cond_pred_x0 = get_predicted_original_sample( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) cond_pred_noise = get_predicted_noise( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 2. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and unconditional embedding 0 uncond_teacher_output = teacher_unet( noisy_model_input.to(weight_dtype), start_timesteps, encoder_hidden_states=uncond_prompt_embeds.to(weight_dtype), ).sample uncond_pred_x0 = get_predicted_original_sample( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) uncond_pred_noise = get_predicted_noise( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 3. Calculate the CFG estimate of x_0 (pred_x0) and eps_0 (pred_noise) # Note that this uses the LCM paper's CFG formulation rather than the Imagen CFG formulation pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_pred_noise + w * (cond_pred_noise - uncond_pred_noise) # 4. 
Run one step of the ODE solver to estimate the next point x_prev on the # augmented PF-ODE trajectory (solving backward in time) # Note that the DDIM step depends on both the predicted x_0 and source noise eps_0. x_prev = solver.ddim_step(pred_x0, pred_noise, index) # 9. Get target LCM prediction on x_prev, w, c, t_n (timesteps) # Note that we do not use a separate target network for LCM-LoRA distillation. with torch.no_grad(): with autocast_ctx: target_noise_pred = unet( x_prev.float(), timesteps, timestep_cond=None, encoder_hidden_states=prompt_embeds.float(), ).sample pred_x_0 = get_predicted_original_sample( target_noise_pred, timesteps, x_prev, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) target = c_skip * x_prev + c_out * pred_x_0 # 10. Calculate loss if args.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif args.loss_type == "huber": loss = torch.mean( torch.sqrt((model_pred.float() - target.float()) ** 2 + args.huber_c**2) - args.huber_c ) # 11. Backpropagate on the online student model (`unet`) accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % args.validation_steps == 0: log_validation(vae, unet, args, accelerator, weight_dtype, global_step) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) unet.save_pretrained(args.output_dir) lora_state_dict = get_peft_model_state_dict(unet, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(args.output_dir, "unet_lora"), lora_state_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py/0
{ "file_path": "diffusers/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py", "repo_id": "diffusers", "token_count": 27023 }
122
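For readers tracing the LCM math above, here is a minimal, standalone sketch (not part of the training script) of the boundary-condition scalings used by `scalings_for_boundary_conditions`; it checks the consistency-model property that the parameterization `c_skip * x + c_out * pred_x_0` collapses to the identity at `t = 0`. The tensor values are illustrative only.

```python
import torch


def scalings(timestep, sigma_data=0.5, timestep_scaling=10.0):
    # Same formula as scalings_for_boundary_conditions in the script above.
    s = timestep_scaling * timestep
    c_skip = sigma_data**2 / (s**2 + sigma_data**2)
    c_out = s / (s**2 + sigma_data**2) ** 0.5
    return c_skip, c_out


t = torch.tensor([0.0, 999.0])
c_skip, c_out = scalings(t)
print(c_skip)  # tensor([1.0000e+00, 2.5050e-09]) -> identity mapping at t = 0
print(c_out)   # tensor([0.0000, 1.0000])         -> pure x_0 prediction at large t
```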
# Copyright 2024 Custom Diffusion authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"], timeout=30)
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
diffusers/examples/custom_diffusion/retrieve.py/0
{ "file_path": "diffusers/examples/custom_diffusion/retrieve.py", "repo_id": "diffusers", "token_count": 1429 }
123
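The retrieval helper above is usually driven from the command line, e.g. `python retrieve.py --class_prompt "cat" --class_data_dir real_reg/cat --num_class_images 200` (argument values are placeholders). The sketch below queries the same LAION index directly, reusing exactly the `ClipClient` parameters and result keys the script relies on; the prompt and result count are placeholders.

```python
from clip_retrieval.clip_client import ClipClient

# Same endpoint and index the script uses; num_images here is a small placeholder.
client = ClipClient(
    url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=10, aesthetic_weight=0.1
)
results = client.query(text="a photo of a cat")  # placeholder prompt
for r in results[:3]:
    print(r["url"], "--", r["caption"])
```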
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRAFlux(ExamplesTestsAccelerate): instance_data_dir = "docs/source/en/imgs" instance_prompt = "photo" pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-pipe" script_path = "examples/dreambooth/train_dreambooth_lora_flux.py" def test_dreambooth_lora_flux(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_text_encoder_flux(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --train_text_encoder --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) starts_with_expected_prefix = all( (key.startswith("transformer") or key.startswith("text_encoder")) for key in lora_state_dict.keys() ) self.assertTrue(starts_with_expected_prefix) def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
diffusers/examples/dreambooth/test_dreambooth_lora_flux.py/0
{ "file_path": "diffusers/examples/dreambooth/test_dreambooth_lora_flux.py", "repo_id": "diffusers", "token_count": 3156 }
124
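The assertions these tests make can be reproduced on any checkpoint produced by the Flux DreamBooth LoRA script; a small sketch, assuming `trained-flux-lora` is whatever `--output_dir` pointed at:

```python
import os

import safetensors.torch

output_dir = "trained-flux-lora"  # placeholder for the training run's --output_dir
state_dict = safetensors.torch.load_file(os.path.join(output_dir, "pytorch_lora_weights.safetensors"))

# Mirrors the checks in the tests above: every tensor is a LoRA weight, and every key
# belongs either to the transformer or (with --train_text_encoder) the text encoder.
assert all("lora" in k for k in state_dict)
assert all(k.startswith(("transformer", "text_encoder")) for k in state_dict)
print(f"{len(state_dict)} LoRA tensors look well-formed")
```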
import argparse import math import os from pathlib import Path import colossalai import torch import torch.nn.functional as F import torch.utils.checkpoint from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger from colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer from colossalai.nn.parallel.utils import get_static_torch_model from colossalai.utils import get_current_device from colossalai.utils.model.colo_init_context import ColoInitContext from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler disable_existing_loggers() logger = get_dist_logger() def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default="a photo of sks dog", required=False, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--placement", type=str, default="cpu", help="Placement Policy for Gemini. Valid when using colossalai as dist plan.", ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: if args.class_data_dir is not None: logger.warning("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: logger.warning("You need not use --class_prompt without --with_prior_preservation.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example # Gemini + ZeRO DDP def 
gemini_zero_dpp(model: torch.nn.Module, placememt_policy: str = "auto"): from colossalai.nn.parallel import GeminiDDP model = GeminiDDP( model, device=get_current_device(), placement_policy=placememt_policy, pin_memory=True, search_range_mb=64 ) return model def main(args): if args.seed is None: colossalai.launch_from_torch(config={}) else: colossalai.launch_from_torch(config={}, seed=args.seed) local_rank = gpc.get_local_rank(ParallelMode.DATA) world_size = gpc.get_world_size(ParallelMode.DATA) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if get_current_device() == "cuda" else torch.float32 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) pipeline.to(get_current_device()) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not local_rank == 0, ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline # Handle the repository creation if local_rank == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: logger.info(f"Loading tokenizer from {args.tokenizer_name}", ranks=[0]) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, revision=args.revision, use_fast=False, ) elif args.pretrained_model_name_or_path: logger.info("Loading tokenizer from pretrained model", ranks=[0]) tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path) # Load models and create wrapper for stable diffusion logger.info(f"Loading text_encoder from {args.pretrained_model_name_or_path}", ranks=[0]) text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) logger.info(f"Loading AutoencoderKL from {args.pretrained_model_name_or_path}", ranks=[0]) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) with ColoInitContext(device=get_current_device()): unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, low_cpu_mem_usage=False ) vae.requires_grad_(False) text_encoder.requires_grad_(False) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if 
args.scale_lr: args.learning_rate = args.learning_rate * args.train_batch_size * world_size unet = gemini_zero_dpp(unet, args.placement) # config optimizer for colossalai zero optimizer = GeminiAdamOptimizer(unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm) # load noise_scheduler noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") # prepare dataset logger.info(f"Prepare dataset from {args.instance_data_dir}", ranks=[0]) train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad( {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=1 ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader)) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) weight_dtype = torch.float32 if args.mixed_precision == "fp16": weight_dtype = torch.float16 elif args.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. vae.to(get_current_device(), dtype=weight_dtype) text_encoder.to(get_current_device(), dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader)) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Train! total_batch_size = args.train_batch_size * world_size logger.info("***** Running training *****", ranks=[0]) logger.info(f" Num examples = {len(train_dataset)}", ranks=[0]) logger.info(f" Num batches each epoch = {len(train_dataloader)}", ranks=[0]) logger.info(f" Num Epochs = {args.num_train_epochs}", ranks=[0]) logger.info(f" Instantaneous batch size per device = {args.train_batch_size}", ranks=[0]) logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}", ranks=[0]) logger.info(f" Total optimization steps = {args.max_train_steps}", ranks=[0]) # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not local_rank == 0) progress_bar.set_description("Steps") global_step = 0 torch.cuda.synchronize() for epoch in range(args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): torch.cuda.reset_peak_memory_stats() # Move batch to gpu for key, value in batch.items(): batch[key] = value.to(get_current_device(), non_blocking=True) # Convert images to latent space optimizer.zero_grad() latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
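                # i.e. the total objective is L = L_instance + w * L_prior with
                # w = args.prior_loss_weight; both terms are MSE on the predicted
                # noise (or velocity), each computed on its half of the batch that
                # was concatenated in collate_fn.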
                loss = loss + args.prior_loss_weight * prior_loss
            else:
                loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

            optimizer.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            logger.info(f"max GPU_mem cost is {torch.cuda.max_memory_allocated()/2**20} MB", ranks=[0])

            # Checks if the accelerator has performed an optimization step behind the scenes
            progress_bar.update(1)
            global_step += 1
            logs = {
                "loss": loss.detach().item(),
                "lr": optimizer.param_groups[0]["lr"],  # alternatively: lr_scheduler.get_last_lr()[0]
            }
            progress_bar.set_postfix(**logs)

            if global_step % args.save_steps == 0:
                torch.cuda.synchronize()
                torch_unet = get_static_torch_model(unet)
                if local_rank == 0:
                    pipeline = DiffusionPipeline.from_pretrained(
                        args.pretrained_model_name_or_path,
                        unet=torch_unet,
                        revision=args.revision,
                    )
                    save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                    pipeline.save_pretrained(save_path)
                    logger.info(f"Saving model checkpoint to {save_path}", ranks=[0])
            if global_step >= args.max_train_steps:
                break

    torch.cuda.synchronize()
    unet = get_static_torch_model(unet)

    if local_rank == 0:
        pipeline = DiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            unet=unet,
            revision=args.revision,
        )

        pipeline.save_pretrained(args.output_dir)
        logger.info(f"Saving model checkpoint to {args.output_dir}", ranks=[0])

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )


if __name__ == "__main__":
    args = parse_args()
    main(args)
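# A minimal sketch (not from the original script) of sanity-checking the
# pipeline saved above. The default output_dir mirrors the parser default;
# the prompt and file names are illustrative placeholders.
def _sample_from_saved_pipeline(output_dir: str = "text-inversion-model"):
    import torch
    from diffusers import DiffusionPipeline

    # Load the checkpoint written by pipeline.save_pretrained(...) above.
    pipe = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
    pipe.to("cuda")
    # Reusing the instance prompt from training should reproduce the subject.
    image = pipe("a photo of sks dog in a bucket").images[0]
    image.save("sample.png")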
diffusers/examples/research_projects/colossalai/train_dreambooth_colossalai.py/0
{ "file_path": "diffusers/examples/research_projects/colossalai/train_dreambooth_colossalai.py", "repo_id": "diffusers", "token_count": 11177 }
125
import argparse import itertools import math import os import random from pathlib import Path import intel_extension_for_pytorch as ipex import numpy as np import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path): logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()} torch.save(learned_embeds_dict, save_path) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--only_save_embeds", action="store_true", default=False, help="Save only the embeddings for the new concept.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." 
) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def freeze_params(params): for param in params: param.requires_grad = False def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, ) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data token_embeds[placeholder_token_id] = token_embeds[initializer_token_id] # Freeze vae and unet freeze_params(vae.parameters()) freeze_params(unet.parameters()) # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer optimizer = torch.optim.AdamW( text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) # Scheduler and math around the number of training steps. 
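    # max_train_steps and num_train_epochs are reconciled in both directions:
    # a None max_train_steps is derived from the dataloader length here, and the
    # epoch count is recomputed further down once the accelerator-prepared
    # dataloader's true length is known.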
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( text_encoder, optimizer, train_dataloader, lr_scheduler ) # Move vae and unet to device vae.to(accelerator.device) unet.to(accelerator.device) # Keep vae and unet in eval model as we don't train these vae.eval() unet.eval() unet = ipex.optimize(unet, dtype=torch.bfloat16, inplace=True) vae = ipex.optimize(vae, dtype=torch.bfloat16, inplace=True) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("textual_inversion", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
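    # "local main process" is rank 0 on each node, so multi-node runs show one
    # bar per machine rather than one per worker.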
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    progress_bar.set_description("Steps")
    global_step = 0

    text_encoder.train()
    text_encoder, optimizer = ipex.optimize(text_encoder, optimizer=optimizer, dtype=torch.bfloat16)

    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                with accelerator.accumulate(text_encoder):
                    # Convert images to latent space
                    latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
                    latents = latents * vae.config.scaling_factor

                    # Sample noise that we'll add to the latents
                    noise = torch.randn(latents.shape).to(latents.device)
                    bsz = latents.shape[0]
                    # Sample a random timestep for each image
                    timesteps = torch.randint(
                        0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
                    ).long()

                    # Add noise to the latents according to the noise magnitude at each timestep
                    # (this is the forward diffusion process)
                    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                    # Get the text embedding for conditioning
                    encoder_hidden_states = text_encoder(batch["input_ids"])[0]

                    # Predict the noise residual
                    model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

                    # Get the target for loss depending on the prediction type
                    if noise_scheduler.config.prediction_type == "epsilon":
                        target = noise
                    elif noise_scheduler.config.prediction_type == "v_prediction":
                        target = noise_scheduler.get_velocity(latents, noise, timesteps)
                    else:
                        raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                    loss = F.mse_loss(model_pred, target, reduction="none").mean([1, 2, 3]).mean()
                    accelerator.backward(loss)

                    # Zero out the gradients for all token embeddings except the newly added
                    # embeddings for the concept, as we only want to optimize the concept embeddings
                    if accelerator.num_processes > 1:
                        grads = text_encoder.module.get_input_embeddings().weight.grad
                    else:
                        grads = text_encoder.get_input_embeddings().weight.grad
                    # Get the index for tokens that we want to zero the grads for
                    index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
                    grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)

                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                if global_step % args.save_steps == 0:
                    save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
                    save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

    accelerator.wait_for_everyone()

    # Create the pipeline using the trained modules and save it.
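    # Depending on --only_save_embeds, either a full StableDiffusionPipeline or
    # only the learned placeholder-token embedding is written to output_dir.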
if accelerator.is_main_process: if args.push_to_hub and args.only_save_embeds: logger.warning("Enabling full model saving because --push_to_hub=True was specified.") save_full_model = True else: save_full_model = not args.only_save_embeds if save_full_model: pipeline = StableDiffusionPipeline( text_encoder=accelerator.unwrap_model(text_encoder), vae=vae, unet=unet, tokenizer=tokenizer, scheduler=PNDMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler"), safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker"), feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained(args.output_dir) # Save the newly trained embeddings save_path = os.path.join(args.output_dir, "learned_embeds.bin") save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
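# Note: main() reads args.report_to (both in the wandb/--hub_token safety check
# and as Accelerator's log_with), but parse_args() above never defines that
# flag, so the script fails with AttributeError as written. A minimal patch,
# assuming the usual diffusers default of "tensorboard", is one extra argument
# inside parse_args():
#
#     parser.add_argument(
#         "--report_to",
#         type=str,
#         default="tensorboard",
#         help='The integration to report results and logs to, e.g. "tensorboard" or "wandb".',
#     )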
diffusers/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py", "repo_id": "diffusers", "token_count": 10732 }
126
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning script for Stable Diffusion XL for text2image.""" import argparse import functools import gc import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import concatenate_datasets, load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__) DATASET_NAME_MAPPING = { "lambdalabs/naruto-blip-captions": ("image", "text"), } def save_model_card( repo_id: str, images: list = None, validation_prompt: str = None, base_model: str = None, dataset_name: str = None, repo_folder: str = None, vae_path: str = None, ): img_str = "" if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" model_description = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{base_model}** on the **{dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the following prompt: {validation_prompt}: \n {img_str} Special VAE used for training: {vae_path}. 
""" model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=base_model, model_description=model_description, inference=True, ) tags = [ "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "diffusers-training", "diffusers", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=1, help=( "Run fine-tuning validation every X epochs. 
The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--output_dir", type=str, default="sdxl-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--timestep_bias_strategy", type=str, default="none", choices=["earlier", "later", "range", "none"], help=( "The timestep bias strategy, which may help direct the model toward learning low or high frequency details." " Choices: ['earlier', 'later', 'range', 'none']." " The default is 'none', which means no bias is applied, and training proceeds normally." " The value of 'later' will increase the frequency of the model's final training timesteps." ), ) parser.add_argument( "--timestep_bias_multiplier", type=float, default=1.0, help=( "The multiplier for the bias. Defaults to 1.0, which means no bias is applied." " A value of 2.0 will double the weight of the bias, and a value of 0.5 will halve it." ), ) parser.add_argument( "--timestep_bias_begin", type=int, default=0, help=( "When using `--timestep_bias_strategy=range`, the beginning (inclusive) timestep to bias." " Defaults to zero, which equates to having no specific bias." ), ) parser.add_argument( "--timestep_bias_end", type=int, default=1000, help=( "When using `--timestep_bias_strategy=range`, the final timestep (inclusive) to bias." " Defaults to 1000, which is the number of timesteps that Stable Diffusion is trained on." ), ) parser.add_argument( "--timestep_bias_portion", type=float, default=0.25, help=( "The portion of timesteps to bias. Defaults to 0.25, which 25% of timesteps will be biased." " A value of 0.5 will bias one half of the timesteps. The value provided for `--timestep_bias_strategy` determines" " whether the biased portions are in the earlier or later timesteps." ), ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber", "smooth_l1"], help="The type of loss to use and whether it's timestep-scheduled. See Issue #7488 for more info.", ) parser.add_argument( "--huber_schedule", type=str, default="snr", choices=["constant", "exponential", "snr"], help="The schedule to use for the huber losses parameter", ) parser.add_argument( "--huber_c", type=float, default=0.1, help="The huber loss parameter. 
Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") return args # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True): prompt_embeds_list = [] prompt_batch = batch[caption_column] captions = [] for caption in prompt_batch: if random.random() < proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, return_dict=False, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds[-1][-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return {"prompt_embeds": prompt_embeds.cpu(), "pooled_prompt_embeds": pooled_prompt_embeds.cpu()} def compute_vae_encodings(batch, vae): images = batch.pop("pixel_values") pixel_values = torch.stack(list(images)) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) with torch.no_grad(): model_input = vae.encode(pixel_values).latent_dist.sample() model_input = model_input * vae.config.scaling_factor return {"model_input": model_input.cpu()} def generate_timestep_weights(args, num_timesteps): weights = torch.ones(num_timesteps) # Determine the indices to bias num_to_bias = int(args.timestep_bias_portion * num_timesteps) if args.timestep_bias_strategy == "later": bias_indices = slice(-num_to_bias, None) elif args.timestep_bias_strategy == "earlier": bias_indices = slice(0, num_to_bias) elif args.timestep_bias_strategy == "range": # Out of the possible 1000 timesteps, we might want to focus on eg. 200-500. range_begin = args.timestep_bias_begin range_end = args.timestep_bias_end if range_begin < 0: raise ValueError( "When using the range strategy for timestep bias, you must provide a beginning timestep greater or equal to zero." ) if range_end > num_timesteps: raise ValueError( "When using the range strategy for timestep bias, you must provide an ending timestep smaller than the number of timesteps." 
            )
        bias_indices = slice(range_begin, range_end)
    else:  # 'none' or any other string
        return weights
    if args.timestep_bias_multiplier <= 0:
        raise ValueError(
            "The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific timesteps."
            " If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead."
            " A timestep bias multiplier less than or equal to 0 is not allowed."
        )

    # Apply the bias
    weights[bias_indices] *= args.timestep_bias_multiplier

    # Normalize
    weights /= weights.sum()

    return weights


# NOTE: if you're using the scheduled version, huber_c has to depend on the timesteps already
def conditional_loss(
    model_pred: torch.Tensor,
    target: torch.Tensor,
    reduction: str = "mean",
    loss_type: str = "l2",
    huber_c: float = 0.1,
):
    if loss_type == "l2":
        loss = F.mse_loss(model_pred, target, reduction=reduction)
    elif loss_type == "huber":
        loss = 2 * huber_c * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)
        if reduction == "mean":
            loss = torch.mean(loss)
        elif reduction == "sum":
            loss = torch.sum(loss)
    elif loss_type == "smooth_l1":
        loss = 2 * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)
        if reduction == "mean":
            loss = torch.mean(loss)
        elif reduction == "sum":
            loss = torch.sum(loss)
    else:
        raise NotImplementedError(f"Unsupported Loss Type {loss_type}")
    return loss


def main(args):
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `huggingface-cli login` to authenticate with the Hub."
        )

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
        import wandb

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
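    # accelerate's set_seed seeds Python's `random`, NumPy and torch (CPU and
    # all CUDA devices) in one call, giving single-flag reproducibility.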
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizers tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") # Check for terminal SNR in combination with SNR Gamma text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # Freeze vae and text encoders. vae.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) # Set unet as trainable. unet.train() # For mixed precision training we cast all non-trainable weights to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. vae.to(accelerator.device, dtype=torch.float32) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for _ in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = unet.parameters() optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR) train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution) train_flip = transforms.RandomHorizontalFlip(p=1.0) train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] # image aug original_sizes = [] all_images = [] crop_top_lefts = [] for image in images: original_sizes.append((image.height, image.width)) image = train_resize(image) if args.random_flip and random.random() < 0.5: # flip image = train_flip(image) if args.center_crop: y1 = max(0, int(round((image.height - args.resolution) / 2.0))) x1 = max(0, int(round((image.width - args.resolution) / 2.0))) image = train_crop(image) else: y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) image = crop(image, y1, x1, h, w) crop_top_left = (y1, x1) crop_top_lefts.append(crop_top_left) image = train_transforms(image) all_images.append(image) examples["original_sizes"] = original_sizes examples["crop_top_lefts"] = crop_top_lefts examples["pixel_values"] = all_images return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) # Let's first compute all the embeddings so that we can free up the text encoders # from memory. We will pre-compute the VAE encodings too. 
text_encoders = [text_encoder_one, text_encoder_two] tokenizers = [tokenizer_one, tokenizer_two] compute_embeddings_fn = functools.partial( encode_prompt, text_encoders=text_encoders, tokenizers=tokenizers, proportion_empty_prompts=args.proportion_empty_prompts, caption_column=args.caption_column, ) compute_vae_encodings_fn = functools.partial(compute_vae_encodings, vae=vae) with accelerator.main_process_first(): from datasets.fingerprint import Hasher # fingerprint used by the cache for the other processes to load the result # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401 new_fingerprint = Hasher.hash(args) new_fingerprint_for_vae = Hasher.hash(vae_path) train_dataset_with_embeddings = train_dataset.map( compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint ) train_dataset_with_vae = train_dataset.map( compute_vae_encodings_fn, batched=True, batch_size=args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps, new_fingerprint=new_fingerprint_for_vae, ) precomputed_dataset = concatenate_datasets( [train_dataset_with_embeddings, train_dataset_with_vae.remove_columns(["image", "text"])], axis=1 ) precomputed_dataset = precomputed_dataset.with_transform(preprocess_train) del compute_vae_encodings_fn, compute_embeddings_fn, text_encoder_one, text_encoder_two del text_encoders, tokenizers, vae gc.collect() torch.cuda.empty_cache() def collate_fn(examples): model_input = torch.stack([torch.tensor(example["model_input"]) for example in examples]) original_sizes = [example["original_sizes"] for example in examples] crop_top_lefts = [example["crop_top_lefts"] for example in examples] prompt_embeds = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples]) pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples]) return { "model_input": model_input, "prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "original_sizes": original_sizes, "crop_top_lefts": crop_top_lefts, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( precomputed_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        accelerator.init_trackers("text2image-fine-tune-sdxl", config=vars(args))

    # Function for unwrapping if torch.compile() was used in accelerate.
    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(precomputed_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch

    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Sample noise that we'll add to the latents model_input = batch["model_input"].to(accelerator.device) noise = torch.randn_like(model_input) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device ) bsz = model_input.shape[0] if args.timestep_bias_strategy == "none": # Sample a random timestep for each image if args.loss_type == "huber" or args.loss_type == "smooth_l1": timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (1,), device="cpu") timestep = timesteps.item() if args.huber_schedule == "exponential": alpha = -math.log(args.huber_c) / noise_scheduler.config.num_train_timesteps huber_c = math.exp(-alpha * timestep) elif args.huber_schedule == "snr": alphas_cumprod = noise_scheduler.alphas_cumprod[timestep] sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5 huber_c = (1 - args.huber_c) / (1 + sigmas) ** 2 + args.huber_c elif args.huber_schedule == "constant": huber_c = args.huber_c else: raise NotImplementedError(f"Unknown Huber loss schedule {args.huber_schedule}!") timesteps = timesteps.repeat(bsz).to(model_input.device) elif args.loss_type == "l2": timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device ) huber_c = 1 # may be anything, as it's not used else: raise NotImplementedError(f"Unknown loss type {args.loss_type}") timesteps = timesteps.long() else: if "huber_scheduled" in args.loss_type: raise NotImplementedError( "Randomly weighted timesteps not implemented yet for scheduled huber loss!" ) else: huber_c = args.huber_c # Sample a random timestep for each image, potentially biased by the timestep weights. # Biasing the timestep weights allows us to spend less time training irrelevant timesteps. 
weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to( model_input.device ) timesteps = torch.multinomial(weights, bsz, replacement=True).long() # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) # time ids def compute_time_ids(original_size, crops_coords_top_left): # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids]) add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids add_time_ids = torch.cat( [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])] ) # Predict the noise residual unet_added_conditions = {"time_ids": add_time_ids} prompt_embeds = batch["prompt_embeds"].to(accelerator.device) pooled_prompt_embeds = batch["pooled_prompt_embeds"].to(accelerator.device) unet_added_conditions.update({"text_embeds": pooled_prompt_embeds}) model_pred = unet( noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions, return_dict=False, )[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(model_input, noise, timesteps) elif noise_scheduler.config.prediction_type == "sample": # We set the target to latents here, but the model_pred will return the noise sample prediction. target = model_input # We will have to subtract the noise residual from the prediction to get the target sample. model_pred = model_pred - noise else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.snr_gamma is None: loss = conditional_loss( model_pred.float(), target.float(), reduction="mean", loss_type=args.loss_type, huber_c=huber_c ) else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = conditional_loss( model_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c ) loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = unet.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. 
ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) # create pipeline vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, unet=accelerator.unwrap_model(unet), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) if args.prediction_type is not None: scheduler_args = {"prediction_type": args.prediction_type} pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None pipeline_args = {"prompt": args.validation_prompt} with torch.cuda.amp.autocast(): images = [ pipeline(**pipeline_args, generator=generator, num_inference_steps=25).images[0] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) # Serialize pipeline. vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unet, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) if args.prediction_type is not None: scheduler_args = {"prediction_type": args.prediction_type} pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline.save_pretrained(args.output_dir) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: pipeline = pipeline.to(accelerator.device) generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None with torch.cuda.amp.autocast(): images = [ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) if args.push_to_hub: save_model_card( repo_id=repo_id, images=images, validation_prompt=args.validation_prompt, base_model=args.pretrained_model_name_or_path, dataset_name=args.dataset_name, repo_folder=args.output_dir, vae_path=args.pretrained_vae_model_name_or_path, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
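

# Illustrative sketch (kept as comments so the script's behavior is unchanged):
# how `huber_c` evolves under the "exponential" schedule implemented in the
# training loop above, assuming num_train_timesteps = 1000 and --huber_c 0.1.
#
#   import math
#   huber_c_0, num_train_timesteps = 0.1, 1000
#   alpha = -math.log(huber_c_0) / num_train_timesteps
#   for t in (0, 500, 999):
#       print(t, math.exp(-alpha * t))  # decays from 1.0 at t=0 toward huber_c_0 at t=T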
diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image_sdxl.py/0
{ "file_path": "diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image_sdxl.py", "repo_id": "diffusers", "token_count": 26773 }
127
# Stable Diffusion text-to-image fine-tuning

The `train_text_to_image.py` script shows how to fine-tune a Stable Diffusion model on your own dataset.

___Note___:

___This script is experimental. The script fine-tunes the whole model, and oftentimes the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___


## Running locally with PyTorch
### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

### Naruto example

You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-4`, so you'll need to visit [its card](https://huggingface.co/CompVis/stable-diffusion-v1-4), read the license and tick the checkbox if you agree.

You have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).

Run the following command to authenticate your token

```bash
huggingface-cli login
```

If you have already cloned the repo, then you won't need to go through these steps.

<br>

#### Hardware

With `gradient_checkpointing` and `mixed_precision` it should be possible to fine-tune the model on a single 24GB GPU. For higher `batch_size` and faster training it's better to use GPUs with >30GB memory.

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

<!-- accelerate_snippet_start -->
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATASET_NAME="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$DATASET_NAME \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir="sd-naruto-model"
```
<!-- accelerate_snippet_end -->

To run on your own training files, prepare the dataset according to the format required by `datasets`. You can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata).
If you wish to use custom loading logic, you should modify the script; we have left pointers for that in the training script.
```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export TRAIN_DIR="path_to_your_dataset" accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$TRAIN_DIR \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-naruto-model" ``` Once the training is finished the model will be saved in the `output_dir` specified in the command. In this example it's `sd-naruto-model`. To load the fine-tuned model for inference just pass that path to `StableDiffusionPipeline` ```python import torch from diffusers import StableDiffusionPipeline model_path = "path_to_saved_model" pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16) pipe.to("cuda") image = pipe(prompt="yoda").images[0] image.save("yoda-naruto.png") ``` Checkpoints only save the unet, so to run inference from a checkpoint, just load the unet ```python import torch from diffusers import StableDiffusionPipeline, UNet2DConditionModel model_path = "path_to_saved_model" unet = UNet2DConditionModel.from_pretrained(model_path + "/checkpoint-<N>/unet", torch_dtype=torch.float16) pipe = StableDiffusionPipeline.from_pretrained("<initial model>", unet=unet, torch_dtype=torch.float16) pipe.to("cuda") image = pipe(prompt="yoda").images[0] image.save("yoda-naruto.png") ``` #### Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-naruto-model" ``` #### Training with Min-SNR weighting We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps to achieve faster convergence by rebalancing the loss. In order to use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is 5.0. You can find [this project on Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) that compares the loss surfaces of the following setups: * Training without the Min-SNR weighting strategy * Training with the Min-SNR weighting strategy (`snr_gamma` set to 5.0) * Training with the Min-SNR weighting strategy (`snr_gamma` set to 1.0) For our small Narutos dataset, the effects of Min-SNR weighting strategy might not appear to be pronounced, but for larger datasets, we believe the effects will be more pronounced. Also, note that in this example, we either predict `epsilon` (i.e., the noise) or the `v_prediction`. For both of these cases, the formulation of the Min-SNR weighting strategy that we have used holds. 
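
For intuition, here is a minimal, self-contained sketch of how the Min-SNR weights are computed for `epsilon` prediction. The `snr` values below are made up for illustration; in the training script they come from `compute_snr(noise_scheduler, timesteps)`:

```python
import torch

snr_gamma = 5.0
# Hypothetical per-timestep SNR values; the real ones come from the noise scheduler.
snr = torch.tensor([0.5, 2.0, 10.0, 50.0])

# Min-SNR weighting for epsilon prediction: min(SNR, gamma) / SNR
mse_loss_weights = torch.minimum(snr, torch.full_like(snr, snr_gamma)) / snr
print(mse_loss_weights)  # tensor([1.0000, 1.0000, 0.5000, 0.1000])
```

High-SNR (low-noise) timesteps are down-weighted while low-SNR timesteps keep a weight of 1, which is what rebalances the loss.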
#### Training with EMA weights

Through the `EMAModel` class, we support a convenient method of tracking an exponential moving average of model parameters. This helps to smooth out noise in model parameter updates and generally improves model performance. If enabled with the `--use_ema` argument, the final model checkpoint that is saved at the end of training will use the EMA weights.

EMA weights require an additional full-precision copy of the model parameters to be stored in memory, but otherwise have very little performance overhead. `--foreach_ema` can be used to further reduce the overhead. If you are short on VRAM and still want to use EMA weights, you can store them in CPU RAM by using the `--offload_ema` argument. This will keep the EMA weights in pinned CPU memory during the training step. Then, after every model parameter update, it will transfer the EMA weights back to the GPU, update them there, and send them back to the CPU. Both of these transfers are set up as non-blocking, so CUDA devices should be able to overlap them with other computations. With sufficient bandwidth between the host and device and a sufficiently long gap between model parameter updates, storing EMA weights in CPU RAM should have no additional performance overhead, as long as no other calls force synchronization.

#### Training with DREAM

We support training epsilon (noise) prediction models using the [DREAM (Diffusion Rectification and Estimation-Adaptive Models) strategy](https://arxiv.org/abs/2312.00210). DREAM claims to increase model fidelity for the performance cost of an extra grad-less unet `forward` step in the training loop. You can turn on DREAM training by using the `--dream_training` argument. The `--dream_detail_preservation` argument controls the detail preservation variable p and defaults to 1, as in the paper.

## Training with LoRA

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow controlling the extent to which the model is adapted toward new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.

With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset on consumer GPUs like the Tesla T4 or Tesla V100.

### Training

First, you need to set up your development environment as explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Naruto dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions).
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see generated images during training. All you need to do is run `pip install wandb` before training to automatically log images.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATASET_NAME="lambdalabs/naruto-blip-captions"
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag.

```bash
huggingface-cli login
```

Now we can start training!

```bash
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=512 --random_flip \
  --train_batch_size=1 \
  --num_train_epochs=100 --checkpointing_steps=5000 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --seed=42 \
  --output_dir="sd-naruto-model-lora" \
  --validation_prompt="cute dragon creature" --report_to="wandb"
```

The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases.

**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` on consumer GPUs like the T4 or V100.___**

The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitude smaller than the original model.___**

You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw).

### Inference

Once you have trained a model using the above command, inference can be done simply with the `StableDiffusionPipeline` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights, which, in this case, is `sd-naruto-model-lora`.

```python
from diffusers import StableDiffusionPipeline
import torch

model_path = "sayakpaul/sd-model-finetuned-lora-t4"
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe.unet.load_attn_procs(model_path)
pipe.to("cuda")

prompt = "A naruto with green eyes and red legs."
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("naruto.png")
```

If you are loading the LoRA parameters from the Hub and if the Hub repository has a `base_model` tag (such as [this](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/README.md?code=true#L4)), then you can do:

```py
from huggingface_hub.repocard import RepoCard

lora_model_id = "sayakpaul/sd-model-finetuned-lora-t4"
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
...
```

## Training with Flax/JAX

For faster training on TPUs and GPUs, you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

**___Note: The flax example doesn't yet support features like gradient checkpointing, gradient accumulation, etc., so to use flax for faster training we will need >30GB cards or a TPU v3.___**

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATASET_NAME="lambdalabs/naruto-blip-captions"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$DATASET_NAME \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model"
```

To run on your own training files, prepare the dataset according to the format required by `datasets`. You can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata).
If you wish to use custom loading logic, you should modify the script; we have left pointers for that in the training script.

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export TRAIN_DIR="path_to_your_dataset"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model"
```

### Training with xFormers

You can enable memory efficient attention by [installing xFormers](https://huggingface.co/docs/diffusers/main/en/optimization/xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script.

xFormers training is not available for Flax/JAX.

**Note**: According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training on some GPUs. If you observe that problem, please install a development version as indicated in that comment.

## Stable Diffusion XL

* We support fine-tuning the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) via the `train_text_to_image_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md).
* We also support fine-tuning of the UNet and Text Encoder shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with LoRA via the `train_text_to_image_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md).
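
After SDXL LoRA training, loading the resulting weights follows the same pattern as the Stable Diffusion examples above. A minimal sketch, assuming the weights were pushed to a hypothetical `your-username/sdxl-naruto-lora` repository (see the SDXL docs linked above for the full workflow):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.load_lora_weights("your-username/sdxl-naruto-lora")  # hypothetical repo id
pipe.to("cuda")

image = pipe(prompt="A naruto with green eyes and red legs.", num_inference_steps=30).images[0]
image.save("naruto-sdxl.png")
```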
diffusers/examples/text_to_image/README.md/0
{ "file_path": "diffusers/examples/text_to_image/README.md", "repo_id": "diffusers", "token_count": 5282 }
128
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextualInversion(ExamplesTestsAccelerate): def test_textual_inversion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "learned_embeds.safetensors"))) def test_textual_inversion_checkpointing(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 3 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-3"}, ) def test_textual_inversion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-1", "checkpoint-2"}, ) resume_run_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs 
--learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-3"}, )
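

# A hypothetical way to run this test module directly, assuming the diffusers
# repo root as the working directory:
#
#   python -m pytest examples/textual_inversion/test_textual_inversion.py -k checkpointing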
diffusers/examples/textual_inversion/test_textual_inversion.py/0
{ "file_path": "diffusers/examples/textual_inversion/test_textual_inversion.py", "repo_id": "diffusers", "token_count": 2914 }
129
import torch.nn as nn from torchvision.models import efficientnet_v2_l, efficientnet_v2_s from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.models.modeling_utils import ModelMixin class EfficientNetEncoder(ModelMixin, ConfigMixin): @register_to_config def __init__(self, c_latent=16, c_cond=1280, effnet="efficientnet_v2_s"): super().__init__() if effnet == "efficientnet_v2_s": self.backbone = efficientnet_v2_s(weights="DEFAULT").features else: self.backbone = efficientnet_v2_l(weights="DEFAULT").features self.mapper = nn.Sequential( nn.Conv2d(c_cond, c_latent, kernel_size=1, bias=False), nn.BatchNorm2d(c_latent), # then normalize them to have mean 0 and std 1 ) def forward(self, x): return self.mapper(self.backbone(x))
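

# Illustrative usage sketch (hypothetical; assumes the torchvision weights can
# be downloaded): the encoder maps a batch of RGB images to a `c_latent`-channel
# feature map that can be used as conditioning.
#
#   import torch
#   encoder = EfficientNetEncoder(c_latent=16)
#   images = torch.randn(1, 3, 768, 768)
#   with torch.no_grad():
#       features = encoder(images)  # (1, 16, 24, 24) -- spatial size is input / 32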
diffusers/examples/wuerstchen/text_to_image/modeling_efficient_net_encoder.py/0
{ "file_path": "diffusers/examples/wuerstchen/text_to_image/modeling_efficient_net_encoder.py", "repo_id": "diffusers", "token_count": 374 }
130
import math import os import urllib import warnings from argparse import ArgumentParser import torch import torch.nn as nn import torch.nn.functional as F from huggingface_hub.utils import insecure_hashlib from safetensors.torch import load_file as stl from tqdm import tqdm from diffusers import AutoencoderKL, ConsistencyDecoderVAE, DiffusionPipeline, StableDiffusionPipeline, UNet2DModel from diffusers.models.autoencoders.vae import Encoder from diffusers.models.embeddings import TimestepEmbedding from diffusers.models.unets.unet_2d_blocks import ResnetDownsampleBlock2D, ResnetUpsampleBlock2D, UNetMidBlock2D args = ArgumentParser() args.add_argument("--save_pretrained", required=False, default=None, type=str) args.add_argument("--test_image", required=True, type=str) args = args.parse_args() def _extract_into_tensor(arr, timesteps, broadcast_shape): # from: https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/guided_diffusion/gaussian_diffusion.py#L895 """ res = arr[timesteps].float() dims_to_append = len(broadcast_shape) - len(res.shape) return res[(...,) + (None,) * dims_to_append] def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): # from: https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/guided_diffusion/gaussian_diffusion.py#L45 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return torch.tensor(betas) def _download(url: str, root: str): os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): if insecure_hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: return download_target else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024, ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) if insecure_hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match") return download_target class ConsistencyDecoder: def __init__(self, device="cuda:0", download_root=os.path.expanduser("~/.cache/clip")): self.n_distilled_steps = 64 download_target = _download( "https://openaipublic.azureedge.net/diff-vae/c9cebd3132dd9c42936d803e33424145a748843c8f716c0814838bdc8a2fe7cb/decoder.pt", download_root, ) self.ckpt = torch.jit.load(download_target).to(device) self.device = device sigma_data = 0.5 betas = betas_for_alpha_bar(1024, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2).to(device) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) self.c_skip = sqrt_recip_alphas_cumprod * 
sigma_data**2 / (sigmas**2 + sigma_data**2) self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 @staticmethod def round_timesteps(timesteps, total_timesteps, n_distilled_steps, truncate_start=True): with torch.no_grad(): space = torch.div(total_timesteps, n_distilled_steps, rounding_mode="floor") rounded_timesteps = (torch.div(timesteps, space, rounding_mode="floor") + 1) * space if truncate_start: rounded_timesteps[rounded_timesteps == total_timesteps] -= space else: rounded_timesteps[rounded_timesteps == total_timesteps] -= space rounded_timesteps[rounded_timesteps == 0] += space return rounded_timesteps @staticmethod def ldm_transform_latent(z, extra_scale_factor=1): channel_means = [0.38862467, 0.02253063, 0.07381133, -0.0171294] channel_stds = [0.9654121, 1.0440036, 0.76147926, 0.77022034] if len(z.shape) != 4: raise ValueError() z = z * 0.18215 channels = [z[:, i] for i in range(z.shape[1])] channels = [extra_scale_factor * (c - channel_means[i]) / channel_stds[i] for i, c in enumerate(channels)] return torch.stack(channels, dim=1) @torch.no_grad() def __call__( self, features: torch.Tensor, schedule=[1.0, 0.5], generator=None, ): features = self.ldm_transform_latent(features) ts = self.round_timesteps( torch.arange(0, 1024), 1024, self.n_distilled_steps, truncate_start=False, ) shape = ( features.size(0), 3, 8 * features.size(2), 8 * features.size(3), ) x_start = torch.zeros(shape, device=features.device, dtype=features.dtype) schedule_timesteps = [int((1024 - 1) * s) for s in schedule] for i in schedule_timesteps: t = ts[i].item() t_ = torch.tensor([t] * features.shape[0]).to(self.device) # noise = torch.randn_like(x_start) noise = torch.randn(x_start.shape, dtype=x_start.dtype, generator=generator).to(device=x_start.device) x_start = ( _extract_into_tensor(self.sqrt_alphas_cumprod, t_, x_start.shape) * x_start + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t_, x_start.shape) * noise ) c_in = _extract_into_tensor(self.c_in, t_, x_start.shape) import torch.nn.functional as F from diffusers import UNet2DModel if isinstance(self.ckpt, UNet2DModel): input = torch.concat([c_in * x_start, F.upsample_nearest(features, scale_factor=8)], dim=1) model_output = self.ckpt(input, t_).sample else: model_output = self.ckpt(c_in * x_start, t_, features=features) B, C = x_start.shape[:2] model_output, _ = torch.split(model_output, C, dim=1) pred_xstart = ( _extract_into_tensor(self.c_out, t_, x_start.shape) * model_output + _extract_into_tensor(self.c_skip, t_, x_start.shape) * x_start ).clamp(-1, 1) x_start = pred_xstart return x_start def save_image(image, name): import numpy as np from PIL import Image image = image[0].cpu().numpy() image = (image + 1.0) * 127.5 image = image.clip(0, 255).astype(np.uint8) image = Image.fromarray(image.transpose(1, 2, 0)) image.save(name) def load_image(uri, size=None, center_crop=False): import numpy as np from PIL import Image image = Image.open(uri) if center_crop: image = image.crop( ( (image.width - min(image.width, image.height)) // 2, (image.height - min(image.width, image.height)) // 2, (image.width + min(image.width, image.height)) // 2, (image.height + min(image.width, image.height)) // 2, ) ) if size is not None: image = image.resize(size) image = torch.tensor(np.array(image).transpose(2, 0, 1)).unsqueeze(0).float() image = image / 127.5 - 1.0 return image class TimestepEmbedding_(nn.Module): def __init__(self, n_time=1024, n_emb=320, n_out=1280) -> 
None: super().__init__() self.emb = nn.Embedding(n_time, n_emb) self.f_1 = nn.Linear(n_emb, n_out) self.f_2 = nn.Linear(n_out, n_out) def forward(self, x) -> torch.Tensor: x = self.emb(x) x = self.f_1(x) x = F.silu(x) return self.f_2(x) class ImageEmbedding(nn.Module): def __init__(self, in_channels=7, out_channels=320) -> None: super().__init__() self.f = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) def forward(self, x) -> torch.Tensor: return self.f(x) class ImageUnembedding(nn.Module): def __init__(self, in_channels=320, out_channels=6) -> None: super().__init__() self.gn = nn.GroupNorm(32, in_channels) self.f = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1) def forward(self, x) -> torch.Tensor: return self.f(F.silu(self.gn(x))) class ConvResblock(nn.Module): def __init__(self, in_features=320, out_features=320) -> None: super().__init__() self.f_t = nn.Linear(1280, out_features * 2) self.gn_1 = nn.GroupNorm(32, in_features) self.f_1 = nn.Conv2d(in_features, out_features, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, out_features) self.f_2 = nn.Conv2d(out_features, out_features, kernel_size=3, padding=1) skip_conv = in_features != out_features self.f_s = nn.Conv2d(in_features, out_features, kernel_size=1, padding=0) if skip_conv else nn.Identity() def forward(self, x, t): x_skip = x t = self.f_t(F.silu(t)) t = t.chunk(2, dim=1) t_1 = t[0].unsqueeze(dim=2).unsqueeze(dim=3) + 1 t_2 = t[1].unsqueeze(dim=2).unsqueeze(dim=3) gn_1 = F.silu(self.gn_1(x)) f_1 = self.f_1(gn_1) gn_2 = self.gn_2(f_1) return self.f_s(x_skip) + self.f_2(F.silu(gn_2 * t_1 + t_2)) # Also ConvResblock class Downsample(nn.Module): def __init__(self, in_channels=320) -> None: super().__init__() self.f_t = nn.Linear(1280, in_channels * 2) self.gn_1 = nn.GroupNorm(32, in_channels) self.f_1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, in_channels) self.f_2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) def forward(self, x, t) -> torch.Tensor: x_skip = x t = self.f_t(F.silu(t)) t_1, t_2 = t.chunk(2, dim=1) t_1 = t_1.unsqueeze(2).unsqueeze(3) + 1 t_2 = t_2.unsqueeze(2).unsqueeze(3) gn_1 = F.silu(self.gn_1(x)) avg_pool2d = F.avg_pool2d(gn_1, kernel_size=(2, 2), stride=None) f_1 = self.f_1(avg_pool2d) gn_2 = self.gn_2(f_1) f_2 = self.f_2(F.silu(t_2 + (t_1 * gn_2))) return f_2 + F.avg_pool2d(x_skip, kernel_size=(2, 2), stride=None) # Also ConvResblock class Upsample(nn.Module): def __init__(self, in_channels=1024) -> None: super().__init__() self.f_t = nn.Linear(1280, in_channels * 2) self.gn_1 = nn.GroupNorm(32, in_channels) self.f_1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) self.gn_2 = nn.GroupNorm(32, in_channels) self.f_2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1) def forward(self, x, t) -> torch.Tensor: x_skip = x t = self.f_t(F.silu(t)) t_1, t_2 = t.chunk(2, dim=1) t_1 = t_1.unsqueeze(2).unsqueeze(3) + 1 t_2 = t_2.unsqueeze(2).unsqueeze(3) gn_1 = F.silu(self.gn_1(x)) upsample = F.upsample_nearest(gn_1, scale_factor=2) f_1 = self.f_1(upsample) gn_2 = self.gn_2(f_1) f_2 = self.f_2(F.silu(t_2 + (t_1 * gn_2))) return f_2 + F.upsample_nearest(x_skip, scale_factor=2) class ConvUNetVAE(nn.Module): def __init__(self) -> None: super().__init__() self.embed_image = ImageEmbedding() self.embed_time = TimestepEmbedding_() down_0 = nn.ModuleList( [ ConvResblock(320, 320), ConvResblock(320, 320), ConvResblock(320, 320), Downsample(320), ] ) down_1 = nn.ModuleList( [ ConvResblock(320, 
640), ConvResblock(640, 640), ConvResblock(640, 640), Downsample(640), ] ) down_2 = nn.ModuleList( [ ConvResblock(640, 1024), ConvResblock(1024, 1024), ConvResblock(1024, 1024), Downsample(1024), ] ) down_3 = nn.ModuleList( [ ConvResblock(1024, 1024), ConvResblock(1024, 1024), ConvResblock(1024, 1024), ] ) self.down = nn.ModuleList( [ down_0, down_1, down_2, down_3, ] ) self.mid = nn.ModuleList( [ ConvResblock(1024, 1024), ConvResblock(1024, 1024), ] ) up_3 = nn.ModuleList( [ ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), Upsample(1024), ] ) up_2 = nn.ModuleList( [ ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 * 2, 1024), ConvResblock(1024 + 640, 1024), Upsample(1024), ] ) up_1 = nn.ModuleList( [ ConvResblock(1024 + 640, 640), ConvResblock(640 * 2, 640), ConvResblock(640 * 2, 640), ConvResblock(320 + 640, 640), Upsample(640), ] ) up_0 = nn.ModuleList( [ ConvResblock(320 + 640, 320), ConvResblock(320 * 2, 320), ConvResblock(320 * 2, 320), ConvResblock(320 * 2, 320), ] ) self.up = nn.ModuleList( [ up_0, up_1, up_2, up_3, ] ) self.output = ImageUnembedding() def forward(self, x, t, features) -> torch.Tensor: converted = hasattr(self, "converted") and self.converted x = torch.cat([x, F.upsample_nearest(features, scale_factor=8)], dim=1) if converted: t = self.time_embedding(self.time_proj(t)) else: t = self.embed_time(t) x = self.embed_image(x) skips = [x] for i, down in enumerate(self.down): if converted and i in [0, 1, 2, 3]: x, skips_ = down(x, t) for skip in skips_: skips.append(skip) else: for block in down: x = block(x, t) skips.append(x) print(x.float().abs().sum()) if converted: x = self.mid(x, t) else: for i in range(2): x = self.mid[i](x, t) print(x.float().abs().sum()) for i, up in enumerate(self.up[::-1]): if converted and i in [0, 1, 2, 3]: skip_4 = skips.pop() skip_3 = skips.pop() skip_2 = skips.pop() skip_1 = skips.pop() skips_ = (skip_1, skip_2, skip_3, skip_4) x = up(x, skips_, t) else: for block in up: if isinstance(block, ConvResblock): x = torch.concat([x, skips.pop()], dim=1) x = block(x, t) return self.output(x) def rename_state_dict_key(k): k = k.replace("blocks.", "") for i in range(5): k = k.replace(f"down_{i}_", f"down.{i}.") k = k.replace(f"conv_{i}.", f"{i}.") k = k.replace(f"up_{i}_", f"up.{i}.") k = k.replace(f"mid_{i}", f"mid.{i}") k = k.replace("upsamp.", "4.") k = k.replace("downsamp.", "3.") k = k.replace("f_t.w", "f_t.weight").replace("f_t.b", "f_t.bias") k = k.replace("f_1.w", "f_1.weight").replace("f_1.b", "f_1.bias") k = k.replace("f_2.w", "f_2.weight").replace("f_2.b", "f_2.bias") k = k.replace("f_s.w", "f_s.weight").replace("f_s.b", "f_s.bias") k = k.replace("f.w", "f.weight").replace("f.b", "f.bias") k = k.replace("gn_1.g", "gn_1.weight").replace("gn_1.b", "gn_1.bias") k = k.replace("gn_2.g", "gn_2.weight").replace("gn_2.b", "gn_2.bias") k = k.replace("gn.g", "gn.weight").replace("gn.b", "gn.bias") return k def rename_state_dict(sd, embedding): sd = {rename_state_dict_key(k): v for k, v in sd.items()} sd["embed_time.emb.weight"] = embedding["weight"] return sd # encode with stable diffusion vae pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe.vae.cuda() # construct original decoder with jitted model decoder_consistency = ConsistencyDecoder(device="cuda:0") # construct UNet code, overwrite the decoder with conv_unet_vae model = ConvUNetVAE() model.load_state_dict( rename_state_dict( 
stl("consistency_decoder.safetensors"), stl("embedding.safetensors"), ) ) model = model.cuda() decoder_consistency.ckpt = model image = load_image(args.test_image, size=(256, 256), center_crop=True) latent = pipe.vae.encode(image.half().cuda()).latent_dist.sample() # decode with gan sample_gan = pipe.vae.decode(latent).sample.detach() save_image(sample_gan, "gan.png") # decode with conv_unet_vae sample_consistency_orig = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0)) save_image(sample_consistency_orig, "con_orig.png") ########### conversion print("CONVERSION") print("DOWN BLOCK ONE") block_one_sd_orig = model.down[0].state_dict() block_one_sd_new = {} for i in range(3): block_one_sd_new[f"resnets.{i}.norm1.weight"] = block_one_sd_orig.pop(f"{i}.gn_1.weight") block_one_sd_new[f"resnets.{i}.norm1.bias"] = block_one_sd_orig.pop(f"{i}.gn_1.bias") block_one_sd_new[f"resnets.{i}.conv1.weight"] = block_one_sd_orig.pop(f"{i}.f_1.weight") block_one_sd_new[f"resnets.{i}.conv1.bias"] = block_one_sd_orig.pop(f"{i}.f_1.bias") block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_one_sd_orig.pop(f"{i}.f_t.weight") block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_one_sd_orig.pop(f"{i}.f_t.bias") block_one_sd_new[f"resnets.{i}.norm2.weight"] = block_one_sd_orig.pop(f"{i}.gn_2.weight") block_one_sd_new[f"resnets.{i}.norm2.bias"] = block_one_sd_orig.pop(f"{i}.gn_2.bias") block_one_sd_new[f"resnets.{i}.conv2.weight"] = block_one_sd_orig.pop(f"{i}.f_2.weight") block_one_sd_new[f"resnets.{i}.conv2.bias"] = block_one_sd_orig.pop(f"{i}.f_2.bias") block_one_sd_new["downsamplers.0.norm1.weight"] = block_one_sd_orig.pop("3.gn_1.weight") block_one_sd_new["downsamplers.0.norm1.bias"] = block_one_sd_orig.pop("3.gn_1.bias") block_one_sd_new["downsamplers.0.conv1.weight"] = block_one_sd_orig.pop("3.f_1.weight") block_one_sd_new["downsamplers.0.conv1.bias"] = block_one_sd_orig.pop("3.f_1.bias") block_one_sd_new["downsamplers.0.time_emb_proj.weight"] = block_one_sd_orig.pop("3.f_t.weight") block_one_sd_new["downsamplers.0.time_emb_proj.bias"] = block_one_sd_orig.pop("3.f_t.bias") block_one_sd_new["downsamplers.0.norm2.weight"] = block_one_sd_orig.pop("3.gn_2.weight") block_one_sd_new["downsamplers.0.norm2.bias"] = block_one_sd_orig.pop("3.gn_2.bias") block_one_sd_new["downsamplers.0.conv2.weight"] = block_one_sd_orig.pop("3.f_2.weight") block_one_sd_new["downsamplers.0.conv2.bias"] = block_one_sd_orig.pop("3.f_2.bias") assert len(block_one_sd_orig) == 0 block_one = ResnetDownsampleBlock2D( in_channels=320, out_channels=320, temb_channels=1280, num_layers=3, add_downsample=True, resnet_time_scale_shift="scale_shift", resnet_eps=1e-5, ) block_one.load_state_dict(block_one_sd_new) print("DOWN BLOCK TWO") block_two_sd_orig = model.down[1].state_dict() block_two_sd_new = {} for i in range(3): block_two_sd_new[f"resnets.{i}.norm1.weight"] = block_two_sd_orig.pop(f"{i}.gn_1.weight") block_two_sd_new[f"resnets.{i}.norm1.bias"] = block_two_sd_orig.pop(f"{i}.gn_1.bias") block_two_sd_new[f"resnets.{i}.conv1.weight"] = block_two_sd_orig.pop(f"{i}.f_1.weight") block_two_sd_new[f"resnets.{i}.conv1.bias"] = block_two_sd_orig.pop(f"{i}.f_1.bias") block_two_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_two_sd_orig.pop(f"{i}.f_t.weight") block_two_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_two_sd_orig.pop(f"{i}.f_t.bias") block_two_sd_new[f"resnets.{i}.norm2.weight"] = block_two_sd_orig.pop(f"{i}.gn_2.weight") block_two_sd_new[f"resnets.{i}.norm2.bias"] = 
print("DOWN BLOCK TWO")

block_two_sd_orig = model.down[1].state_dict()
block_two_sd_new = {}

for i in range(3):
    block_two_sd_new[f"resnets.{i}.norm1.weight"] = block_two_sd_orig.pop(f"{i}.gn_1.weight")
    block_two_sd_new[f"resnets.{i}.norm1.bias"] = block_two_sd_orig.pop(f"{i}.gn_1.bias")
    block_two_sd_new[f"resnets.{i}.conv1.weight"] = block_two_sd_orig.pop(f"{i}.f_1.weight")
    block_two_sd_new[f"resnets.{i}.conv1.bias"] = block_two_sd_orig.pop(f"{i}.f_1.bias")
    block_two_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_two_sd_orig.pop(f"{i}.f_t.weight")
    block_two_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_two_sd_orig.pop(f"{i}.f_t.bias")
    block_two_sd_new[f"resnets.{i}.norm2.weight"] = block_two_sd_orig.pop(f"{i}.gn_2.weight")
    block_two_sd_new[f"resnets.{i}.norm2.bias"] = block_two_sd_orig.pop(f"{i}.gn_2.bias")
    block_two_sd_new[f"resnets.{i}.conv2.weight"] = block_two_sd_orig.pop(f"{i}.f_2.weight")
    block_two_sd_new[f"resnets.{i}.conv2.bias"] = block_two_sd_orig.pop(f"{i}.f_2.bias")

    if i == 0:
        block_two_sd_new[f"resnets.{i}.conv_shortcut.weight"] = block_two_sd_orig.pop(f"{i}.f_s.weight")
        block_two_sd_new[f"resnets.{i}.conv_shortcut.bias"] = block_two_sd_orig.pop(f"{i}.f_s.bias")

block_two_sd_new["downsamplers.0.norm1.weight"] = block_two_sd_orig.pop("3.gn_1.weight")
block_two_sd_new["downsamplers.0.norm1.bias"] = block_two_sd_orig.pop("3.gn_1.bias")
block_two_sd_new["downsamplers.0.conv1.weight"] = block_two_sd_orig.pop("3.f_1.weight")
block_two_sd_new["downsamplers.0.conv1.bias"] = block_two_sd_orig.pop("3.f_1.bias")
block_two_sd_new["downsamplers.0.time_emb_proj.weight"] = block_two_sd_orig.pop("3.f_t.weight")
block_two_sd_new["downsamplers.0.time_emb_proj.bias"] = block_two_sd_orig.pop("3.f_t.bias")
block_two_sd_new["downsamplers.0.norm2.weight"] = block_two_sd_orig.pop("3.gn_2.weight")
block_two_sd_new["downsamplers.0.norm2.bias"] = block_two_sd_orig.pop("3.gn_2.bias")
block_two_sd_new["downsamplers.0.conv2.weight"] = block_two_sd_orig.pop("3.f_2.weight")
block_two_sd_new["downsamplers.0.conv2.bias"] = block_two_sd_orig.pop("3.f_2.bias")

assert len(block_two_sd_orig) == 0

block_two = ResnetDownsampleBlock2D(
    in_channels=320,
    out_channels=640,
    temb_channels=1280,
    num_layers=3,
    add_downsample=True,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

block_two.load_state_dict(block_two_sd_new)

print("DOWN BLOCK THREE")

block_three_sd_orig = model.down[2].state_dict()
block_three_sd_new = {}

for i in range(3):
    block_three_sd_new[f"resnets.{i}.norm1.weight"] = block_three_sd_orig.pop(f"{i}.gn_1.weight")
    block_three_sd_new[f"resnets.{i}.norm1.bias"] = block_three_sd_orig.pop(f"{i}.gn_1.bias")
    block_three_sd_new[f"resnets.{i}.conv1.weight"] = block_three_sd_orig.pop(f"{i}.f_1.weight")
    block_three_sd_new[f"resnets.{i}.conv1.bias"] = block_three_sd_orig.pop(f"{i}.f_1.bias")
    block_three_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_three_sd_orig.pop(f"{i}.f_t.weight")
    block_three_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_three_sd_orig.pop(f"{i}.f_t.bias")
    block_three_sd_new[f"resnets.{i}.norm2.weight"] = block_three_sd_orig.pop(f"{i}.gn_2.weight")
    block_three_sd_new[f"resnets.{i}.norm2.bias"] = block_three_sd_orig.pop(f"{i}.gn_2.bias")
    block_three_sd_new[f"resnets.{i}.conv2.weight"] = block_three_sd_orig.pop(f"{i}.f_2.weight")
    block_three_sd_new[f"resnets.{i}.conv2.bias"] = block_three_sd_orig.pop(f"{i}.f_2.bias")

    if i == 0:
        block_three_sd_new[f"resnets.{i}.conv_shortcut.weight"] = block_three_sd_orig.pop(f"{i}.f_s.weight")
        block_three_sd_new[f"resnets.{i}.conv_shortcut.bias"] = block_three_sd_orig.pop(f"{i}.f_s.bias")

block_three_sd_new["downsamplers.0.norm1.weight"] = block_three_sd_orig.pop("3.gn_1.weight")
block_three_sd_new["downsamplers.0.norm1.bias"] = block_three_sd_orig.pop("3.gn_1.bias")
block_three_sd_new["downsamplers.0.conv1.weight"] = block_three_sd_orig.pop("3.f_1.weight")
block_three_sd_new["downsamplers.0.conv1.bias"] = block_three_sd_orig.pop("3.f_1.bias")
block_three_sd_new["downsamplers.0.time_emb_proj.weight"] = block_three_sd_orig.pop("3.f_t.weight")
block_three_sd_new["downsamplers.0.time_emb_proj.bias"] = block_three_sd_orig.pop("3.f_t.bias")
block_three_sd_new["downsamplers.0.norm2.weight"] = block_three_sd_orig.pop("3.gn_2.weight")
block_three_sd_new["downsamplers.0.norm2.bias"] = block_three_sd_orig.pop("3.gn_2.bias")
block_three_sd_new["downsamplers.0.conv2.weight"] = block_three_sd_orig.pop("3.f_2.weight")
block_three_sd_new["downsamplers.0.conv2.bias"] = block_three_sd_orig.pop("3.f_2.bias")

assert len(block_three_sd_orig) == 0

block_three = ResnetDownsampleBlock2D(
    in_channels=640,
    out_channels=1024,
    temb_channels=1280,
    num_layers=3,
    add_downsample=True,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

block_three.load_state_dict(block_three_sd_new)
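# Illustrative sanity check (not part of the original script): any converted
# block can be compared against the still-unmodified ConvUNetVAE modules,
# because popping entries from the dict returned by `.state_dict()` does not
# remove parameters from the module itself. Shapes below assume down block two.
#
#   x = torch.randn(1, 320, 64, 64, device="cuda")
#   temb = torch.randn(1, 1280, device="cuda")
#   with torch.no_grad():
#       ref = x
#       for blk in model.down[1]:
#           ref = blk(ref, temb)
#       out = block_two.to("cuda")(x, temb)[0]
#   print((ref - out).abs().max())  # expect ~0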
print("DOWN BLOCK FOUR")

block_four_sd_orig = model.down[3].state_dict()
block_four_sd_new = {}

for i in range(3):
    block_four_sd_new[f"resnets.{i}.norm1.weight"] = block_four_sd_orig.pop(f"{i}.gn_1.weight")
    block_four_sd_new[f"resnets.{i}.norm1.bias"] = block_four_sd_orig.pop(f"{i}.gn_1.bias")
    block_four_sd_new[f"resnets.{i}.conv1.weight"] = block_four_sd_orig.pop(f"{i}.f_1.weight")
    block_four_sd_new[f"resnets.{i}.conv1.bias"] = block_four_sd_orig.pop(f"{i}.f_1.bias")
    block_four_sd_new[f"resnets.{i}.time_emb_proj.weight"] = block_four_sd_orig.pop(f"{i}.f_t.weight")
    block_four_sd_new[f"resnets.{i}.time_emb_proj.bias"] = block_four_sd_orig.pop(f"{i}.f_t.bias")
    block_four_sd_new[f"resnets.{i}.norm2.weight"] = block_four_sd_orig.pop(f"{i}.gn_2.weight")
    block_four_sd_new[f"resnets.{i}.norm2.bias"] = block_four_sd_orig.pop(f"{i}.gn_2.bias")
    block_four_sd_new[f"resnets.{i}.conv2.weight"] = block_four_sd_orig.pop(f"{i}.f_2.weight")
    block_four_sd_new[f"resnets.{i}.conv2.bias"] = block_four_sd_orig.pop(f"{i}.f_2.bias")

assert len(block_four_sd_orig) == 0

block_four = ResnetDownsampleBlock2D(
    in_channels=1024,
    out_channels=1024,
    temb_channels=1280,
    num_layers=3,
    add_downsample=False,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

block_four.load_state_dict(block_four_sd_new)

print("MID BLOCK 1")

mid_block_one_sd_orig = model.mid.state_dict()
mid_block_one_sd_new = {}

for i in range(2):
    mid_block_one_sd_new[f"resnets.{i}.norm1.weight"] = mid_block_one_sd_orig.pop(f"{i}.gn_1.weight")
    mid_block_one_sd_new[f"resnets.{i}.norm1.bias"] = mid_block_one_sd_orig.pop(f"{i}.gn_1.bias")
    mid_block_one_sd_new[f"resnets.{i}.conv1.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_1.weight")
    mid_block_one_sd_new[f"resnets.{i}.conv1.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_1.bias")
    mid_block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_t.weight")
    mid_block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_t.bias")
    mid_block_one_sd_new[f"resnets.{i}.norm2.weight"] = mid_block_one_sd_orig.pop(f"{i}.gn_2.weight")
    mid_block_one_sd_new[f"resnets.{i}.norm2.bias"] = mid_block_one_sd_orig.pop(f"{i}.gn_2.bias")
    mid_block_one_sd_new[f"resnets.{i}.conv2.weight"] = mid_block_one_sd_orig.pop(f"{i}.f_2.weight")
    mid_block_one_sd_new[f"resnets.{i}.conv2.bias"] = mid_block_one_sd_orig.pop(f"{i}.f_2.bias")

assert len(mid_block_one_sd_orig) == 0

mid_block_one = UNetMidBlock2D(
    in_channels=1024,
    temb_channels=1280,
    num_layers=1,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
    add_attention=False,
)

mid_block_one.load_state_dict(mid_block_one_sd_new)
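# Both mid resnets land in a single UNetMidBlock2D: with num_layers=1 it
# instantiates two resnets internally, and add_attention=False keeps it
# attention-free, matching the consistency decoder.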
print("UP BLOCK ONE")

up_block_one_sd_orig = model.up[-1].state_dict()
up_block_one_sd_new = {}

for i in range(4):
    up_block_one_sd_new[f"resnets.{i}.norm1.weight"] = up_block_one_sd_orig.pop(f"{i}.gn_1.weight")
    up_block_one_sd_new[f"resnets.{i}.norm1.bias"] = up_block_one_sd_orig.pop(f"{i}.gn_1.bias")
    up_block_one_sd_new[f"resnets.{i}.conv1.weight"] = up_block_one_sd_orig.pop(f"{i}.f_1.weight")
    up_block_one_sd_new[f"resnets.{i}.conv1.bias"] = up_block_one_sd_orig.pop(f"{i}.f_1.bias")
    up_block_one_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_one_sd_orig.pop(f"{i}.f_t.weight")
    up_block_one_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_one_sd_orig.pop(f"{i}.f_t.bias")
    up_block_one_sd_new[f"resnets.{i}.norm2.weight"] = up_block_one_sd_orig.pop(f"{i}.gn_2.weight")
    up_block_one_sd_new[f"resnets.{i}.norm2.bias"] = up_block_one_sd_orig.pop(f"{i}.gn_2.bias")
    up_block_one_sd_new[f"resnets.{i}.conv2.weight"] = up_block_one_sd_orig.pop(f"{i}.f_2.weight")
    up_block_one_sd_new[f"resnets.{i}.conv2.bias"] = up_block_one_sd_orig.pop(f"{i}.f_2.bias")
    up_block_one_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_one_sd_orig.pop(f"{i}.f_s.weight")
    up_block_one_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_one_sd_orig.pop(f"{i}.f_s.bias")

up_block_one_sd_new["upsamplers.0.norm1.weight"] = up_block_one_sd_orig.pop("4.gn_1.weight")
up_block_one_sd_new["upsamplers.0.norm1.bias"] = up_block_one_sd_orig.pop("4.gn_1.bias")
up_block_one_sd_new["upsamplers.0.conv1.weight"] = up_block_one_sd_orig.pop("4.f_1.weight")
up_block_one_sd_new["upsamplers.0.conv1.bias"] = up_block_one_sd_orig.pop("4.f_1.bias")
up_block_one_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_one_sd_orig.pop("4.f_t.weight")
up_block_one_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_one_sd_orig.pop("4.f_t.bias")
up_block_one_sd_new["upsamplers.0.norm2.weight"] = up_block_one_sd_orig.pop("4.gn_2.weight")
up_block_one_sd_new["upsamplers.0.norm2.bias"] = up_block_one_sd_orig.pop("4.gn_2.bias")
up_block_one_sd_new["upsamplers.0.conv2.weight"] = up_block_one_sd_orig.pop("4.f_2.weight")
up_block_one_sd_new["upsamplers.0.conv2.bias"] = up_block_one_sd_orig.pop("4.f_2.bias")

assert len(up_block_one_sd_orig) == 0

up_block_one = ResnetUpsampleBlock2D(
    in_channels=1024,
    prev_output_channel=1024,
    out_channels=1024,
    temb_channels=1280,
    num_layers=4,
    add_upsample=True,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

up_block_one.load_state_dict(up_block_one_sd_new)
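# Unlike the down path, every up-path resnet concatenates a skip connection
# onto its input, so each one carries an f_s projection; that is why the
# conv_shortcut entries are mapped for all four resnets, not just the first.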
print("UP BLOCK TWO")

up_block_two_sd_orig = model.up[-2].state_dict()
up_block_two_sd_new = {}

for i in range(4):
    up_block_two_sd_new[f"resnets.{i}.norm1.weight"] = up_block_two_sd_orig.pop(f"{i}.gn_1.weight")
    up_block_two_sd_new[f"resnets.{i}.norm1.bias"] = up_block_two_sd_orig.pop(f"{i}.gn_1.bias")
    up_block_two_sd_new[f"resnets.{i}.conv1.weight"] = up_block_two_sd_orig.pop(f"{i}.f_1.weight")
    up_block_two_sd_new[f"resnets.{i}.conv1.bias"] = up_block_two_sd_orig.pop(f"{i}.f_1.bias")
    up_block_two_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_two_sd_orig.pop(f"{i}.f_t.weight")
    up_block_two_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_two_sd_orig.pop(f"{i}.f_t.bias")
    up_block_two_sd_new[f"resnets.{i}.norm2.weight"] = up_block_two_sd_orig.pop(f"{i}.gn_2.weight")
    up_block_two_sd_new[f"resnets.{i}.norm2.bias"] = up_block_two_sd_orig.pop(f"{i}.gn_2.bias")
    up_block_two_sd_new[f"resnets.{i}.conv2.weight"] = up_block_two_sd_orig.pop(f"{i}.f_2.weight")
    up_block_two_sd_new[f"resnets.{i}.conv2.bias"] = up_block_two_sd_orig.pop(f"{i}.f_2.bias")
    up_block_two_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_two_sd_orig.pop(f"{i}.f_s.weight")
    up_block_two_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_two_sd_orig.pop(f"{i}.f_s.bias")

up_block_two_sd_new["upsamplers.0.norm1.weight"] = up_block_two_sd_orig.pop("4.gn_1.weight")
up_block_two_sd_new["upsamplers.0.norm1.bias"] = up_block_two_sd_orig.pop("4.gn_1.bias")
up_block_two_sd_new["upsamplers.0.conv1.weight"] = up_block_two_sd_orig.pop("4.f_1.weight")
up_block_two_sd_new["upsamplers.0.conv1.bias"] = up_block_two_sd_orig.pop("4.f_1.bias")
up_block_two_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_two_sd_orig.pop("4.f_t.weight")
up_block_two_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_two_sd_orig.pop("4.f_t.bias")
up_block_two_sd_new["upsamplers.0.norm2.weight"] = up_block_two_sd_orig.pop("4.gn_2.weight")
up_block_two_sd_new["upsamplers.0.norm2.bias"] = up_block_two_sd_orig.pop("4.gn_2.bias")
up_block_two_sd_new["upsamplers.0.conv2.weight"] = up_block_two_sd_orig.pop("4.f_2.weight")
up_block_two_sd_new["upsamplers.0.conv2.bias"] = up_block_two_sd_orig.pop("4.f_2.bias")

assert len(up_block_two_sd_orig) == 0

up_block_two = ResnetUpsampleBlock2D(
    in_channels=640,
    prev_output_channel=1024,
    out_channels=1024,
    temb_channels=1280,
    num_layers=4,
    add_upsample=True,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

up_block_two.load_state_dict(up_block_two_sd_new)
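# For ResnetUpsampleBlock2D, `in_channels` is the width of the skip tensors
# arriving from the matching down block and `prev_output_channel` is what the
# previous up block emitted; diffusers sums the two per layer, which
# reproduces the (1024 * 2) and (1024 + 640) input widths used in ConvUNetVAE.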
print("UP BLOCK THREE")

up_block_three_sd_orig = model.up[-3].state_dict()
up_block_three_sd_new = {}

for i in range(4):
    up_block_three_sd_new[f"resnets.{i}.norm1.weight"] = up_block_three_sd_orig.pop(f"{i}.gn_1.weight")
    up_block_three_sd_new[f"resnets.{i}.norm1.bias"] = up_block_three_sd_orig.pop(f"{i}.gn_1.bias")
    up_block_three_sd_new[f"resnets.{i}.conv1.weight"] = up_block_three_sd_orig.pop(f"{i}.f_1.weight")
    up_block_three_sd_new[f"resnets.{i}.conv1.bias"] = up_block_three_sd_orig.pop(f"{i}.f_1.bias")
    up_block_three_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_three_sd_orig.pop(f"{i}.f_t.weight")
    up_block_three_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_three_sd_orig.pop(f"{i}.f_t.bias")
    up_block_three_sd_new[f"resnets.{i}.norm2.weight"] = up_block_three_sd_orig.pop(f"{i}.gn_2.weight")
    up_block_three_sd_new[f"resnets.{i}.norm2.bias"] = up_block_three_sd_orig.pop(f"{i}.gn_2.bias")
    up_block_three_sd_new[f"resnets.{i}.conv2.weight"] = up_block_three_sd_orig.pop(f"{i}.f_2.weight")
    up_block_three_sd_new[f"resnets.{i}.conv2.bias"] = up_block_three_sd_orig.pop(f"{i}.f_2.bias")
    up_block_three_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_three_sd_orig.pop(f"{i}.f_s.weight")
    up_block_three_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_three_sd_orig.pop(f"{i}.f_s.bias")

up_block_three_sd_new["upsamplers.0.norm1.weight"] = up_block_three_sd_orig.pop("4.gn_1.weight")
up_block_three_sd_new["upsamplers.0.norm1.bias"] = up_block_three_sd_orig.pop("4.gn_1.bias")
up_block_three_sd_new["upsamplers.0.conv1.weight"] = up_block_three_sd_orig.pop("4.f_1.weight")
up_block_three_sd_new["upsamplers.0.conv1.bias"] = up_block_three_sd_orig.pop("4.f_1.bias")
up_block_three_sd_new["upsamplers.0.time_emb_proj.weight"] = up_block_three_sd_orig.pop("4.f_t.weight")
up_block_three_sd_new["upsamplers.0.time_emb_proj.bias"] = up_block_three_sd_orig.pop("4.f_t.bias")
up_block_three_sd_new["upsamplers.0.norm2.weight"] = up_block_three_sd_orig.pop("4.gn_2.weight")
up_block_three_sd_new["upsamplers.0.norm2.bias"] = up_block_three_sd_orig.pop("4.gn_2.bias")
up_block_three_sd_new["upsamplers.0.conv2.weight"] = up_block_three_sd_orig.pop("4.f_2.weight")
up_block_three_sd_new["upsamplers.0.conv2.bias"] = up_block_three_sd_orig.pop("4.f_2.bias")

assert len(up_block_three_sd_orig) == 0

up_block_three = ResnetUpsampleBlock2D(
    in_channels=320,
    prev_output_channel=1024,
    out_channels=640,
    temb_channels=1280,
    num_layers=4,
    add_upsample=True,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

up_block_three.load_state_dict(up_block_three_sd_new)

print("UP BLOCK FOUR")

up_block_four_sd_orig = model.up[-4].state_dict()
up_block_four_sd_new = {}

for i in range(4):
    up_block_four_sd_new[f"resnets.{i}.norm1.weight"] = up_block_four_sd_orig.pop(f"{i}.gn_1.weight")
    up_block_four_sd_new[f"resnets.{i}.norm1.bias"] = up_block_four_sd_orig.pop(f"{i}.gn_1.bias")
    up_block_four_sd_new[f"resnets.{i}.conv1.weight"] = up_block_four_sd_orig.pop(f"{i}.f_1.weight")
    up_block_four_sd_new[f"resnets.{i}.conv1.bias"] = up_block_four_sd_orig.pop(f"{i}.f_1.bias")
    up_block_four_sd_new[f"resnets.{i}.time_emb_proj.weight"] = up_block_four_sd_orig.pop(f"{i}.f_t.weight")
    up_block_four_sd_new[f"resnets.{i}.time_emb_proj.bias"] = up_block_four_sd_orig.pop(f"{i}.f_t.bias")
    up_block_four_sd_new[f"resnets.{i}.norm2.weight"] = up_block_four_sd_orig.pop(f"{i}.gn_2.weight")
    up_block_four_sd_new[f"resnets.{i}.norm2.bias"] = up_block_four_sd_orig.pop(f"{i}.gn_2.bias")
    up_block_four_sd_new[f"resnets.{i}.conv2.weight"] = up_block_four_sd_orig.pop(f"{i}.f_2.weight")
    up_block_four_sd_new[f"resnets.{i}.conv2.bias"] = up_block_four_sd_orig.pop(f"{i}.f_2.bias")
    up_block_four_sd_new[f"resnets.{i}.conv_shortcut.weight"] = up_block_four_sd_orig.pop(f"{i}.f_s.weight")
    up_block_four_sd_new[f"resnets.{i}.conv_shortcut.bias"] = up_block_four_sd_orig.pop(f"{i}.f_s.bias")

assert len(up_block_four_sd_orig) == 0

up_block_four = ResnetUpsampleBlock2D(
    in_channels=320,
    prev_output_channel=640,
    out_channels=320,
    temb_channels=1280,
    num_layers=4,
    add_upsample=False,
    resnet_time_scale_shift="scale_shift",
    resnet_eps=1e-5,
)

up_block_four.load_state_dict(up_block_four_sd_new)

print("initial projection (conv_in)")

conv_in_sd_orig = model.embed_image.state_dict()
conv_in_sd_new = {}

conv_in_sd_new["weight"] = conv_in_sd_orig.pop("f.weight")
conv_in_sd_new["bias"] = conv_in_sd_orig.pop("f.bias")

assert len(conv_in_sd_orig) == 0

block_out_channels = [320, 640, 1024, 1024]
in_channels = 7
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2

conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)

conv_in.load_state_dict(conv_in_sd_new)

print("out projection (conv_out) (conv_norm_out)")

out_channels = 6
norm_num_groups = 32
norm_eps = 1e-5
act_fn = "silu"
conv_out_kernel = 3
conv_out_padding = (conv_out_kernel - 1) // 2

conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)

# uses torch.functional in orig
# conv_act = get_activation(act_fn)

conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding)

conv_norm_out.load_state_dict(model.output.gn.state_dict())
conv_out.load_state_dict(model.output.f.state_dict())

print("timestep projection (time_proj) (time_embedding)")

f1_sd = model.embed_time.f_1.state_dict()
f2_sd = model.embed_time.f_2.state_dict()

time_embedding_sd = {
    "linear_1.weight": f1_sd.pop("weight"),
    "linear_1.bias": f1_sd.pop("bias"),
    "linear_2.weight": f2_sd.pop("weight"),
    "linear_2.bias": f2_sd.pop("bias"),
}

assert len(f1_sd) == 0
assert len(f2_sd) == 0

time_embedding_type = "learned"
num_train_timesteps = 1024
time_embedding_dim = 1280

time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0])
timestep_input_dim = block_out_channels[0]

time_embedding = TimestepEmbedding(timestep_input_dim, time_embedding_dim)

time_proj.load_state_dict(model.embed_time.emb.state_dict())
time_embedding.load_state_dict(time_embedding_sd)

print("CONVERT")
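# From here the converted diffusers blocks are grafted back into the live
# ConvUNetVAE instance; setting model.converted flips its forward() onto the
# diffusers code path so the same jitted sampler can confirm that the outputs
# match the original bit for bit.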
time_embedding.to("cuda")
time_proj.to("cuda")
conv_in.to("cuda")

block_one.to("cuda")
block_two.to("cuda")
block_three.to("cuda")
block_four.to("cuda")

mid_block_one.to("cuda")

up_block_one.to("cuda")
up_block_two.to("cuda")
up_block_three.to("cuda")
up_block_four.to("cuda")

conv_norm_out.to("cuda")
conv_out.to("cuda")

model.time_proj = time_proj
model.time_embedding = time_embedding
model.embed_image = conv_in

model.down[0] = block_one
model.down[1] = block_two
model.down[2] = block_three
model.down[3] = block_four

model.mid = mid_block_one

model.up[-1] = up_block_one
model.up[-2] = up_block_two
model.up[-3] = up_block_three
model.up[-4] = up_block_four

model.output.gn = conv_norm_out
model.output.f = conv_out

model.converted = True

sample_consistency_new = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0))
save_image(sample_consistency_new, "con_new.png")

assert (sample_consistency_orig == sample_consistency_new).all()

print("making unet")

unet = UNet2DModel(
    in_channels=in_channels,
    out_channels=out_channels,
    down_block_types=(
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
    ),
    up_block_types=(
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ),
    block_out_channels=block_out_channels,
    layers_per_block=3,
    norm_num_groups=norm_num_groups,
    norm_eps=norm_eps,
    resnet_time_scale_shift="scale_shift",
    time_embedding_type="learned",
    num_train_timesteps=num_train_timesteps,
    add_attention=False,
)

unet_state_dict = {}


def add_state_dict(prefix, mod):
    for k, v in mod.state_dict().items():
        unet_state_dict[f"{prefix}.{k}"] = v


add_state_dict("conv_in", conv_in)
add_state_dict("time_proj", time_proj)
add_state_dict("time_embedding", time_embedding)
add_state_dict("down_blocks.0", block_one)
add_state_dict("down_blocks.1", block_two)
add_state_dict("down_blocks.2", block_three)
add_state_dict("down_blocks.3", block_four)
add_state_dict("mid_block", mid_block_one)
add_state_dict("up_blocks.0", up_block_one)
add_state_dict("up_blocks.1", up_block_two)
add_state_dict("up_blocks.2", up_block_three)
add_state_dict("up_blocks.3", up_block_four)
add_state_dict("conv_norm_out", conv_norm_out)
add_state_dict("conv_out", conv_out)

unet.load_state_dict(unet_state_dict)

print("running with diffusers unet")

unet.to("cuda")

decoder_consistency.ckpt = unet

sample_consistency_new_2 = decoder_consistency(latent, generator=torch.Generator("cpu").manual_seed(0))
save_image(sample_consistency_new_2, "con_new_2.png")

assert (sample_consistency_orig == sample_consistency_new_2).all()

print("running with diffusers model")

Encoder.old_constructor = Encoder.__init__


def new_constructor(self, **kwargs):
    self.old_constructor(**kwargs)
    self.constructor_arguments = kwargs


Encoder.__init__ = new_constructor

vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
consistency_vae = ConsistencyDecoderVAE(
    encoder_args=vae.encoder.constructor_arguments,
    decoder_args=unet.config,
    scaling_factor=vae.config.scaling_factor,
    block_out_channels=vae.config.block_out_channels,
    latent_channels=vae.config.latent_channels,
)
consistency_vae.encoder.load_state_dict(vae.encoder.state_dict())
consistency_vae.quant_conv.load_state_dict(vae.quant_conv.state_dict())
consistency_vae.decoder_unet.load_state_dict(unet.state_dict())

consistency_vae.to(dtype=torch.float16, device="cuda")

sample_consistency_new_3 = consistency_vae.decode(
    0.18215 * latent, generator=torch.Generator("cpu").manual_seed(0)
).sample
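# Exact equality is no longer asserted for this variant, presumably because
# ConsistencyDecoderVAE runs in fp16 here; the script reports the max and
# total absolute error instead.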
print("max difference")
print((sample_consistency_orig - sample_consistency_new_3).abs().max())
print("total difference")
print((sample_consistency_orig - sample_consistency_new_3).abs().sum())

# assert (sample_consistency_orig == sample_consistency_new_3).all()

print("running with diffusers pipeline")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", vae=consistency_vae, torch_dtype=torch.float16
)
pipe.to("cuda")

pipe("horse", generator=torch.Generator("cpu").manual_seed(0)).images[0].save("horse.png")

if args.save_pretrained is not None:
    consistency_vae.save_pretrained(args.save_pretrained)
diffusers/scripts/convert_consistency_decoder.py/0
{ "file_path": "diffusers/scripts/convert_consistency_decoder.py", "repo_id": "diffusers", "token_count": 21911 }
131