# candle-flash-attn
candle/candle-flash-attn/README.md/0
{ "file_path": "candle/candle-flash-attn/README.md", "repo_id": "candle", "token_count": 8 }
37
/******************************************************************************
 * Copyright (c) 2024, Tri Dao.
 ******************************************************************************/

#pragma once

#include <cute/tensor.hpp>

#include "utils.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

namespace flash {

using namespace cute;

////////////////////////////////////////////////////////////////////////////////////////////////////

template <bool Is_even_K=true, bool Clear_OOB_K=true,
          typename Engine0, typename Layout0, typename Engine1, typename Layout1,
          typename Engine2, typename Layout2, typename Engine3, typename Layout3>
__forceinline__ __device__ void copy_rotary_interleaved(Tensor<Engine0, Layout0> const &S,
                                                        Tensor<Engine1, Layout1> &D,
                                                        Tensor<Engine2, Layout2> const &Cos,
                                                        Tensor<Engine2, Layout2> const &Sin,
                                                        Tensor<Engine3, Layout3> const &identity_MN,
                                                        const int max_MN, const int min_MN,
                                                        const int dim, const int rotary_dim) {
    CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
    CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
    CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D));     // MMA
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D));     // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D));     // MMA_K
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos));   // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos));   // MMA_K
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin));   // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin));   // MMA_K
    CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); // MMA_K
    static_assert(decltype(size<0>(S))::value == decltype(size<0>(Cos))::value * 2);
    static_assert(decltype(size<0>(Cos))::value % 2 == 0);  // Since we do fast conversion from fp16/bf16 to fp32
    Tensor rCos = make_fragment_like(Cos);
    Tensor rSin = make_fragment_like(Sin);
    Tensor rS = make_fragment_like(S);
    #pragma unroll
    for (int m = 0; m < size<1>(S); ++m) {
        if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
            #pragma unroll
            for (int k = 0; k < size<2>(S); ++k) {
                if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
                    cute::copy(S(_, m, k), rS(_, m, k));
                    if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
                        cute::copy(Cos(_, m, k), rCos(_, m, k));
                        cute::copy(Sin(_, m, k), rSin(_, m, k));
                        Tensor S_fp32 = convert_type<float>(rS(_, m, k));
                        Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
                        Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
                        #pragma unroll
                        for (int i = 0; i < size<0>(rS) / 2; ++i) {
                            float real = S_fp32(2 * i) * cos_fp32(i) - S_fp32(2 * i + 1) * sin_fp32(i);
                            float imag = S_fp32(2 * i) * sin_fp32(i) + S_fp32(2 * i + 1) * cos_fp32(i);
                            S_fp32(2 * i) = real;
                            S_fp32(2 * i + 1) = imag;
                        }
                        // A copy is needed here for convert_type to work correctly.
                        Tensor S_fp32_copy = make_fragment_like(S_fp32);
                        cute::copy(S_fp32, S_fp32_copy);
                        using T = typename Engine0::value_type;
                        Tensor S_og_type = convert_type<T>(S_fp32_copy);
                        cute::copy(S_og_type, rS(_, m, k));
                    }
                    cute::copy(rS(_, m, k), D(_, m, k));
                } else if (Clear_OOB_K) {
                    cute::clear(D(_, m, k));
                }
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template <bool Is_even_K=true, bool Clear_OOB_K=true,
          typename Engine0, typename Layout0, typename Engine1, typename Layout1,
          typename Engine2, typename Layout2, typename Engine3, typename Layout3>
__forceinline__ __device__ void copy_rotary_contiguous(Tensor<Engine0, Layout0> const &S,
                                                       Tensor<Engine1, Layout1> &D,
                                                       Tensor<Engine2, Layout2> const &Cos,
                                                       Tensor<Engine2, Layout2> const &Sin,
                                                       Tensor<Engine3, Layout3> const &identity_MN,
                                                       const int max_MN, const int min_MN,
                                                       const int dim, const int rotary_dim) {
    CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
    CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
    CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D));     // MMA
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D));     // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D));     // MMA_K
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos));   // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos));   // MMA_K
    CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin));   // MMA_M
    CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin));   // MMA_K
    CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(Cos));   // MMA
    CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin));
    static_assert(decltype(size<0>(Cos))::value % 2 == 0);  // Since we do fast conversion from fp16/bf16 to fp32
    Tensor rCos = make_fragment_like(Cos);
    Tensor rSin = make_fragment_like(Sin);
    Tensor rS = make_fragment_like(S);
    Tensor rS_other = make_fragment_like(rS(_, 0, 0));
    #pragma unroll
    for (int m = 0; m < size<1>(S); ++m) {
        if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
            #pragma unroll
            for (int k = 0; k < size<2>(S); ++k) {
                if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
                    cute::copy(S(_, m, k), rS(_, m, k));
                    if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
                        const bool is_left = get<1>(identity_MN(0, 0, k)) < rotary_dim / 2;
                        Tensor gS_other = make_tensor(S(_, m, k).data() + (is_left ? rotary_dim / 2 : -rotary_dim / 2), S(_, m, k).layout());
                        cute::copy(gS_other, rS_other);
                        // if (cute::thread0()) { print_tensor(rS(_, m, k)); print_tensor(rS_other); }
                        Tensor gCos = make_tensor(Cos(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Cos(_, m, k).layout());
                        Tensor gSin = make_tensor(Sin(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Sin(_, m, k).layout());
                        cute::copy(gCos, rCos(_, m, k));
                        cute::copy(gSin, rSin(_, m, k));
                        // if (cute::thread0()) { print_tensor(rCos(_, m, k)); print_tensor(rSin(_, m, k)); }
                        Tensor S_fp32 = convert_type<float>(rS(_, m, k));
                        Tensor S_other_fp32 = convert_type<float>(rS_other);
                        Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
                        Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
                        #pragma unroll
                        for (int i = 0; i < size<0>(rS); ++i) {
                            S_fp32(i) = S_fp32(i) * cos_fp32(i) + S_other_fp32(i) * (is_left ? -sin_fp32(i) : sin_fp32(i));
                        }
                        // A copy is needed here for convert_type to work correctly.
                        Tensor S_fp32_copy = make_fragment_like(S_fp32);
                        cute::copy(S_fp32, S_fp32_copy);
                        using T = typename Engine0::value_type;
                        Tensor S_og_type = convert_type<T>(S_fp32_copy);
                        cute::copy(S_og_type, rS(_, m, k));
                        // if (cute::thread0()) { print_tensor(rS(_, m, k)); }
                    }
                    cute::copy(rS(_, m, k), D(_, m, k));
                } else if (Clear_OOB_K) {
                    cute::clear(D(_, m, k));
                }
            }
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

}  // namespace flash
candle/candle-flash-attn/kernels/rotary.h/0
{ "file_path": "candle/candle-flash-attn/kernels/rotary.h", "repo_id": "candle", "token_count": 5052 }
38
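For reference, the per-pair arithmetic that copy_rotary_interleaved applies above treats each consecutive element pair (x0, x1) as a complex number rotated by the angle whose cosine and sine are supplied. A minimal host-side sketch of that transform in Rust; the function and variable names here are illustrative, not part of the candle API:

// Reference implementation of the interleaved rotary rotation: for each pair,
// real = x0*cos - x1*sin and imag = x0*sin + x1*cos, exactly as in the kernel.
fn rotate_interleaved(x: &mut [f32], cos: &[f32], sin: &[f32]) {
    assert_eq!(x.len(), 2 * cos.len());
    assert_eq!(cos.len(), sin.len());
    for i in 0..cos.len() {
        let (x0, x1) = (x[2 * i], x[2 * i + 1]);
        x[2 * i] = x0 * cos[i] - x1 * sin[i]; // real part
        x[2 * i + 1] = x0 * sin[i] + x1 * cos[i]; // imaginary part
    }
}

fn main() {
    // Rotating the pair (1, 0) by 90 degrees (cos = 0, sin = 1) gives (0, 1).
    let mut x = vec![1.0, 0.0];
    rotate_interleaved(&mut x, &[0.0], &[1.0]);
    assert!(x[0].abs() < 1e-6 && (x[1] - 1.0).abs() < 1e-6);
}

The contiguous variant differs only in where the partner element comes from: instead of the adjacent element, it pairs position i with position i +/- rotary_dim / 2 (the "rotate half" layout), which is what the is_left offset arithmetic in copy_rotary_contiguous implements.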
#include "compatibility.cuh" #include<stdint.h> #include<cmath> // TODO: This is often used to check that the data is contiguous so that // kernels can be easily mapped. However this only returns true for row // major, if all the inputs are column major, we could apply the fast path // too (but we wouldn't if some of them are row major and some column major). __device__ bool is_contiguous( const size_t num_dims, const size_t *dims, const size_t *strides ) { size_t acc = 1; for (unsigned int d = 0; d < num_dims; d++) { unsigned int dim_idx = num_dims - 1 - d; if (dims[dim_idx] > 1 && acc != strides[dim_idx]) { return false; } acc *= dims[dim_idx]; } return true; } __device__ unsigned int get_strided_index( unsigned int idx, const size_t num_dims, const size_t *dims, const size_t *strides ) { unsigned int strided_i = 0; for (unsigned int d = 0; d < num_dims; d++) { unsigned int dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } __device__ unsigned int restrided( const unsigned int strided_i, const size_t num_dims, const size_t *dims, const size_t *strides, const size_t *new_strides ) { unsigned int idx = 0; for (int d = 0; d < num_dims; d++) { idx += (strides[d] == 0 ? 0 : (strided_i / strides[d]) % dims[d]) * new_strides[d]; } return idx; } // Sourced from https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 // Input must be less than or equal to 2 ^ 16 // used in reductions __device__ __forceinline__ unsigned int next_power_of_two(unsigned int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v++; return v; } // Efficiently computes the sum of each chunk in "data" of size chunk_len, and // stores the sums in out[i / chunk_len] template<typename T> __device__ void chunk_sum( const size_t chunk_len, const T data, T* out ) { __shared__ T buf[1024]; // assumes that threads where i >= numel have already exited unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int block_i = threadIdx.x; // Fall back to atomicAdd if chunk_len is small to reduce overhead if (chunk_len <= 2) { atomicAdd(out + i / chunk_len, data); return; } buf[block_i] = data; unsigned int chunk_i = i % chunk_len; unsigned int chunk_start = max((int)(block_i - chunk_i), 0); unsigned int chunk_end = min((unsigned int)(block_i + chunk_len - chunk_i), blockDim.x); chunk_i = block_i - chunk_start; size_t max_chunk_len = min(chunk_end - chunk_start, blockDim.x); size_t incr = next_power_of_two(max_chunk_len) >> 1; __syncthreads(); // Uses sequential addressing as discussed in // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf for (; incr > 0; incr >>= 1) { unsigned int block_i_2 = block_i + incr; if (block_i_2 < chunk_end && chunk_i < incr) { // This is sound because __syncthreads and the conditions above // ensure that no data races occur buf[block_i] += buf[block_i_2]; } __syncthreads(); } if (block_i == chunk_start) { atomicAdd(out + i / chunk_len, buf[block_i]); } } __device__ __forceinline__ bool isnang(float a) { return isnan(a); } __device__ __forceinline__ bool isnang(double a) { return isnan(a); } __device__ __forceinline__ float recipg(float a) { return 1.0 / a; } __device__ __forceinline__ double recipg(double a) { return 1.0 / a; } __device__ __forceinline__ float cosg(float a) { return cosf(a); } __device__ __forceinline__ double cosg(double a) { return cos(a); } __device__ __forceinline__ float sing(float a) { return sinf(a); } __device__ __forceinline__ double sing(double a) { 
return sin(a); } __device__ __forceinline__ float sqrtg(float a) { return sqrtf(a); } __device__ __forceinline__ double sqrtg(double a) { return sqrt(a); } __device__ __forceinline__ float powg(float a, float b) { return powf(a, b); } __device__ __forceinline__ double powg(double a, double b) { return pow(a, b); } __device__ __forceinline__ float tanhg(float a) { return tanhf(a); } __device__ __forceinline__ double tanhg(double a) { return tanh(a); } __device__ __forceinline__ float erfg(float a) { return erff(a); } __device__ __forceinline__ double erfg(double a) { return erf(a); } __device__ __forceinline__ float ceilg(float a) { return ceilf(a); } __device__ __forceinline__ double ceilg(double a) { return ceil(a); } __device__ __forceinline__ float floorg(float a) { return floorf(a); } __device__ __forceinline__ double floorg(double a) { return floor(a); } __device__ __forceinline__ float roundg(float a) { return roundf(a); } __device__ __forceinline__ double roundg(double a) { return round(a); } __device__ __forceinline__ float normcdfg(float a) { return normcdff(a); } __device__ __forceinline__ double normcdfg(double a) { return normcdf(a); } __device__ __forceinline__ float maxg(float a, float b) { return fmaxf(a, b); } __device__ __forceinline__ double maxg(double a, double b) { return fmax(a, b); } __device__ __forceinline__ float ming(float a, float b) { return fminf(a, b); } __device__ __forceinline__ double ming(double a, double b) { return fmin(a, b); } __device__ __forceinline__ float logg(float a) { return logf(a); } __device__ __forceinline__ double logg(double a) { return log(a); } __device__ __forceinline__ float expg(float a) { return expf(a); } __device__ __forceinline__ double expg(double a) { return exp(a); } __device__ __forceinline__ float absg(float a) { return fabsf(a); } __device__ __forceinline__ double absg(double a) { return fabs(a); } __device__ __forceinline__ float copysigng(float a, float b) { return copysignf(a, b); } __device__ __forceinline__ double copysigng(double a, double b) { return copysign(a, b); } __device__ __forceinline__ int64_t ming(int64_t a, int64_t b) { return min(a, b); } __device__ __forceinline__ int64_t maxg(int64_t a, int64_t b) { return max(a, b); } __device__ __forceinline__ uint32_t ming(uint32_t a, uint32_t b) { return min(a, b); } __device__ __forceinline__ uint32_t maxg(uint32_t a, uint32_t b) { return max(a, b); } __device__ __forceinline__ uint8_t ming(uint8_t a, uint8_t b) { return min(a, b); } __device__ __forceinline__ uint8_t maxg(uint8_t a, uint8_t b) { return max(a, b); } #if __CUDA_ARCH__ >= 530 __device__ __forceinline__ __half powg(__half a, __half b) { return __float2half(powf(__half2float(a), __half2float(b))); } __device__ __forceinline__ bool isnang(__half a) { return __hisnan(a); } __device__ __forceinline__ __half sqrtg(__half a) { return hsqrt(a); } __device__ __forceinline__ __half cosg(__half a) { return hcos(a); } __device__ __forceinline__ __half sing(__half a) { return hsin(a); } __device__ __forceinline__ __half recipg(__half a) { __half one = 1.0; return one / a; } __device__ __forceinline__ __half maxg(__half a, __half b) { return __hmax_nan(a, b); } __device__ __forceinline__ __half tanhg(__half a) { return __float2half(tanhf(__half2float(a))); } __device__ __forceinline__ __half erfg(__half a) { return __float2half(erff(__half2float(a))); } __device__ __forceinline__ __half ceilg(__half a) { return __float2half(ceilf(__half2float(a))); } __device__ __forceinline__ __half floorg(__half a) { return 
__float2half(floorf(__half2float(a))); } __device__ __forceinline__ __half roundg(__half a) { return __float2half(roundf(__half2float(a))); } __device__ __forceinline__ __half normcdfg(__half a) { return __float2half(normcdff(__half2float(a))); } __device__ __forceinline__ __half ming(__half a, __half b) { return __hmin_nan(a, b); } __device__ __forceinline__ __half logg(__half a) { return hlog(a); } __device__ __forceinline__ __half expg(__half a) { return hexp(a); } __device__ __forceinline__ __half absg(__half a) { return __habs(a); } __device__ __forceinline__ __half copysigng(__half a, __half b) { return __float2half(copysignf(__half2float(a), __half2float(b))); } #endif #if __CUDA_ARCH__ >= 800 __device__ __forceinline__ __nv_bfloat16 powg(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(powf(__bfloat162float(a), __bfloat162float(b))); } __device__ __forceinline__ bool isnang(__nv_bfloat16 a) { return __hisnan(a); } __device__ __forceinline__ __nv_bfloat16 sqrtg(__nv_bfloat16 a) { return hsqrt(a); } __device__ __forceinline__ __nv_bfloat16 cosg(__nv_bfloat16 a) { return hcos(a); } __device__ __forceinline__ __nv_bfloat16 sing(__nv_bfloat16 a) { return hsin(a); } __device__ __forceinline__ __nv_bfloat16 recipg(__nv_bfloat16 a) { __nv_bfloat16 one = 1.0; return one / a; } __device__ __forceinline__ __nv_bfloat16 maxg(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmax_nan(a, b); } __device__ __forceinline__ __nv_bfloat16 tanhg(__nv_bfloat16 a) { return __float2bfloat16(tanhf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 erfg(__nv_bfloat16 a) { return __float2bfloat16(erff(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 ceilg(__nv_bfloat16 a) { return __float2bfloat16(ceilf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 floorg(__nv_bfloat16 a) { return __float2bfloat16(floorf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 roundg(__nv_bfloat16 a) { return __float2bfloat16(roundf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 normcdfg(__nv_bfloat16 a) { return __float2bfloat16(normcdff(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 ming(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmin_nan(a, b); } __device__ __forceinline__ __nv_bfloat16 logg(__nv_bfloat16 a) { return hlog(a); } __device__ __forceinline__ __nv_bfloat16 expg(__nv_bfloat16 a) { return hexp(a); } __device__ __forceinline__ __nv_bfloat16 absg(__nv_bfloat16 a) { return __habs(a); } __device__ __forceinline__ __nv_bfloat16 copysigng(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(copysignf(__bfloat162float(a), __bfloat162float(b))); } #endif
candle/candle-kernels/src/cuda_utils.cuh/0
{ "file_path": "candle/candle-kernels/src/cuda_utils.cuh", "repo_id": "candle", "token_count": 3947 }
39
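The get_strided_index helper above is the core of how these kernels address non-contiguous tensors: it converts a linear index over the logical shape into a physical offset through arbitrary strides, walking dimensions from innermost to outermost. A minimal host-side sketch of the same logic in Rust, with illustrative names rather than candle's API:

// Reference implementation of get_strided_index: peel off one logical
// coordinate per dimension (innermost first) and accumulate stride offsets.
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut strided_i = 0;
    for d in (0..dims.len()).rev() {
        strided_i += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    strided_i
}

fn main() {
    // A 2x3 matrix stored column-major has strides (1, 2): element
    // (row 0, col 1) is linear index 1 in row-major order, but lives at
    // physical offset 0 * 1 + 1 * 2 = 2 in the column-major buffer.
    assert_eq!(get_strided_index(1, &[2, 3], &[1, 2]), 2);
}

This also makes the TODO on is_contiguous concrete: the row-major strides (3, 1) satisfy the accumulating check, while the column-major strides (1, 2) do not, even though that layout is dense too.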
use metal::{
    Buffer, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState, Device, Function,
    FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger,
};
use std::collections::HashMap;
use std::ffi::c_void;
use std::sync::RwLock;

mod utils;
pub use utils::BufferOffset;
use utils::{get_block_dims, linear_split, EncoderProvider};

const AFFINE: &str = include_str!("affine.metal");
const INDEXING: &str = include_str!("indexing.metal");
const UNARY: &str = include_str!("unary.metal");
const BINARY: &str = include_str!("binary.metal");
const TERNARY: &str = include_str!("ternary.metal");
const CAST: &str = include_str!("cast.metal");
const CONV: &str = include_str!("conv.metal");
const REDUCE: &str = include_str!("reduce.metal");
const RANDOM: &str = include_str!("random.metal");
// Current source: https://github.com/ivarflakstad/metal-flash-attention/tree/candle
const MFA: &[u8] = include_bytes!("libMetalFlashAttention.metallib");
const QUANTIZED: &str = include_str!("quantized.metal");
const SORT: &str = include_str!("sort.metal");

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Source {
    Affine,
    Indexing,
    Unary,
    Binary,
    Ternary,
    Cast,
    Reduce,
    Mfa,
    Conv,
    Random,
    Quantized,
    Sort,
}

pub mod copy2d {
    pub struct Kernel(pub &'static str);
    pub const FLOAT: Kernel = Kernel("copy2d_f32");
    pub const HALF: Kernel = Kernel("copy2d_f16");
    pub const BFLOAT: Kernel = Kernel("copy2d_bf16");
    pub const I64: Kernel = Kernel("copy2d_i64");
    pub const U32: Kernel = Kernel("copy2d_u32");
    pub const U8: Kernel = Kernel("copy2d_u8");
}

macro_rules! ops {
    ($($name:ident),+) => {
        pub mod contiguous {
            pub struct Kernel(pub &'static str);
            $(
                pub mod $name {
                    use super::Kernel;
                    pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32"));
                    pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16"));
                    pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16"));
                    pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64"));
                    pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32"));
                    pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8"));
                }
            )+
            pub mod copy {
                use super::Kernel;
                pub const FLOAT: Kernel = Kernel("copy_f32");
                pub const HALF: Kernel = Kernel("copy_f16");
                pub const BFLOAT: Kernel = Kernel("copy_bf16");
                pub const I64: Kernel = Kernel("copy_i64");
                pub const U32: Kernel = Kernel("copy_u32");
                pub const U8: Kernel = Kernel("copy_u8");
            }
        }
        pub mod contiguous_tiled {
            pub struct Kernel(pub &'static str);
            $(
                pub mod $name {
                    use super::Kernel;
                    pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_tiled"));
                    pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_tiled"));
                    pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_tiled"));
                    pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64_tiled"));
                    pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32_tiled"));
                    pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8_tiled"));
                }
            )+
            pub mod copy {
                use super::Kernel;
                pub const FLOAT: Kernel = Kernel("copy_f32_tiled");
                pub const HALF: Kernel = Kernel("copy_f16_tiled");
                pub const BFLOAT: Kernel = Kernel("copy_bf16_tiled");
                pub const I64: Kernel = Kernel("copy_i64_tiled");
                pub const U32: Kernel = Kernel("copy_u32_tiled");
                pub const U8: Kernel = Kernel("copy_u8_tiled");
            }
        }
        pub mod strided {
            pub struct Kernel(pub &'static str);
            $(
                pub mod $name {
                    use super::Kernel;
                    pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_strided"));
                    pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_strided"));
                    pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_strided"));
                    pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64_strided"));
                    pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32_strided"));
                    pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8_strided"));
                }
            )+
            pub mod copy {
                use super::Kernel;
                pub const FLOAT: Kernel = Kernel("copy_f32_strided");
                pub const HALF: Kernel = Kernel("copy_f16_strided");
                pub const BFLOAT: Kernel = Kernel("copy_bf16_strided");
                pub const I64: Kernel = Kernel("copy_i64_strided");
                pub const U32: Kernel = Kernel("copy_u32_strided");
                pub const U8: Kernel = Kernel("copy_u8_strided");
            }
        }
    };
}

pub mod unary {
    ops!(
        cos, sin, exp, sqr, sqrt, neg, log, gelu, abs, ceil, floor, relu, round, erf, gelu_erf,
        tanh, recip, silu, sign, sigmoid
    );
}
pub mod binary {
    ops!(add, sub, mul, div, min, max, eq, ne, le, lt, ge, gt);
}

#[derive(thiserror::Error, Debug)]
pub enum MetalKernelError {
    #[error("Could not lock kernel map: {0}")]
    LockError(String),
    #[error("Error while loading library: {0}")]
    LoadLibraryError(String),
    #[error("Error while loading function: {0:?}")]
    LoadFunctionError(String),
    #[error("Failed to create compute function")]
    FailedToCreateComputeFunction,
    #[error("Failed to create pipeline")]
    FailedToCreatePipeline(String),
    #[error("Invalid matmul arguments {lhs_stride:?} {rhs_stride:?} {mnk:?}")]
    MatMulNonContiguous {
        lhs_stride: Vec<usize>,
        rhs_stride: Vec<usize>,
        mnk: (usize, usize, usize),
    },
}

impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
    fn from(e: std::sync::PoisonError<T>) -> Self {
        Self::LockError(e.to_string())
    }
}

type Libraries = HashMap<Source, Library>;
type Pipelines = HashMap<(&'static str, Option<ConstantValues>), ComputePipelineState>;

#[derive(Debug)]
pub struct Kernels {
    libraries: RwLock<Libraries>,
    pipelines: RwLock<Pipelines>,
}

impl Default for Kernels {
    fn default() -> Self {
        Self::new()
    }
}

impl Kernels {
    pub fn new() -> Self {
        let libraries = RwLock::new(Libraries::new());
        let pipelines = RwLock::new(Pipelines::new());
        Self {
            libraries,
            pipelines,
        }
    }

    fn get_library_source(&self, source: Source) -> &'static str {
        match source {
            Source::Affine => AFFINE,
            Source::Unary => UNARY,
            Source::Binary => BINARY,
            Source::Ternary => TERNARY,
            Source::Indexing => INDEXING,
            Source::Cast => CAST,
            Source::Reduce => REDUCE,
            Source::Conv => CONV,
            Source::Random => RANDOM,
            Source::Quantized => QUANTIZED,
            Source::Sort => SORT,
            Source::Mfa => panic!("Invalid lib"),
        }
    }

    /// Load the given library from its [`source`].
    /// If it has previously been loaded, it is fetched from the cache instead.
    pub fn load_library(
        &self,
        device: &Device,
        source: Source,
    ) -> Result<Library, MetalKernelError> {
        let mut libraries = self.libraries.write()?;
        if let Some(lib) = libraries.get(&source) {
            Ok(lib.clone())
        } else {
            let lib = match source {
                Source::Mfa => {
                    let source_data = MFA;
                    device.new_library_with_data(source_data).map_err(|e| {
                        MetalKernelError::LoadLibraryError(format!(
                            "Candle metal requires macosx > 13.0 or higher, cannot load mfa: {e}"
                        ))
                    })?
                }
                source => {
                    let source_content = self.get_library_source(source);
                    device
                        .new_library_with_source(source_content, &CompileOptions::new())
                        .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
                }
            };
            libraries.insert(source, lib.clone());
            Ok(lib)
        }
    }

    fn load_function(
        &self,
        device: &Device,
        source: Source,
        name: &'static str,
        constants: Option<FunctionConstantValues>,
    ) -> Result<Function, MetalKernelError> {
        let func = self
            .load_library(device, source)?
            .get_function(name, constants)
            .map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
        Ok(func)
    }

    /// Load the given pipeline: loads the library from its source, then gets
    /// the function [`name`] from that library.
    fn load_pipeline_with_constants(
        &self,
        device: &Device,
        source: Source,
        name: &'static str,
        constants: Option<ConstantValues>,
    ) -> Result<ComputePipelineState, MetalKernelError> {
        let mut pipelines = self.pipelines.write()?;
        let key = (name, constants);
        if let Some(pipeline) = pipelines.get(&key) {
            Ok(pipeline.clone())
        } else {
            let (name, constants) = key;
            let func = self.load_function(
                device,
                source,
                name,
                constants.as_ref().map(|c| c.function_constant_values()),
            )?;
            let pipeline = device
                .new_compute_pipeline_state_with_function(&func)
                .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
            pipelines.insert((name, constants), pipeline.clone());
            Ok(pipeline)
        }
    }

    /// Load the given pipeline (without function constants): loads the library
    /// from its source, then gets the function [`name`] from that library.
    pub fn load_pipeline(
        &self,
        device: &Device,
        source: Source,
        name: &'static str,
    ) -> Result<ComputePipelineState, MetalKernelError> {
        self.load_pipeline_with_constants(device, source, name, None)
    }
}

#[allow(clippy::too_many_arguments)]
pub fn call_copy2d(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: copy2d::Kernel,
    input: &Buffer,
    output: &Buffer,
    d1: usize,
    d2: usize,
    src_s: usize,
    dst_s: usize,
    src_o_in_bytes: usize,
    dst_o_in_bytes: usize,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            d1 as i64,
            d2 as i64,
            src_s as i64,
            dst_s as i64,
            (input, src_o_in_bytes),
            (output, dst_o_in_bytes)
        )
    );
    let grid_dims = MTLSize {
        width: d1 as u64,
        height: d2 as u64,
        depth: 1,
    };
    let group_dims = get_block_dims(d1 as u64, d2 as u64, 1);
    encoder.use_resource(input, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_threads(grid_dims, group_dims);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_unary_contiguous_tiled(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: unary::contiguous_tiled::Kernel,
    length: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let tile_size = 2;
    let tiles = (length + tile_size - 1) / tile_size;
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, tiles);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_unary_contiguous(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: unary::contiguous::Kernel,
    length: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_unary_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: unary::strided::Kernel,
    shape: &[usize],
    input: BufferOffset,
    strides: &[usize],
    output: BufferOffset,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?;
    let length: usize = shape.iter().product();
    let num_dims: usize = shape.len();
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, num_dims, shape, strides, &input, &output));
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output.buffer, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_binary_contiguous(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: binary::contiguous::Kernel,
    length: usize,
    left: BufferOffset,
    right: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Binary, kernel_name.0)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, &left, &right, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
    encoder.use_resource(left.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(right.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_binary_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: binary::strided::Kernel,
    shape: &[usize],
    left_input: BufferOffset,
    left_strides: &[usize],
    right_input: BufferOffset,
    right_strides: &[usize],
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Binary, name.0)?;
    let num_dims: usize = shape.len();
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let width: usize = shape.iter().product();
    let length: usize = shape.iter().product();
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            length,
            num_dims,
            shape,
            left_strides,
            right_strides,
            &left_input,
            &right_input,
            output
        )
    );
    encoder.use_resource(left_input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(right_input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_cast_contiguous(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    length: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_cast_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    shape: &[usize],
    input: BufferOffset,
    input_strides: &[usize],
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    let length: usize = shape.iter().product();
    set_params!(
        encoder,
        (length, shape.len(), shape, input_strides, &input, output)
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_reduce_contiguous(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    length: usize,
    out_length: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let elements_to_sum = length / out_length;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, elements_to_sum, &input, output));
    let thread_group_count = MTLSize {
        width: out_length as u64,
        height: 1,
        depth: 1,
    };
    let width = std::cmp::min(
        pipeline.max_total_threads_per_threadgroup(),
        (elements_to_sum as u64 + 2 - 1) / 2,
    )
    .next_power_of_two();
    let thread_group_size = MTLSize {
        width,
        height: 1,
        depth: 1,
    };
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_reduce_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    shape: &[usize],
    strides: &[usize],
    out_length: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let length: usize = shape.iter().product();
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let elements_to_sum = length / out_length;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (shape.len(), shape, strides, elements_to_sum, &input, output)
    );
    let thread_group_count = MTLSize {
        width: out_length as u64,
        height: 1,
        depth: 1,
    };
    let width = std::cmp::min(
        pipeline.max_total_threads_per_threadgroup(),
        elements_to_sum as u64,
    )
    .next_power_of_two();
    let thread_group_size = MTLSize {
        width,
        height: 1,
        depth: 1,
    };
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_last_softmax(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    length: usize,
    elements_to_sum: usize,
    input: &Buffer,
    input_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (length, elements_to_sum, (input, input_offset), output)
    );
    let out_length = length / elements_to_sum;
    let thread_group_count = MTLSize {
        width: out_length as u64,
        height: 1,
        depth: 1,
    };
    let width = std::cmp::min(
        pipeline.max_total_threads_per_threadgroup(),
        elements_to_sum as u64,
    )
    .next_power_of_two();
    let thread_group_size = MTLSize {
        width,
        height: 1,
        depth: 1,
    };
    encoder.use_resource(input, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_rms_norm(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    length: usize,
    elements_to_sum: usize,
    eps: f32,
    input: &Buffer,
    input_offset: usize,
    alpha: &Buffer,
    alpha_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            length,
            elements_to_sum,
            (input, input_offset),
            output,
            (alpha, alpha_offset),
            eps
        )
    );
    let out_length = length / elements_to_sum;
    let thread_group_count = MTLSize {
        width: out_length as u64,
        height: 1,
        depth: 1,
    };
    let width = std::cmp::min(
        pipeline.max_total_threads_per_threadgroup(),
        elements_to_sum as u64,
    )
    .next_power_of_two();
    let thread_group_size = MTLSize {
        width,
        height: 1,
        depth: 1,
    };
    encoder.use_resource(input, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.set_threadgroup_memory_length(0, (width * 4).max(16) as u64);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_layer_norm(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    length: usize,
    elements_to_sum: usize,
    eps: f32,
    input: &Buffer,
    input_offset: usize,
    alpha: &Buffer,
    alpha_offset: usize,
    beta: &Buffer,
    beta_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            length,
            elements_to_sum,
            (input, input_offset),
            output,
            (alpha, alpha_offset),
            (beta, beta_offset),
            eps
        )
    );
    let out_length = length / elements_to_sum;
    let thread_group_count = MTLSize {
        width: out_length as u64,
        height: 1,
        depth: 1,
    };
    let width = std::cmp::min(
        pipeline.max_total_threads_per_threadgroup(),
        elements_to_sum as u64,
    )
    .next_power_of_two();
    let thread_group_size = MTLSize {
        width,
        height: 1,
        depth: 1,
    };
    encoder.use_resource(input, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.set_threadgroup_memory_length(0, (width * 8).max(32) as u64);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_rope_i(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    bh: usize,
    td: usize,
    src: &Buffer,
    src_offset: usize,
    cos: &Buffer,
    cos_offset: usize,
    sin: &Buffer,
    sin_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            bh,
            td,
            (src, src_offset),
            (cos, cos_offset),
            (sin, sin_offset),
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, (bh * td) / 2);
    encoder.use_resource(src, metal::MTLResourceUsage::Read);
    encoder.use_resource(cos, metal::MTLResourceUsage::Read);
    encoder.use_resource(sin, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_rope_thd(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    b: usize,
    t: usize,
    h: usize,
    d: usize,
    src: &Buffer,
    src_offset: usize,
    cos: &Buffer,
    cos_offset: usize,
    sin: &Buffer,
    sin_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            b,
            t,
            h,
            d,
            (src, src_offset),
            (cos, cos_offset),
            (sin, sin_offset),
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, (b * t * h * d) / 2);
    encoder.use_resource(src, metal::MTLResourceUsage::Read);
    encoder.use_resource(cos, metal::MTLResourceUsage::Read);
    encoder.use_resource(sin, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_rope(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    kernel_name: &'static str,
    bh: usize,
    td: usize,
    d: usize,
    src: &Buffer,
    src_offset: usize,
    cos: &Buffer,
    cos_offset: usize,
    sin: &Buffer,
    sin_offset: usize,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            bh,
            td,
            d,
            (src, src_offset),
            (cos, cos_offset),
            (sin, sin_offset),
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, (bh * td) / 2);
    encoder.use_resource(src, metal::MTLResourceUsage::Read);
    encoder.use_resource(cos, metal::MTLResourceUsage::Read);
    encoder.use_resource(sin, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_affine(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    size: usize,
    input: BufferOffset,
    output: &Buffer,
    mul: f32,
    add: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (size, mul, add, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_affine_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    input: BufferOffset,
    input_stride: &[usize],
    output: &Buffer,
    mul: f32,
    add: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let size: usize = shape.iter().product();
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            size,
            shape.len(),
            shape,
            input_stride,
            mul,
            add,
            &input,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_powf(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    size: usize,
    input: BufferOffset,
    output: &Buffer,
    mul: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (size, mul, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_powf_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    input: BufferOffset,
    input_stride: &[usize],
    output: &Buffer,
    mul: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let size: usize = shape.iter().product();
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (size, shape.len(), shape, input_stride, mul, &input, output)
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_elu(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    size: usize,
    input: BufferOffset,
    output: &Buffer,
    mul: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (size, mul, &input, output));
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_elu_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    input: BufferOffset,
    input_stride: &[usize],
    output: &Buffer,
    mul: f32,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
    let size: usize = shape.iter().product();
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (size, shape.len(), shape, input_stride, mul, &input, output)
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_where_cond_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    cond: BufferOffset,
    cond_stride: &[usize],
    left: BufferOffset,
    left_stride: &[usize],
    right: BufferOffset,
    right_stride: &[usize],
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Ternary, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    let size: usize = shape.iter().product();
    let rank = shape.len();
    set_params!(
        encoder,
        (
            size,
            rank,
            shape,
            cond_stride,
            left_stride,
            right_stride,
            &cond,
            &left,
            &right,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
    encoder.use_resource(cond.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(left.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(right.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_index_select(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    ids_size: usize,
    dim: usize,
    contiguous: bool,
    src_dims: &[usize],
    src_strides: &[usize],
    input: BufferOffset,
    ids: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let left_size: usize = shape[..dim].iter().product();
    let right_size: usize = shape[dim + 1..].iter().product();
    let src_dim_size = shape[dim];
    let dst_el = ids_size * left_size * right_size;
    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            dst_el,
            left_size,
            src_dim_size,
            right_size,
            ids_size,
            contiguous,
            src_dims,
            src_strides,
            &input,
            &ids,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_gather(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    ids_size: usize,
    dim: usize,
    input: BufferOffset,
    ids: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let left_size: usize = shape[..dim].iter().product();
    let right_size: usize = shape[dim + 1..].iter().product();
    let src_dim_size = shape[dim];
    let dst_el = ids_size * left_size * right_size;
    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            dst_el,
            left_size,
            src_dim_size,
            right_size,
            ids_size,
            &input,
            &ids,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_scatter_add(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    src_shape: &[usize],
    dst_shape: &[usize],
    dim: usize,
    input: BufferOffset,
    ids: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let left_size: usize = src_shape[..dim].iter().product();
    let right_size: usize = src_shape[dim + 1..].iter().product();
    let src_dim_size = src_shape[dim];
    let dst_el = left_size * right_size;
    let dst_dim_size = dst_shape[dim];
    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            dst_el,
            left_size,
            src_dim_size,
            right_size,
            dst_dim_size,
            &input,
            &ids,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_index_add(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    src_shape: &[usize],
    dst_shape: &[usize],
    ids_shape: &[usize],
    dim: usize,
    input: BufferOffset,
    ids: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let left_size: usize = src_shape[..dim].iter().product();
    let right_size: usize = src_shape[dim + 1..].iter().product();
    let src_dim_size = src_shape[dim];
    let dst_el = left_size * right_size;
    let dst_dim_size = dst_shape[dim];
    let ids_dim_size = ids_shape[0];
    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            dst_el,
            left_size,
            src_dim_size,
            right_size,
            dst_dim_size,
            ids_dim_size,
            &input,
            &ids,
            output
        )
    );
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[derive(Debug, PartialEq)]
pub enum Value {
    USize(usize),
    Bool(bool),
    F32(f32),
    U16(u16),
}

impl std::hash::Hash for Value {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        match self {
            Value::F32(v) => v.to_bits().hash(state),
            Value::USize(v) => v.hash(state),
            Value::U16(v) => v.hash(state),
            Value::Bool(v) => v.hash(state),
        }
    }
}

impl Value {
    fn data_type(&self) -> MTLDataType {
        match self {
            Value::USize(_) => MTLDataType::UInt,
            Value::F32(_) => MTLDataType::Float,
            Value::U16(_) => MTLDataType::UShort,
            Value::Bool(_) => MTLDataType::Bool,
        }
    }
}

/// Not true, good enough for our purposes.
impl Eq for Value {}

#[derive(Debug, Eq, PartialEq, Hash)]
struct ConstantValues(Vec<(usize, Value)>);

impl ConstantValues {
    pub fn new(values: Vec<(usize, Value)>) -> Self {
        Self(values)
    }

    fn function_constant_values(&self) -> FunctionConstantValues {
        let f = FunctionConstantValues::new();
        for (index, value) in &self.0 {
            let ty = value.data_type();
            match value {
                Value::USize(v) => {
                    f.set_constant_value_at_index(
                        v as *const usize as *const c_void,
                        ty,
                        *index as u64,
                    );
                }
                Value::F32(v) => {
                    f.set_constant_value_at_index(
                        v as *const f32 as *const c_void,
                        ty,
                        *index as u64,
                    );
                }
                Value::U16(v) => {
                    f.set_constant_value_at_index(
                        v as *const u16 as *const c_void,
                        ty,
                        *index as u64,
                    );
                }
                Value::Bool(v) => {
                    f.set_constant_value_at_index(
                        v as *const bool as *const c_void,
                        ty,
                        *index as u64,
                    );
                }
            }
        }
        f
    }
}

#[allow(clippy::too_many_arguments)]
pub fn call_gemm(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    (b, m, n, k): (usize, usize, usize, usize),
    lhs_stride: &[usize],
    lhs_offset: usize,
    lhs_buffer: &Buffer,
    rhs_stride: &[usize],
    rhs_offset: usize,
    rhs_buffer: &Buffer,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    assert!(rhs_stride.len() >= 2);
    assert!(lhs_stride.len() >= 2);
    let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
    let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
    let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
    let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
    // lhs has shape b, m, k
    // We also allow for the case where the stride on the minor dimension is not as
    // expected but there is a single element.
    let a_trans = if (lhs_m1 == 1 || k == 1) && (lhs_m2 == k || m == 1) {
        false
    } else if (lhs_m1 == m || k == 1) && (lhs_m2 == 1 || m == 1) {
        true
    } else {
        return Err(MetalKernelError::MatMulNonContiguous {
            lhs_stride: lhs_stride.to_vec(),
            rhs_stride: rhs_stride.to_vec(),
            mnk: (m, n, k),
        })?;
    };
    // rhs has shape b, k, n
    let b_trans = if (rhs_m1 == 1 || n == 1) && (rhs_m2 == n || k == 1) {
        false
    } else if (rhs_m1 == k || n == 1) && (rhs_m2 == 1 || k == 1) {
        true
    } else {
        return Err(MetalKernelError::MatMulNonContiguous {
            lhs_stride: lhs_stride.to_vec(),
            rhs_stride: rhs_stride.to_vec(),
            mnk: (m, n, k),
        })?;
    };
    let d_trans = false;
    let alpha = 1.0f32;
    let beta = 0.0f32;
    let batched = b > 1;
    let fused_activation = false;
    let fused_bias = false;
    let (m_simd, n_simd, k_simd, m_splits, n_splits) = if m == 1 {
        let m_simd = 8;
        let n_simd = 8;
        let k_simd = 64;
        let m_splits = 1;
        let n_splits = 1;
        (m_simd, n_simd, k_simd, m_splits, n_splits)
    } else {
        let m_simd = 40;
        let n_simd = 40;
        let k_simd = 32;
        let m_splits = 1;
        let n_splits = 1;
        (m_simd, n_simd, k_simd, m_splits, n_splits)
    };
    let constants = Some(ConstantValues::new(vec![
        (0, Value::USize(m)),
        (1, Value::USize(n)),
        (2, Value::USize(k)),
        (10, Value::Bool(a_trans)),
        (11, Value::Bool(b_trans)),
        (13, Value::Bool(d_trans)),
        (20, Value::F32(alpha)),
        (21, Value::F32(beta)),
        (100, Value::Bool(batched)),
        (101, Value::Bool(fused_activation)),
        // Garbage
        (102, Value::Bool(false)),
        (103, Value::Bool(false)),
        (113, Value::Bool(false)),
        (50_000, Value::Bool(false)),
        // End garbage
        (200, Value::U16(m_simd)),
        (201, Value::U16(n_simd)),
        (202, Value::U16(k_simd)),
        (210, Value::U16(m_splits)),
        (211, Value::U16(n_splits)),
        (50_001, Value::Bool(fused_bias)),
    ]));
    let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, name, constants)?;
    let m_group = m_simd * m_splits;
    let n_group = n_simd * n_splits;
    let a_block_length = m_group * k_simd;
    let b_block_length = k_simd * n_group;
    let mut block_elements = a_block_length + b_block_length;
    if (m % 8 != 0) && (n % 8 != 0) {
        let c_block_length = m_group * n_group;
        block_elements = std::cmp::max(c_block_length, block_elements)
    }
    if fused_bias {
        if d_trans {
            block_elements = std::cmp::max(block_elements, m_group);
        } else {
            block_elements = std::cmp::max(block_elements, n_group);
        }
    }
    let bytes = match name {
        "sgemm" => 4,
        "hgemm" => 2,
        "bgemm" => 2,
        other => {
            return Err(MetalKernelError::LoadLibraryError(format!(
                "{other} is not a valid kernel for gemm"
            )));
        }
    };
    let block_bytes = block_elements * bytes;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    encoder.set_threadgroup_memory_length(0, block_bytes.into());
    encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger);
    encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger);
    encoder.set_buffer(2, Some(output), 0);
    // TODO Tensor D
    let grid_z = b;
    if batched {
        let byte_stride_a: usize = lhs_stride[lhs_stride.len() - 3] * bytes as usize;
        let byte_stride_b: usize = rhs_stride[rhs_stride.len() - 3] * bytes as usize;
        let byte_stride_c = m * n * bytes as usize;
        // TODO byte_stride_d
        let byte_stride_d = 0;
        let buffer: Vec<u64> = vec![
            byte_stride_a as _,
            byte_stride_b as _,
            byte_stride_c as _,
            byte_stride_d as _,
        ];
        encoder.set_bytes(
            10,
            (buffer.len() * core::mem::size_of::<u64>()) as NSUInteger,
            buffer.as_ptr() as *const NSUInteger as *const c_void,
        );
    }
    let grid_size = MTLSize {
        width: divide(n, n_group.into()),
        height: divide(m, m_group.into()),
        depth: grid_z as NSUInteger,
    };
    let group_size = MTLSize {
        width: 32 * (m_splits as u64) * (n_splits as u64),
        height: 1,
        depth: 1,
    };
    encoder.use_resource(lhs_buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(rhs_buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(grid_size, group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_im2col1d_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    strides: &[usize],
    (k_size, stride, padding, dilation): (usize, usize, usize, usize),
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
    let l_out = (shape[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1;
    let dst_el = shape[0] * l_out * shape[1] * k_size;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (dst_el, l_out, k_size, stride, padding, dilation, shape, strides, &input, output)
    );
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_col2im1d(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    k_size: usize,
    stride: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
    let l_in = shape[1];
    let c_out = shape[2];
    let l_out = (l_in - 1) * stride + k_size;
    let dst_el = shape[0] * c_out * l_out;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (dst_el, l_out, l_in, c_out, k_size, stride, &input, output)
    );
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_im2col_strided(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    strides: &[usize],
    (h_k, w_k, stride, padding, dilation): (usize, usize, usize, usize, usize),
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
    let h = shape[2];
    let w = shape[3];
    let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1;
    let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1;
    let dst_el = shape[0] * h_out * w_out * shape[1] * h_k * w_k;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (
            dst_el,
            h_out,
            w_out,
            h_k,
            w_k,
            stride,
            padding,
            dilation,
            shape,
            strides,
            &input,
            output
        )
    );
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_upsample_nearest_2d(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    shape: &[usize],
    strides: &[usize],
    out_w: usize,
    out_h: usize,
    input: BufferOffset,
    output: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
    let dst_el = out_w * out_h * shape[0] * shape[1];
    let scale_w = shape[2] as f32 / out_w as f32;
    let scale_h = shape[3] as f32 / out_h as f32;
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(
        encoder,
        (out_w, out_h, scale_w, scale_h, shape, strides, &input, output)
    );
    encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
    encoder.use_resource(output, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_random_uniform(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    min: f32,
    max: f32,
    length: usize,
    seed: &Buffer,
    buffer: &Buffer,
) -> Result<(), MetalKernelError> {
    if min >= max {
        return Err(MetalKernelError::LoadLibraryError(
            "min must be less than max".to_string(),
        ));
    }
    let pipeline = kernels.load_pipeline(device, Source::Random, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let odd = (length % 2 != 0) as usize;
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, min, max, seed, buffer));
    encoder.use_resource(
        seed,
        metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write,
    );
    encoder.use_resource(buffer, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub fn call_random_normal(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    name: &'static str,
    mean: f32,
    stddev: f32,
    length: usize,
    seed: &Buffer,
    buffer: &Buffer,
) -> Result<(), MetalKernelError> {
    let pipeline = kernels.load_pipeline(device, Source::Random, name)?;
    let encoder = ep.encoder();
    let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
    let odd = (length % 2 != 0) as usize;
    let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd);
    encoder.set_compute_pipeline_state(&pipeline);
    set_params!(encoder, (length, mean, stddev, seed, buffer));
    encoder.use_resource(
        seed,
        metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write,
    );
    encoder.use_resource(buffer, metal::MTLResourceUsage::Write);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
    Ok(())
}

#[derive(Debug, Clone, Copy)]
pub enum GgmlDType {
    Q4_0,
    Q4_1,
    Q5_0,
    Q5_1,
    Q8_0,
    Q8_1,
    Q2K,
    Q3K,
    Q4K,
    Q5K,
    Q6K,
    Q8K,
    F16,
    F32,
}

#[allow(clippy::too_many_arguments)]
pub fn call_quantized_matmul_mv_t(
    device: &Device,
    ep: impl EncoderProvider,
    kernels: &Kernels,
    dtype: GgmlDType,
    (b, m, n, k): (usize, usize, usize, usize),
    lhs: &Buffer,
    lhs_offset: usize,
    rhs: &Buffer,
    dst_offset: usize,
    dst: &Buffer,
) -> Result<(), MetalKernelError> {
    // Everything is in reverse
    let ne00 = k as i64;
    let ne01 = n as i64;
    let ne02 = b as i64;
    let ne03 = 1i64;
    let nb00 = 0i64;
    let nb01 = 0i64;
    let nb02 = 0i64;
    let ne10 = k as i64;
    let ne11 = m as i64;
    let ne12
= b as i64; let ne13 = 1i64; let nb10 = 0i64; let nb11 = 0i64; let nb12 = 0i64; let ne0 = n as i64; let ne1 = m as i64; let r2: u32 = (ne12 / ne02) as u32; let r3: u32 = (ne13 / ne03) as u32; let (nth0, nth1, align) = match dtype { GgmlDType::Q4_0 | GgmlDType::Q4_1 | GgmlDType::Q5_0 | GgmlDType::Q5_1 | GgmlDType::Q8_0 | GgmlDType::Q8_1 => { let nth0 = 8; let nth1 = 8; let align = 8; (nth0, nth1, align) } GgmlDType::Q2K => { // Fixing a bug in Metal for GGML // https://github.com/ggerganov/llama.cpp/blob/b8109bc0139f15a5b321909f47510b89dca47ffc/ggml-metal.m#L1576 let nth0 = 2; let nth1 = 32; let align = 4; (nth0, nth1, align) } GgmlDType::Q4K => { let nth0 = 4; let nth1 = 8; let align = 4; (nth0, nth1, align) } GgmlDType::Q3K | GgmlDType::Q5K => { let nth0 = 2; let nth1 = 32; let align = 4; (nth0, nth1, align) } GgmlDType::Q6K => { let nth0 = 2; let nth1 = 32; let align = 2; (nth0, nth1, align) } GgmlDType::F16 | GgmlDType::Q8K => { // Original implem uses rows let nth0 = 32; let nth1 = 1; let align = 8; (nth0, nth1, align) } GgmlDType::F32 => { let nth0 = 32; let nth1 = 1; let align = 8; (nth0, nth1, align) } }; let thread_groups_count = MTLSize { width: divide(ne01 as usize, align), height: ne11 as u64, depth: (ne12 * ne13) as u64, }; let threads_per_threadgroup = MTLSize { width: nth0, height: nth1, depth: 1, }; let name = match dtype { GgmlDType::Q4_0 => "kernel_mul_mv_q4_0_f32", GgmlDType::Q4_1 => "kernel_mul_mv_q4_1_f32", GgmlDType::Q5_0 => "kernel_mul_mv_q5_0_f32", GgmlDType::Q5_1 => "kernel_mul_mv_q5_1_f32", GgmlDType::Q8_0 => "kernel_mul_mv_q8_0_f32", GgmlDType::Q8_1 => "kernel_mul_mv_q8_1_f32", GgmlDType::Q2K => "kernel_mul_mv_q2_K_f32", GgmlDType::Q3K => "kernel_mul_mv_q3_K_f32", GgmlDType::Q4K => "kernel_mul_mv_q4_K_f32", GgmlDType::Q5K => "kernel_mul_mv_q5_K_f32", GgmlDType::Q6K => "kernel_mul_mv_q6_K_f32", GgmlDType::Q8K => "kernel_mul_mv_q8_K_f32", GgmlDType::F16 => "kernel_mul_mv_f16_f32", GgmlDType::F32 => "kernel_mul_mv_f32_f32", }; let pipeline = kernels.load_pipeline(device, Source::Quantized, name)?; let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( rhs, (lhs, lhs_offset), (dst, dst_offset), ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3 ) ); encoder.use_resource(lhs, metal::MTLResourceUsage::Read); encoder.use_resource(rhs, metal::MTLResourceUsage::Read); encoder.use_resource(dst, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_groups_count, threads_per_threadgroup); Ok(()) } fn divide(m: usize, b: usize) -> NSUInteger { ((m + b - 1) / b) as NSUInteger } #[allow(clippy::too_many_arguments)] pub fn call_pool2d( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, name: &'static str, shape: &[usize], strides: &[usize], out_w: usize, out_h: usize, w_k: usize, h_k: usize, w_stride: usize, h_stride: usize, input: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let dst_el = out_w * out_h * shape[0] * shape[1]; let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?; let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, (w_k, h_k, w_stride, h_stride, shape, strides, input, output) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(output, 
metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_conv_transpose1d( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, name: &'static str, dilation: usize, stride: usize, padding: usize, out_padding: usize, c_out: usize, l_out: usize, b_size: usize, src_shape: &[usize], src_strides: &[usize], kernel_shape: &[usize], kernel_strides: &[usize], input: &Buffer, input_offset: usize, kernel: &Buffer, kernel_offset: usize, output: &Buffer, ) -> Result<(), MetalKernelError> { let dst_el = c_out * l_out * b_size; let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?; let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( l_out, stride, padding, out_padding, dilation, src_shape, src_strides, kernel_shape, kernel_strides, (input, input_offset), (kernel, kernel_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(kernel, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } pub struct CallConvTranspose2dCfg<'a> { pub dilation: usize, pub stride: usize, pub padding: usize, pub output_padding: usize, pub c_out: usize, pub out_w: usize, pub out_h: usize, pub b_size: usize, pub input_dims: &'a [usize], pub input_stride: &'a [usize], pub kernel_dims: &'a [usize], pub kernel_stride: &'a [usize], pub input_offset: usize, pub kernel_offset: usize, } #[allow(clippy::too_many_arguments)] pub fn call_conv_transpose2d( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, name: &'static str, cfg: CallConvTranspose2dCfg, input: &Buffer, kernel: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { let dst_el = cfg.c_out * cfg.out_w * cfg.out_h * cfg.b_size; let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?; let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el); let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( cfg.out_w, cfg.out_h, cfg.stride, cfg.padding, cfg.output_padding, cfg.dilation, cfg.input_dims, cfg.input_stride, cfg.kernel_dims, cfg.kernel_stride, (input, cfg.input_offset), (kernel, cfg.kernel_offset), output ) ); encoder.use_resource(input, metal::MTLResourceUsage::Read); encoder.use_resource(kernel, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_arg_sort( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, name: &'static str, nrows: usize, ncols: usize, ncols_pad: usize, src: BufferOffset, dst: &Buffer, ) -> Result<(), MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Sort, name)?; let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (&src, dst, ncols as i64, ncols_pad as i64)); let thread_group_count = MTLSize { width: 1, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: ncols_pad as u64, 
height: 1, depth: 1, }; encoder.use_resource(src.buffer, metal::MTLResourceUsage::Read); encoder.use_resource(dst, metal::MTLResourceUsage::Write); encoder.set_threadgroup_memory_length(0, (ncols_pad * 4).max(16) as u64); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } #[cfg(test)] mod tests;
candle/candle-metal-kernels/src/lib.rs/0
{ "file_path": "candle/candle-metal-kernels/src/lib.rs", "repo_id": "candle", "token_count": 31139 }
40
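A quick standalone sketch (not part of the crate) of the ceil-division that the `divide` helper above performs when sizing dispatch grids: rounding up guarantees a threadgroup for every partially-filled tile. The tile sizes below are illustrative only.

```rust
// Ceil-division as used for Metal grid sizing above.
fn divide(m: usize, b: usize) -> usize {
    (m + b - 1) / b
}

fn main() {
    // E.g. a 100x65 GEMM tiled with 40x40 groups (m_group = n_group = 40):
    let (m, n) = (100usize, 65usize);
    assert_eq!(divide(n, 40), 2); // grid width:  ceil(65 / 40)
    assert_eq!(divide(m, 40), 3); // grid height: ceil(100 / 40)
    println!("grid = {} x {}", divide(n, 40), divide(m, 40));
}
```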
mod benchmarks; use criterion::criterion_main; criterion_main!(benchmarks::layer_norm::benches, benchmarks::conv::benches);
candle/candle-nn/benches/bench_main.rs/0
{ "file_path": "candle/candle-nn/benches/bench_main.rs", "repo_id": "candle", "token_count": 38 }
41
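For context, `criterion_main!` above pulls a benchmark group named `benches` out of each listed module; a minimal hypothetical module could look like the sketch below (the `square` benchmark is made up, not part of candle).

```rust
use criterion::{criterion_group, criterion_main, Criterion};

fn bench_square(c: &mut Criterion) {
    // Real candle benches time tensor ops; this only shows the harness shape.
    c.bench_function("square", |b| {
        b.iter(|| std::hint::black_box(42u64).pow(2))
    });
}

criterion_group!(benches, bench_square);
criterion_main!(benches);
```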
pub mod activation; pub mod batch_norm; pub mod conv; pub mod embedding; pub mod encoding; pub mod func; pub mod group_norm; pub mod init; pub mod kv_cache; pub mod layer_norm; pub mod linear; pub mod loss; pub mod ops; pub mod optim; pub mod rnn; pub mod rotary_emb; pub mod sequential; pub mod var_builder; pub mod var_map; pub use activation::{prelu, Activation, PReLU}; pub use batch_norm::{batch_norm, BatchNorm, BatchNormConfig}; pub use conv::{ conv1d, conv1d_no_bias, conv2d, conv2d_no_bias, conv_transpose1d, conv_transpose1d_no_bias, conv_transpose2d, conv_transpose2d_no_bias, Conv1d, Conv1dConfig, Conv2d, Conv2dConfig, ConvTranspose1d, ConvTranspose1dConfig, ConvTranspose2d, ConvTranspose2dConfig, }; pub use embedding::{embedding, Embedding}; pub use func::{func, func_t, Func, FuncT}; pub use group_norm::{group_norm, GroupNorm}; pub use init::Init; pub use layer_norm::{layer_norm, rms_norm, LayerNorm, LayerNormConfig, RmsNorm}; pub use linear::{linear, linear_b, linear_no_bias, Linear}; pub use ops::Dropout; pub use optim::{AdamW, Optimizer, ParamsAdamW, SGD}; pub use rnn::{gru, lstm, GRUConfig, LSTMConfig, GRU, LSTM, RNN}; pub use sequential::{seq, Sequential}; pub use var_builder::VarBuilder; pub use var_map::VarMap; pub use candle::{Module, ModuleT};
candle/candle-nn/src/lib.rs/0
{ "file_path": "candle/candle-nn/src/lib.rs", "repo_id": "candle", "token_count": 486 }
42
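A minimal sketch of how the re-exports above compose into a model, assuming the `candle` and `candle-nn` crates as dependencies (shapes and values are arbitrary):

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::{linear, Module, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // VarMap tracks trainable variables; VarBuilder hands out named slots.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let lin = linear(4, 2, vb.pp("lin"))?;
    let xs = Tensor::randn(0f32, 1f32, (3, 4), &dev)?;
    let ys = lin.forward(&xs)?;
    assert_eq!(ys.dims(), &[3, 2]);
    Ok(())
}
```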
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::{to_vec0_round, to_vec2_round}; use anyhow::Result; use candle::{DType, Device, Tensor, Var}; use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD}; #[test] fn sgd_optim() -> Result<()> { let x = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![x.clone()], 0.1)?; let xt = x.as_tensor(); for _step in 0..100 { let loss = ((xt - 4.2)? * (xt - 4.2)?)?; sgd.backward_step(&loss)? } assert_eq!(x.to_scalar::<f32>()?, 4.199999); Ok(()) } /* The results of this test have been checked against the following PyTorch code. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.SGD(m.parameters(), lr=0.004, momentum=0.) for _step in range(1000): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn sgd_linear_regression() -> Result<()> { // Generate some linear data, y = 3.x1 + x2 - 2. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![w.clone(), b.clone()], 0.004)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..1000 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; sgd.backward_step(&loss)?; } assert_eq!(w.to_vec2::<f32>()?, &[[2.9983196, 0.99790204]]); assert_eq!(b.to_scalar::<f32>()?, -1.9796902); Ok(()) } /* The following test returns the same values as the PyTorch code below. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.AdamW(m.parameters(), lr=0.1) for _step in range(100): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn adamw_linear_regression() -> Result<()> { let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. 
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(vec![w.clone(), b.clone()], params)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..100 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; } assert_eq!(to_vec2_round(w.as_tensor(), 4)?, &[[2.7257, 0.7097]]); assert_eq!(to_vec0_round(b.as_tensor(), 4)?, 0.7873); Ok(()) } #[test] fn adamw_linear_regression_varmap() -> Result<()> { use candle_nn::Init::Const; // Similar as the previous test but using a VarMap. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; let mut var_map = candle_nn::VarMap::new(); let w = var_map.get((1, 2), "w", Const(0.), DType::F32, &Device::Cpu)?; let b = var_map.get((), "b", Const(0.), DType::F32, &Device::Cpu)?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(var_map.all_vars(), params)?; let lin = Linear::new(w, Some(b)); for _step in 0..100 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; } assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[2.7257, 0.7097]]); assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 0.7873); var_map.set([("w", Tensor::zeros((1, 2), DType::F32, &Device::Cpu)?)].into_iter())?; var_map.set([("b", Tensor::ones((), DType::F32, &Device::Cpu)?)].into_iter())?; assert_eq!(to_vec2_round(lin.weight(), 4)?, &[[0., 0.]]); assert_eq!(to_vec0_round(lin.bias().unwrap(), 4)?, 1.); Ok(()) }
candle/candle-nn/tests/optim.rs/0
{ "file_path": "candle/candle-nn/tests/optim.rs", "repo_id": "candle", "token_count": 2568 }
43
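The tests above use fixed learning rates; the `Optimizer` trait also exposes `set_learning_rate`, so a simple schedule can be sketched on top of it (the inverse-time decay here is purely illustrative):

```rust
use candle::{Device, Result, Var};
use candle_nn::{Optimizer, SGD};

fn main() -> Result<()> {
    let x = Var::new(10f32, &Device::Cpu)?;
    let mut sgd = SGD::new(vec![x.clone()], 0.1)?;
    for step in 0..200 {
        // Shrink the step size as optimization progresses.
        sgd.set_learning_rate(0.1 / (1.0 + step as f64 / 50.0));
        let loss = (x.as_tensor() * x.as_tensor())?;
        sgd.backward_step(&loss)?;
    }
    println!("x after decayed SGD: {}", x.to_scalar::<f32>()?);
    Ok(())
}
```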
import logging try: from .candle import * except ImportError as e: # If we are in development mode, or we did not bundle the DLLs, we try to locate them here # PyO3 won't give us any information about what DLLs are missing, so we can only try to load # the DLLs and re-import the module logging.warning("DLLs were not bundled with this package. Trying to locate them...") import os import platform def locate_cuda_dlls(): logging.warning("Locating CUDA DLLs...") # Try to locate CUDA_PATH environment variable cuda_path = os.environ.get("CUDA_PATH", None) if cuda_path: logging.warning(f"Found CUDA_PATH environment variable: {cuda_path}") if platform.system() == "Windows": cuda_path = os.path.join(cuda_path, "bin") else: cuda_path = os.path.join(cuda_path, "lib64") logging.warning(f"Adding {cuda_path} to DLL search path...") os.add_dll_directory(cuda_path) else: logging.warning("CUDA_PATH environment variable not found!") def locate_mkl_dlls(): # Try to locate ONEAPI_ROOT environment variable oneapi_root = os.environ.get("ONEAPI_ROOT", None) if oneapi_root: if platform.system() == "Windows": mkl_path = os.path.join( oneapi_root, "compiler", "latest", "windows", "redist", "intel64_win", "compiler" ) else: mkl_path = os.path.join(oneapi_root, "mkl", "latest", "lib", "intel64") logging.warning(f"Adding {mkl_path} to DLL search path...") os.add_dll_directory(mkl_path) else: logging.warning("ONEAPI_ROOT environment variable not found!") locate_cuda_dlls() locate_mkl_dlls() try: from .candle import * except ImportError as inner_e: # Chain the original error so the real cause stays in the traceback. raise ImportError("Could not locate DLLs. Please check the documentation for more information.") from inner_e __doc__ = candle.__doc__ if hasattr(candle, "__all__"): __all__ = candle.__all__
candle/candle-pyo3/py_src/candle/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/__init__.py", "repo_id": "candle", "token_count": 919 }
44
from typing import TypeVar, Union, Sequence _T = TypeVar("_T") _ArrayLike = Union[ _T, Sequence[_T], Sequence[Sequence[_T]], Sequence[Sequence[Sequence[_T]]], Sequence[Sequence[Sequence[Sequence[_T]]]], ] CPU: str = "cpu" CUDA: str = "cuda" Device = TypeVar("Device", CPU, CUDA) Scalar = Union[int, float] Index = Union[int, slice, None, "Ellipsis"] Shape = Union[int, Sequence[int]]
candle/candle-pyo3/py_src/candle/typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/typing/__init__.py", "repo_id": "candle", "token_count": 166 }
45
from candle import Tensor from candle import rand import pytest def test_absolute_shapes_are_valid(): a = rand((10, 20)) assert a.shape == (10, 20) b = rand(10, 20) assert b.shape == (10, 20) pytest.raises(OverflowError, lambda: rand((10, 20, -1))) pytest.raises(OverflowError, lambda: rand(-1, 20)) pytest.raises(TypeError, lambda: rand("foo", True)) def test_relative_shapes_are_valid(): a = rand(10, 20) a = a.reshape((1, -1)) assert a.shape == (1, 200) b = rand(10, 20) b = b.reshape(-1, 1) assert b.shape == (200, 1) c = rand(10, 20) pytest.raises(TypeError, lambda: c.reshape(1, "foo")) pytest.raises(ValueError, lambda: c.reshape(1, -2)) pytest.raises(ValueError, lambda: c.reshape((-2, 1))) pytest.raises(ValueError, lambda: c.reshape((0, 1))) pytest.raises(ValueError, lambda: c.reshape((1, -1, -1)))
candle/candle-pyo3/tests/native/test_shape.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_shape.py", "repo_id": "candle", "token_count": 385 }
46
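The inferred-dimension behavior tested above also exists on the Rust side, where (assuming the candle API as of this dump) `()` inside a shape tuple plays the role of Python's `-1`:

```rust
use candle::{Device, Result, Tensor};

fn main() -> Result<()> {
    let a = Tensor::rand(0f32, 1f32, (10, 20), &Device::Cpu)?;
    // `()` marks the single dimension to infer, like -1 in the Python API.
    let b = a.reshape((1, ()))?;
    assert_eq!(b.dims(), &[1, 200]);
    let c = a.reshape(((), 1))?;
    assert_eq!(c.dims(), &[200, 1]);
    Ok(())
}
```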
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! https://github.com/openai/CLIP //! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip use candle::{IndexOp, Result, Shape, Tensor, D}; use candle_nn as nn; use candle_nn::Module; use nn::Conv2dConfig; use super::{ text_model::{Activation, ClipEncoder}, EncoderConfig, }; #[derive(Debug, Clone)] pub struct ClipVisionConfig { pub embed_dim: usize, pub activation: Activation, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, #[allow(dead_code)] pub projection_dim: usize, pub num_channels: usize, pub image_size: usize, pub patch_size: usize, } impl ClipVisionConfig { // The config details can be found in the "vision_config" section of this json file: // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json pub fn vit_base_patch32() -> Self { Self { embed_dim: 768, activation: Activation::QuickGelu, intermediate_size: 3072, num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 512, num_channels: 3, image_size: 224, patch_size: 32, } } pub fn clip_vit_large_patch14_336() -> Self { Self { embed_dim: 1024, activation: Activation::QuickGelu, intermediate_size: 4096, num_hidden_layers: 24, num_attention_heads: 16, projection_dim: 768, num_channels: 3, image_size: 336, patch_size: 14, } } } // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L112 #[derive(Clone, Debug)] struct ClipVisionEmbeddings { patch_embedding: candle_nn::Conv2d, position_ids: Tensor, class_embedding: Tensor, position_embedding: candle_nn::Embedding, } impl ClipVisionEmbeddings { fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> { // originally nn.Parameter let class_embedding = if vs.contains_tensor("class_embedding") { vs.get(c.embed_dim, "class_embedding")? } else { Tensor::randn(0f32, 1f32, c.embed_dim, vs.device())? }; let num_patches = (c.image_size / c.patch_size).pow(2); let num_positions = num_patches + 1; let position_ids = Tensor::arange(0, num_positions as i64, vs.device())?; let conv2dconfig = Conv2dConfig { stride: c.patch_size, ..Default::default() }; let position_embedding = candle_nn::embedding(num_positions, c.embed_dim, vs.pp("position_embedding"))?; let patch_embedding = candle_nn::conv2d_no_bias( c.num_channels, c.embed_dim, c.patch_size, conv2dconfig, vs.pp("patch_embedding"), )?; Ok(Self { patch_embedding, position_ids, class_embedding, position_embedding, }) } } impl Module for ClipVisionEmbeddings { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let batch_size = pixel_values.shape().dims(); let patch_embeds = self .patch_embedding .forward(pixel_values)? .flatten_from(2)? 
.transpose(1, 2)?; let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?)); let class_embeds = self.class_embedding.expand(shape)?; let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; embeddings.broadcast_add(&position_embedding) } } // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L743 #[derive(Clone, Debug)] pub struct ClipVisionTransformer { embeddings: ClipVisionEmbeddings, encoder: ClipEncoder, pre_layer_norm: candle_nn::LayerNorm, final_layer_norm: candle_nn::LayerNorm, } impl ClipVisionTransformer { pub fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> { let embeddings = ClipVisionEmbeddings::new(vs.pp("embeddings"), c)?; let pre_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("pre_layrnorm"))?; let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Vision(c.clone()))?; let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, final_layer_norm, pre_layer_norm, }) } // required by LLaVA pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> { let hidden_states = pixel_values .apply(&self.embeddings)? .apply(&self.pre_layer_norm)?; let mut result = self.encoder.output_hidden_states(&hidden_states, None)?; let encoder_outputs = result.last().unwrap(); let pooled_output = encoder_outputs.i((.., 0, ..))?; result.push(self.final_layer_norm.forward(&pooled_output)?.clone()); Ok(result) } } impl Module for ClipVisionTransformer { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let hidden_states = pixel_values .apply(&self.embeddings)? .apply(&self.pre_layer_norm)?; let encoder_outputs = self.encoder.forward(&hidden_states, None)?; // https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787 // pooled_output = encoder_outputs[:, 0, :] let pooled_output = encoder_outputs.i((.., 0, ..))?; self.final_layer_norm.forward(&pooled_output) } }
candle/candle-transformers/src/models/clip/vision_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/clip/vision_model.rs", "repo_id": "candle", "token_count": 2831 }
47
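A tiny standalone check (not from the crate) of the position-embedding sizing used by `ClipVisionEmbeddings` above: one slot per patch plus one for the class token.

```rust
// num_positions = (image_size / patch_size)^2 + 1 (the class token).
fn num_positions(image_size: usize, patch_size: usize) -> usize {
    (image_size / patch_size).pow(2) + 1
}

fn main() {
    assert_eq!(num_positions(224, 32), 50); // vit_base_patch32
    assert_eq!(num_positions(336, 14), 577); // clip_vit_large_patch14_336
}
```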
pub mod autoencoder; pub mod model; pub mod sampling;
candle/candle-transformers/src/models/flux/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/flux/mod.rs", "repo_id": "candle", "token_count": 18 }
48
use candle::{DType, Device, Error as E, IndexOp, Module, Result, Tensor, D}; use candle_nn::{embedding, linear_b, rms_norm, Embedding, Linear, RmsNorm, VarBuilder}; // Equivalent to torch.repeat_interleave pub(crate) fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> { let img = img.unsqueeze(dim + 1)?; let mut dims = img.dims().to_vec(); dims[dim + 1] = repeats; img.broadcast_as(dims)?.flatten(dim, dim + 1) } pub mod speaker_encoder { use super::*; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub sampling_rate: usize, pub partial_n_frames: usize, pub model_hidden_size: usize, pub model_embedding_size: usize, pub model_num_layers: usize, pub mel_window_length: usize, pub mel_window_step: usize, pub mel_n_channels: usize, } impl Config { pub fn cfg() -> Self { Self { sampling_rate: 16_000, partial_n_frames: 160, model_hidden_size: 256, model_embedding_size: 256, model_num_layers: 3, mel_window_length: 25, mel_window_step: 10, mel_n_channels: 40, } } } pub struct Model { lstms: Vec<candle_nn::LSTM>, linear: Linear, cfg: Config, } type Slice = (usize, usize); impl Model { pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> { let mut lstms = Vec::with_capacity(cfg.model_num_layers); let vb_l = vb.pp("lstm"); for layer_idx in 0..cfg.model_num_layers { let c = candle_nn::LSTMConfig { layer_idx, ..Default::default() }; let lstm = candle_nn::lstm( cfg.mel_n_channels, cfg.model_hidden_size, c, vb_l.pp(layer_idx), )?; lstms.push(lstm) } let linear = linear_b( cfg.model_hidden_size, cfg.model_embedding_size, true, vb.pp("linear"), )?; Ok(Self { lstms, linear, cfg }) } fn compute_partial_slices( &self, n_samples: usize, rate: f64, min_coverage: f64, ) -> (Vec<Slice>, Vec<Slice>) { let c = &self.cfg; // Compute how many frames separate two partial utterances let samples_per_frame = c.sampling_rate * c.mel_window_step / 1000; let n_frames = n_samples / samples_per_frame + 1; let frame_step = (c.sampling_rate as f64 / rate / samples_per_frame as f64).round() as usize; let steps = (n_frames + frame_step).saturating_sub(c.partial_n_frames) + 1; // Compute the slices. let mut wav_slices = vec![]; let mut mel_slices = vec![]; for i in (0..steps).step_by(frame_step) { let mel_range = (i, i + c.partial_n_frames); let wav_range = ( i * samples_per_frame, (i + c.partial_n_frames) * samples_per_frame, ); mel_slices.push(mel_range); wav_slices.push(wav_range); } // Evaluate whether extra padding is warranted or not. 
let last_wav_range = match wav_slices.last() { None => return (wav_slices, mel_slices), Some(l) => *l, }; let coverage = (n_samples - last_wav_range.0) as f64 / (last_wav_range.1 - last_wav_range.0) as f64; if coverage > min_coverage && mel_slices.len() > 1 { mel_slices.pop(); wav_slices.pop(); } (wav_slices, mel_slices) } pub fn embed_utterance( &self, wav: &[f32], mel_filters: &[f32], rate: f64, min_c: f64, device: &Device, ) -> Result<Tensor> { let (wav_slices, mel_slices) = self.compute_partial_slices(wav.len(), rate, min_c); let max_wave_length = match wav_slices.last() { Some(v) => v.1, None => candle::bail!("empty wav slices"), }; let wav = if max_wave_length > wav.len() { let mut wav = wav.to_vec(); /* Vec::resize takes the target length, so pad up to max_wave_length rather than truncating to the difference. */ wav.resize(max_wave_length, 0.0); std::borrow::Cow::Owned(wav) } else { std::borrow::Cow::Borrowed(wav) }; let mel = crate::models::whisper::audio::log_mel_spectrogram_( wav.as_ref(), mel_filters, /* fft_size */ self.cfg.mel_window_length, /* fft_step */ self.cfg.mel_window_step, self.cfg.mel_n_channels, false, ); let mels = mel_slices .iter() .flat_map(|s| [mel[s.0], mel[s.1]]) .collect::<Vec<_>>(); let mels = Tensor::from_vec(mels, (mel_slices.len(), 2), device)?; let partial_embeds = self.forward(&mels)?; let raw_embed = partial_embeds.mean(0)?; let norm = raw_embed.sqr()?.sum_all()?.sqrt()?; raw_embed.broadcast_div(&norm) } } impl Module for Model { fn forward(&self, xs: &Tensor) -> Result<Tensor> { use candle_nn::RNN; // This is different from the Python transformers version as candle LSTM is batch first. let xs = xs.t()?; let mut xs = xs.clone(); for layer in self.lstms.iter() { let states = layer.seq(&xs)?; xs = layer.states_to_tensor(&states)?; } let xs = xs.t()?; let embeds_raw = xs.apply(&self.linear)?.relu()?; let norm = embeds_raw.sqr()?.sum_keepdim(1)?.sqrt()?; embeds_raw.broadcast_div(&norm) } } } type Rank = u32; pub mod tokenizers { use super::*; use std::collections::HashMap; pub struct BPE { pub re: fancy_regex::Regex, pub end_of_text: usize, pub offset: usize, pub ranks: HashMap<Vec<u8>, Rank>, span: tracing::Span, } impl BPE { pub fn from_json(json: &serde_json::Value, end_of_text: usize) -> Result<Self> { let json = match json.as_object() { None => candle::bail!("json value is not an object"), Some(json) => json, }; let re = match json.get("pat_str") { None => candle::bail!("json object has no pat_str field"), Some(pat_str) => match pat_str.as_str() { None => candle::bail!("pat_str field is not a string"), Some(pat_str) => fancy_regex::Regex::new(pat_str).map_err(E::wrap)?, }, }; let offset = match json.get("offset") { None => candle::bail!("json object has no offset field"), Some(offset) => match offset.as_u64() { None => candle::bail!("offset field is not a positive int"), Some(offset) => offset as usize, }, }; let mut ranks = HashMap::new(); for id in 0u8..=255 { ranks.insert(vec![id], id as u32); } let mergeable_ranks = match json.get("mergeable_ranks") { None => candle::bail!("json object has no mergeable_ranks field"), Some(mr) => match mr.as_object() { None => candle::bail!("mergeable_ranks is not an object"), Some(mr) => mr, }, }; for (key, value) in mergeable_ranks.iter() { let value = match value.as_u64() { None => candle::bail!("mergeable_ranks '{key}' is not a u64"), Some(value) => value as u32, }; if value < 256 { continue; } // No escaping for other keys.
let key = key.as_bytes().to_vec(); ranks.insert(key, value); } Ok(Self { re, end_of_text, offset, ranks, span: tracing::span!(tracing::Level::TRACE, "bpe"), }) } // Taken from: // https://github.com/openai/tiktoken/blob/1b9faf2779855124f05174adf1383e53689ed94b/src/lib.rs#L16C1-L82C2 fn _byte_pair_merge(&self, piece: &[u8]) -> Vec<(usize, Rank)> { // This is a vector of (start, rank). // The rank is of the pair starting at position start. let mut parts = Vec::with_capacity(piece.len() + 1); // Note that we hash bytes when indexing into `ranks`, not token pairs. As long as we train BPE // the way we currently do, this is equivalent. An easy way to break this would be to decouple // merge priority from token index or to prevent specific token merges. let mut min_rank: (Rank, usize) = (Rank::MAX, usize::MAX); for i in 0..piece.len() - 1 { let rank = *self.ranks.get(&piece[i..i + 2]).unwrap_or(&Rank::MAX); if rank < min_rank.0 { min_rank = (rank, i); } parts.push((i, rank)); } parts.push((piece.len() - 1, Rank::MAX)); parts.push((piece.len(), Rank::MAX)); let get_rank = { #[inline(always)] |parts: &Vec<(usize, Rank)>, i: usize| { if (i + 3) < parts.len() { // Similar to `piece[i..i + 2]` above. The +3 is because we haven't yet deleted // parts[i + 1], see comment in the main loop. *self .ranks .get(&piece[parts[i].0..parts[i + 3].0]) .unwrap_or(&Rank::MAX) } else { Rank::MAX } } }; // If you have n parts and m merges, this does O(mn) work. // We could do something with a heap and do O(m log n) work. // n is often very small so considerations like cache-locality outweigh the algorithmic // complexity downsides of the `parts` vector. while min_rank.0 != Rank::MAX { let i = min_rank.1; // Update parts[i] and parts[i - 1] before removing parts[i + 1], since // `parts.remove(i + 1)` will thrash the cache. 
if i > 0 { parts[i - 1].1 = get_rank(&parts, i - 1); } parts[i].1 = get_rank(&parts, i); parts.remove(i + 1); min_rank = (Rank::MAX, usize::MAX); for (i, &(_, rank)) in parts[..parts.len() - 1].iter().enumerate() { if rank < min_rank.0 { min_rank = (rank, i); } } } parts } pub fn byte_pair_encode(&self, piece: &[u8]) -> Vec<Rank> { if piece.is_empty() { return Vec::new(); } if piece.len() == 1 { return vec![self.ranks[piece]]; } assert!(piece.len() > 1); self._byte_pair_merge(piece) .windows(2) .map(|part| self.ranks[&piece[part[0].0..part[1].0]]) .collect() } pub fn encode(&self, text: &str) -> Result<Vec<u32>> { let _enter = self.span.enter(); let mut bpe_tokens: Vec<u32> = Vec::new(); for word in self.re.find_iter(text) { let word = word.map_err(E::wrap)?; let word_tokens = self.byte_pair_encode(word.as_str().as_bytes()); for &token in word_tokens.iter() { bpe_tokens.push(token + self.offset as u32) } } bpe_tokens.push((self.end_of_text + self.offset) as u32); Ok(bpe_tokens) } } } pub mod gpt { use super::*; #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub enum NormType { LayerNorm, RMSNorm, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub enum AttnKernelType { Fa2, TorchAttn, Hand, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub enum NonLinearityType { Gelu, Swiglu, } enum Norm { RMSNorm(candle_nn::RmsNorm), LayerNorm(candle_nn::LayerNorm), } // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L27 #[derive(Debug, Clone)] pub struct Config { pub block_size: usize, pub vocab_sizes: Vec<usize>, pub target_vocab_sizes: Vec<usize>, pub n_layer: usize, pub n_head: usize, pub n_embd: usize, pub bias: bool, pub causal: bool, pub spk_emb_on_text: bool, pub norm_type: NormType, pub rmsnorm_eps: f64, pub nonlinearity_type: NonLinearityType, pub swiglu_multiple_of: Option<usize>, pub attn_kernel_type: AttnKernelType, pub kv_cache_enabled: bool, } impl Config { pub fn cfg1b_v0_1() -> Self { Self { n_layer: 6, n_head: 6, n_embd: 384, block_size: 1024, bias: false, vocab_sizes: vec![1538, 1025], causal: false, target_vocab_sizes: vec![1025, 1025, 1025, 1025, 1025, 1025], swiglu_multiple_of: Some(256), norm_type: NormType::LayerNorm, kv_cache_enabled: false, attn_kernel_type: AttnKernelType::TorchAttn, spk_emb_on_text: true, nonlinearity_type: NonLinearityType::Gelu, rmsnorm_eps: 1e-5, } } } impl Norm { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { match cfg.norm_type { NormType::RMSNorm => { let rms_norm = candle_nn::rms_norm(cfg.n_embd, cfg.rmsnorm_eps, vb)?; Ok(Self::RMSNorm(rms_norm)) } NormType::LayerNorm => { let ln_cfg = candle_nn::LayerNormConfig { affine: cfg.bias, ..Default::default() }; let layer_norm = candle_nn::layer_norm(cfg.n_embd, ln_cfg, vb)?; Ok(Self::LayerNorm(layer_norm)) } } } } impl Module for Norm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::RMSNorm(m) => m.forward(xs), Self::LayerNorm(m) => m.forward(xs), } } } // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/attn.py#L18 struct SelfAttention { c_attn: Linear, c_proj: Linear, n_head: usize, span: tracing::Span, } impl SelfAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { // The different attention variants are likely to be identical but still we only accept // TorchAttn for now. 
if cfg.attn_kernel_type != AttnKernelType::TorchAttn { candle::bail!("only TorchAttn is supported") } if cfg.kv_cache_enabled { candle::bail!("kv_cache_enabled=true is not supported") } let c_attn = linear_b(cfg.n_embd, cfg.n_embd * 3, cfg.bias, vb.pp("c_attn"))?; let c_proj = linear_b(cfg.n_embd, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?; Ok(Self { c_attn, c_proj, n_head: cfg.n_head, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } } impl Module for SelfAttention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, t, c) = xs.dims3()?; let c_x = xs .apply(&self.c_attn)? .reshape((b, t, 3, self.n_head, c / self.n_head))?; let q = c_x.i((.., .., 0))?; let k = c_x.i((.., .., 1))?; let v = c_x.i((.., .., 2))?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (k.dim(D::Minus1)? as f64).sqrt())?; // TODO: causal mask let att = candle_nn::ops::softmax_last_dim(&att)?; let att = att.matmul(&v)?.transpose(1, 2)?; att.reshape((b, t, c))?.apply(&self.c_proj) } } // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/layers.py#L43 #[allow(clippy::upper_case_acronyms)] enum MLP { Gelu { c_fc: Linear, c_proj: Linear, span: tracing::Span, }, Swiglu { w1: Linear, w3: Linear, c_proj: Linear, span: tracing::Span, }, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_dim = 4 * cfg.n_embd; let slf = match cfg.nonlinearity_type { NonLinearityType::Gelu => { let c_fc = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("c_fc"))?; let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?; Self::Gelu { c_fc, c_proj, span: tracing::span!(tracing::Level::TRACE, "mlp-gelu"), } } NonLinearityType::Swiglu => { let hidden_dim = (2 * hidden_dim) / 3; let swiglu_multiple_of = match cfg.swiglu_multiple_of { None => candle::bail!("swiglu-multiple-of has to be set"), Some(smo) => smo, }; let hidden_dim = swiglu_multiple_of * (hidden_dim + swiglu_multiple_of - 1) / swiglu_multiple_of; let w1 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w1"))?; let w3 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w3"))?; let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?; Self::Swiglu { w1, w3, c_proj, span: tracing::span!(tracing::Level::TRACE, "mlp-swiglu"), } } }; Ok(slf) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::Gelu { c_fc, c_proj, span } => { let _enter = span.enter(); xs.apply(c_fc)?.gelu()?.apply(c_proj) } Self::Swiglu { w1, w3, c_proj, span, } => { let _enter = span.enter(); let w1 = xs.apply(w1)?; let w3 = xs.apply(w3)?; (w1.silu()? 
* w3)?.apply(c_proj) } } } } // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/combined.py#L7 struct Block { ln_1: Norm, ln_2: Norm, attn: SelfAttention, mlp: MLP, span: tracing::Span, } impl Block { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln_1 = Norm::new(cfg, vb.pp("ln_1"))?; let ln_2 = Norm::new(cfg, vb.pp("ln_2"))?; let attn = SelfAttention::new(cfg, vb.pp("attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Block { ln_1, ln_2, attn, mlp, span: tracing::span!(tracing::Level::TRACE, "gpt-block"), }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = (xs + xs.apply(&self.ln_1)?.apply(&self.attn))?; let xs = (&xs + xs.apply(&self.ln_2)?.apply(&self.mlp))?; Ok(xs) } } // https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L79 #[allow(clippy::upper_case_acronyms)] pub struct Model { wtes: Vec<candle_nn::Embedding>, wpe: candle_nn::Embedding, h: Vec<Block>, ln_f: Norm, lm_heads: Vec<Linear>, cfg: Config, dtype: DType, span: tracing::Span, } impl Model { pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> { let vb_t = vb.pp("transformer"); let ln_f = Norm::new(&cfg, vb_t.pp("ln_f"))?; let mut wtes = Vec::with_capacity(cfg.vocab_sizes.len()); let vb_w = vb_t.pp("wtes"); for (idx, vocab_size) in cfg.vocab_sizes.iter().enumerate() { let wte = candle_nn::embedding(*vocab_size, cfg.n_embd, vb_w.pp(idx))?; wtes.push(wte) } let wpe = candle_nn::embedding(cfg.block_size, cfg.n_embd, vb_t.pp("wpe"))?; let mut h = Vec::with_capacity(cfg.n_layer); let vb_h = vb_t.pp("h"); for idx in 0..cfg.n_layer { let block = Block::new(&cfg, vb_h.pp(idx))?; h.push(block) } let mut lm_heads = Vec::with_capacity(cfg.target_vocab_sizes.len()); let vb_l = vb.pp("lm_heads"); for (idx, vocab_size) in cfg.target_vocab_sizes.iter().enumerate() { let head = linear_b(cfg.n_embd, *vocab_size, false, vb_l.pp(idx))?; lm_heads.push(head) } Ok(Self { wtes, wpe, h, ln_f, lm_heads, cfg, dtype: vb.dtype(), span: tracing::span!(tracing::Level::TRACE, "gpt"), }) } pub fn config(&self) -> &Config { &self.cfg } pub fn forward(&self, idx: &Tensor) -> Result<Vec<Tensor>> { let _enter = self.span.enter(); let device = idx.device(); let (b, _num_hierarchies, t) = idx.dims3()?; let pos = Tensor::arange(0u32, t as u32, device)?; let pos_emb = pos.apply(&self.wpe)?; let mut tok_emb = Tensor::zeros((b, t, self.cfg.n_embd), self.dtype, device)?; for (wte_idx, wte) in self.wtes.iter().enumerate() { let emb = idx.i((.., wte_idx, ..))?.apply(wte)?; tok_emb = (tok_emb + emb)?; } // TODO: speaker embs. let spk_emb = 0f64; let mut xs = (pos_emb.broadcast_add(&tok_emb)? + spk_emb)?; for block in self.h.iter() { xs = xs.apply(block)? } let xs = xs.apply(&self.ln_f)?; let mut logits = Vec::with_capacity(self.lm_heads.len()); for lm_head in self.lm_heads.iter() { // non-causal mode only. 
let ys = xs.apply(lm_head)?; logits.push(ys) } Ok(logits) } } } pub mod transformer { use super::*; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub block_size: usize, pub vocab_size: usize, pub n_layer: usize, pub n_head: usize, pub dim: usize, pub speaker_emb_dim: usize, pub intermediate_size: Option<usize>, pub n_local_heads: Option<usize>, pub norm_eps: f64, } impl Config { pub fn cfg1b_v0_1() -> Self { Self { n_layer: 24, n_head: 16, dim: 2048, vocab_size: 2562, speaker_emb_dim: 256, block_size: 2048, intermediate_size: None, n_local_heads: None, norm_eps: 1e-5, } } pub(crate) fn n_local_heads(&self) -> usize { self.n_local_heads.unwrap_or(self.n_head) } pub(crate) fn head_dim(&self) -> usize { self.dim / self.n_head } pub(crate) fn intermediate_size(&self) -> usize { match self.intermediate_size { Some(intermediate_size) => intermediate_size, None => { let hidden_dim = self.dim * 4; let n_hidden = ((2 * hidden_dim) as f64 / 3.) as usize; (n_hidden + 255) / 256 * 256 } } } } #[derive(Debug, Clone)] struct FeedForward { w1: Linear, w2: Linear, w3: Linear, span: tracing::Span, } impl FeedForward { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let i_size = cfg.intermediate_size(); let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?; let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?; let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?; Ok(Self { w1, w2, w3, span: tracing::span!(tracing::Level::TRACE, "feed-forward"), }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?; swiglu.apply(&self.w2) } } #[derive(Debug, Clone)] struct Attention { wqkv: Linear, wo: Linear, dim: usize, kv_size: usize, n_local_heads: usize, head_dim: usize, n_head: usize, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_local_heads = cfg.n_local_heads(); let head_dim = cfg.head_dim(); let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim; let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?; let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?; Ok(Self { wqkv, wo, dim: cfg.dim, kv_size: n_local_heads * head_dim, n_local_heads, head_dim, n_head: cfg.n_head, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "feed-forward"), }) } fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seqlen, _) = xs.dims3()?; let qkv = xs.apply(&self.wqkv)?; let q = qkv.narrow(D::Minus1, 0, self.dim)?; let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?; let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?; let q = q .reshape((b_sz, seqlen, self.n_head, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? 
.transpose(1, 2)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 2)?; let v = Tensor::cat(&[prev_v, &v], 2)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?; let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?; let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?; let attn_weights = attn_weights.broadcast_add(mask)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; attn_output .transpose(1, 2)? .reshape((b_sz, seqlen, self.dim))? .apply(&self.wo) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Block { attention: Attention, feed_forward: FeedForward, ffn_norm: RmsNorm, attention_norm: RmsNorm, span: tracing::Span, } impl Block { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new(cfg, vb.pp("attention"))?; let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?; let ffn_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?; let attention_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?; Ok(Self { attention, feed_forward, ffn_norm, attention_norm, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hs = xs.apply(&self.attention_norm)?; let hs = (xs + self.attention.forward(&hs, pos, mask))?; &hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward) } fn clear_kv_cache(&mut self) { self.attention.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { tok_embeddings: Embedding, pos_embeddings: Embedding, speaker_cond_pos: Linear, layers: Vec<Block>, norm: RmsNorm, output: Linear, spk_cond_mask: Tensor, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let tok_embeddings = embedding(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?; let pos_embeddings = embedding(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?; let speaker_cond_pos = linear_b( cfg.speaker_emb_dim, cfg.dim, false, vb.pp("speaker_cond_pos"), )?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = Block::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("norm"))?; let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?; let dtype = vb.dtype(); let spk_cond_mask = Tensor::cat( &[ Tensor::ones((1, 1, cfg.dim), dtype, vb.device())?, Tensor::zeros((1, 1, cfg.dim), dtype, vb.device())?, ], 0, )?; Ok(Self { tok_embeddings, pos_embeddings, speaker_cond_pos, layers, norm, output, spk_cond_mask, span: tracing::span!(tracing::Level::TRACE, "transformer"), }) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_sz, seqlen) = xs.dims2()?; let mask: Vec<_> = (0..seqlen) .flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. 
})) .collect(); let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?; let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?; let tok_embeddings = xs.apply(&self.tok_embeddings)?; let pos_embeddings = input_pos.apply(&self.pos_embeddings)?; let mut xs = tok_embeddings .broadcast_add(&pos_embeddings)? .broadcast_add( &spk_emb .apply(&self.speaker_cond_pos)? .broadcast_mul(&self.spk_cond_mask)?, )?; let mask = mask.to_dtype(xs.dtype())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, pos, &mask)? } xs.narrow(1, seqlen - 1, 1)? .apply(&self.norm)? .apply(&self.output) } } } pub mod adapters { // https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/tilted_encodec.py pub struct TiltedEncodec { end_of_audio_token: u32, span: tracing::Span, } impl TiltedEncodec { pub fn new(end_of_audio_token: u32) -> Self { Self { end_of_audio_token, span: tracing::span!(tracing::Level::TRACE, "tilted-encodec"), } } pub fn decode(&self, tokens: &[Vec<u32>]) -> (Vec<u32>, Vec<Vec<u32>>) { let _enter = self.span.enter(); let mut text_ids = vec![]; let mut extracted_audio_ids = vec![]; let mut min_audio_ids_len = usize::MAX; for (book_id, tokens) in tokens.iter().enumerate() { let mut audio_ids = vec![]; for &t in tokens.iter() { #[allow(clippy::comparison_chain)] if t > self.end_of_audio_token { if book_id == 0 { text_ids.push(t) } } else if t < self.end_of_audio_token { audio_ids.push(t) } } min_audio_ids_len = usize::min(min_audio_ids_len, audio_ids.len()); extracted_audio_ids.push(audio_ids) } for audio_ids in extracted_audio_ids.iter_mut() { audio_ids.truncate(min_audio_ids_len) } (text_ids, extracted_audio_ids) } } // https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/flattened_encodec.py#L4 pub struct FlattenedInterleavedEncodec2Codebook { end_of_audio_token: u32, span: tracing::Span, } impl FlattenedInterleavedEncodec2Codebook { pub fn new(end_of_audio_token: u32) -> Self { Self { end_of_audio_token, span: tracing::span!(tracing::Level::TRACE, "encodec2codebook"), } } pub fn decode(&self, tokens: &[u32]) -> (Vec<u32>, Vec<u32>, Vec<u32>) { let _enter = self.span.enter(); let mut text_ids = vec![]; let mut audio_ids1 = vec![]; let mut audio_ids2 = vec![]; for &t in tokens.iter() { #[allow(clippy::comparison_chain)] if t < self.end_of_audio_token { audio_ids1.push(t) } else if t < 2 * self.end_of_audio_token { audio_ids2.push(t - self.end_of_audio_token) } else { text_ids.push(t) } } (text_ids, audio_ids1, audio_ids2) } } }
candle/candle-transformers/src/models/metavoice.rs/0
{ "file_path": "candle/candle-transformers/src/models/metavoice.rs", "repo_id": "candle", "token_count": 21694 }
49
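To make the `repeat_interleave` helper at the top of the file above concrete, here is a small check of the unsqueeze/broadcast/flatten trick on a 2x2 tensor (standalone sketch, not part of the crate):

```rust
use candle::{Device, Result, Tensor};

fn main() -> Result<()> {
    let t = Tensor::new(&[[1u32, 2], [3, 4]], &Device::Cpu)?; // shape (2, 2)
    // repeat_interleave(t, 2, dim = 1): insert an axis, broadcast it, flatten.
    let r = t.unsqueeze(2)?.broadcast_as((2, 2, 2))?.flatten(1, 2)?;
    assert_eq!(r.to_vec2::<u32>()?, [[1, 1, 2, 2], [3, 3, 4, 4]]);
    Ok(())
}
```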
pub mod text_model;
candle/candle-transformers/src/models/openclip/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/openclip/mod.rs", "repo_id": "candle", "token_count": 7 }
50
use std::collections::HashMap; use candle::quantized::gguf_file; use candle::quantized::QTensor; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{kv_cache::KvCache, Embedding, RmsNorm}; #[derive(Debug, Clone)] struct QLinear { inner: candle::quantized::QMatMul, span: tracing::Span, } impl QLinear { fn new<R: std::io::Read + std::io::Seek>( ct: &gguf_file::Content, r: &mut R, name: &str, device: &Device, ) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); let w = ct.tensor(r, &format!("{name}.weight"), device)?; let inner = candle::quantized::QMatMul::from_qtensor(w)?; Ok(Self { inner, span }) } } impl Module for QLinear { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] struct Mlp { ffn_up: QLinear, ffn_down: QLinear, i_size: usize, } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let up_states = xs.apply(&self.ffn_up)?; let gate = up_states.narrow(D::Minus1, 0, self.i_size)?; let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?; let up_states = (up_states * gate.silu()?)?; up_states.apply(&self.ffn_down) } } fn rms_norm(w: QTensor, eps: f64) -> Result<RmsNorm> { let w = w.dequantize(&w.device())?; let rms = RmsNorm::new(w, eps); Ok(rms) } #[derive(Debug, Clone)] struct LayerWeights { attn_qkv: QLinear, attn_output: QLinear, attn_norm: RmsNorm, ffn_norm: RmsNorm, mlp: Mlp, n_head: usize, n_kv_head: usize, head_dim: usize, cos: Tensor, sin: Tensor, neg_inf: Tensor, kv_cache: KvCache, use_flash_attn: bool, span_attn: tracing::Span, span_rot: tracing::Span, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> { let shape = mask.shape(); let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?; Ok(m) } impl LayerWeights { fn apply_rotary_emb(&self, xs: &Tensor, index_pos: usize) -> Result<Tensor> { let _enter = self.span_rot.enter(); let (_b_sz, _h, seq_len, _n_embd) = xs.dims4()?; let cos = self.cos.narrow(0, index_pos, seq_len)?; let sin = self.sin.narrow(0, index_pos, seq_len)?; candle_nn::rotary_emb::rope(&xs.contiguous()?, &cos, &sin) } fn forward_attn( &mut self, x: &Tensor, mask: Option<&Tensor>, index_pos: usize, ) -> Result<Tensor> { let _enter = self.span_attn.enter(); let (b_sz, seq_len, n_embd) = x.dims3()?; let qkv = self.attn_qkv.forward(x)?; let query_pos = self.n_head * self.head_dim; let q = qkv.narrow(D::Minus1, 0, query_pos)?; let k = qkv.narrow(D::Minus1, query_pos, self.n_kv_head * self.head_dim)?; let v = qkv.narrow( D::Minus1, query_pos + self.n_kv_head * self.head_dim, self.n_kv_head * self.head_dim, )?; let q = q .reshape((b_sz, seq_len, self.n_head, self.head_dim))? .transpose(1, 2)?; /* k was narrowed to n_kv_head * head_dim columns above, so reshape it with n_kv_head heads; using n_head only works when the two counts coincide. */ let k = k .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?; let q = self.apply_rotary_emb(&q, index_pos)?.contiguous()?; let k = self.apply_rotary_emb(&k, index_pos)?; let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?; let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?; let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?; let y = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = q.to_dtype(DType::BF16)?.transpose(1, 2)?; let k = k.to_dtype(DType::BF16)?.transpose(1, 2)?; let v = v.to_dtype(DType::BF16)?.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)? .to_dtype(DType::F32)? .transpose(1, 2)? } else { let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = match mask { None => att, Some(mask) => { let mask = mask.broadcast_as(att.shape())?; masked_fill(&att, &mask, &self.neg_inf)? } }; let att = candle_nn::ops::softmax_last_dim(&att)?; // Convert to contiguous as matmul doesn't support strided vs for now. att.matmul(&v)? }; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.attn_output.forward(&y)?; Ok(y) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] pub struct ModelWeights { tok_embeddings: Embedding, layers: Vec<LayerWeights>, output_norm: RmsNorm, output: QLinear, masks: HashMap<usize, Tensor>, span: tracing::Span, span_output: tracing::Span, } fn precomput_freqs_cis( head_dim: usize, max_seq_len: usize, freq_base: f32, device: &Device, ) -> Result<(Tensor, Tensor)> { let theta: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, max_seq_len as u32, device)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?; let sin = idx_theta.sin()?; Ok((cos, sin)) } impl ModelWeights { pub fn from_gguf<R: std::io::Seek + std::io::Read>( use_flash_attn: bool, ct: gguf_file::Content, reader: &mut R, device: &Device, ) -> Result<Self> { let md_get = |s: &str| match ct.metadata.get(s) { None => candle::bail!("cannot find {s} in metadata"), Some(v) => Ok(v), }; // Parameter extraction from metadata. let head_count = md_get("phi3.attention.head_count")?.to_u32()? as usize; let head_count_kv = md_get("phi3.attention.head_count_kv")?.to_u32()? as usize; let block_count = md_get("phi3.block_count")?.to_u32()? as usize; let embedding_length = md_get("phi3.embedding_length")?.to_u32()? as usize; let max_seq_len = md_get("phi3.context_length")?.to_u32()? as usize; let head_dim = embedding_length / head_count; let i_size = md_get("phi3.feed_forward_length")?.to_u32()? as usize; let rope_dim = md_get("phi3.rope.dimension_count")?.to_u32()? as usize; let rms_eps = md_get("phi3.attention.layer_norm_rms_epsilon")?.to_f32()? 
as f64; let (cos, sin) = precomput_freqs_cis(rope_dim, max_seq_len, 10_000., device)?; let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; let output_norm = rms_norm(ct.tensor(reader, "output_norm.weight", device)?, rms_eps)?; let output = QLinear::new(&ct, reader, "output", device)?; let mut layers = Vec::with_capacity(block_count); for layer_idx in 0..block_count { let prefix = format!("blk.{layer_idx}"); let ffn_up = QLinear::new(&ct, reader, &format!("{prefix}.ffn_up"), device)?; let ffn_down = QLinear::new(&ct, reader, &format!("{prefix}.ffn_down"), device)?; let mlp = Mlp { ffn_up, ffn_down, i_size, }; let attn_norm = rms_norm( ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?, rms_eps, )?; let ffn_norm = rms_norm( ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?, rms_eps, )?; let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let kv_cache = KvCache::new(2, max_seq_len); layers.push(LayerWeights { attn_qkv: QLinear::new(&ct, reader, &format!("{prefix}.attn_qkv"), device)?, attn_output: QLinear::new(&ct, reader, &format!("{prefix}.attn_output"), device)?, attn_norm, ffn_norm, mlp, n_head: head_count, n_kv_head: head_count_kv, head_dim, cos: cos.clone(), sin: sin.clone(), neg_inf: neg_inf.clone(), kv_cache, use_flash_attn, span_attn, span_rot, }) } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, embedding_length), layers, output_norm, output, masks: HashMap::new(), span, span_output, }) } fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } pub fn forward(&mut self, xs: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = xs.dims2()?; let mask = if seq_len == 1 { None } else { Some(self.mask(seq_len, xs.device())?) }; let _enter = self.span.enter(); let mut xs = self.tok_embeddings.forward(xs)?; for layer in self.layers.iter_mut() { let residual = &xs; let ys = xs.apply(&layer.attn_norm)?; let ys = layer.forward_attn(&ys, mask.as_ref(), index_pos)?; let ys = (ys + residual)?; let residual = &ys; let ys = ys.apply(&layer.ffn_norm)?; let ys = layer.mlp.forward(&ys)?; xs = (ys + residual)? } let xs = xs.apply(&self.output_norm)?.i((.., seq_len - 1, ..))?; let _enter = self.span_output.enter(); self.output.forward(&xs) } }
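// Minimal usage sketch for the weights above: load a Phi-3 GGUF file and run a
// single forward pass. The file path and the prompt token ids are placeholders;
// a real caller would tokenize the prompt with the matching tokenizer.
#[allow(dead_code)]
fn load_and_run_sketch() -> Result<()> {
    let device = Device::Cpu;
    let mut file = std::fs::File::open("phi-3-mini-4k-instruct-q4.gguf")?;
    let content = gguf_file::Content::read(&mut file)?;
    let mut model = ModelWeights::from_gguf(false, content, &mut file, &device)?;
    let tokens = Tensor::new(&[1u32, 2, 3], &device)?.unsqueeze(0)?;
    // forward returns the logits for the last position only, shape (batch, vocab).
    let logits = model.forward(&tokens, 0)?;
    println!("{:?}", logits.shape());
    Ok(())
}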
candle/candle-transformers/src/models/quantized_phi3.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_phi3.rs", "repo_id": "candle", "token_count": 5978 }
51
use candle::{IndexOp, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use super::transformer::TwoWayTransformer; #[derive(Debug)] struct MlpMaskDecoder { layers: Vec<super::Linear>, sigmoid_output: bool, span: tracing::Span, } impl MlpMaskDecoder { fn new( input_dim: usize, hidden_dim: usize, output_dim: usize, num_layers: usize, sigmoid_output: bool, vb: VarBuilder, ) -> Result<Self> { let mut layers = Vec::with_capacity(num_layers); let vb = vb.pp("layers"); for i in 0..num_layers { let in_dim = if i == 0 { input_dim } else { hidden_dim }; let out_dim = if i + 1 == num_layers { output_dim } else { hidden_dim }; let layer = super::linear(vb.pp(i), in_dim, out_dim, true)?; layers.push(layer) } let span = tracing::span!(tracing::Level::TRACE, "mlp-mask-decoder"); Ok(Self { layers, sigmoid_output, span, }) } } impl Module for MlpMaskDecoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (i, layer) in self.layers.iter().enumerate() { xs = layer.forward(&xs)?; if i + 1 < self.layers.len() { xs = xs.relu()? } } if self.sigmoid_output { candle_nn::ops::sigmoid(&xs) } else { Ok(xs) } } } #[derive(Debug)] pub struct MaskDecoder { iou_token: candle_nn::Embedding, mask_tokens: candle_nn::Embedding, iou_prediction_head: MlpMaskDecoder, output_upscaling_conv1: candle_nn::ConvTranspose2d, output_upscaling_ln: super::LayerNorm2d, output_upscaling_conv2: candle_nn::ConvTranspose2d, num_mask_tokens: usize, output_hypernetworks_mlps: Vec<MlpMaskDecoder>, transformer: TwoWayTransformer, span: tracing::Span, } impl MaskDecoder { pub fn new( transformer_dim: usize, num_multimask_outputs: usize, iou_head_depth: usize, iou_head_hidden_dim: usize, vb: VarBuilder, ) -> Result<Self> { let num_mask_tokens = num_multimask_outputs + 1; let iou_prediction_head = MlpMaskDecoder::new( transformer_dim, iou_head_hidden_dim, num_mask_tokens, iou_head_depth, false, vb.pp("iou_prediction_head"), )?; let iou_token = candle_nn::embedding(1, transformer_dim, vb.pp("iou_token"))?; let mask_tokens = candle_nn::embedding(num_mask_tokens, transformer_dim, vb.pp("mask_tokens"))?; let cfg = candle_nn::ConvTranspose2dConfig { stride: 2, ..Default::default() }; let output_upscaling_conv1 = candle_nn::conv_transpose2d( transformer_dim, transformer_dim / 4, 2, cfg, vb.pp("output_upscaling.0"), )?; let output_upscaling_ln = super::LayerNorm2d::new(transformer_dim / 4, 1e-6, vb.pp("output_upscaling.1"))?; let output_upscaling_conv2 = candle_nn::conv_transpose2d( transformer_dim / 4, transformer_dim / 8, 2, cfg, vb.pp("output_upscaling.3"), )?; let mut output_hypernetworks_mlps = Vec::with_capacity(num_mask_tokens); let vb_o = vb.pp("output_hypernetworks_mlps"); for i in 0..num_mask_tokens { let mlp = MlpMaskDecoder::new( transformer_dim, transformer_dim, transformer_dim / 8, 3, false, vb_o.pp(i), )?; output_hypernetworks_mlps.push(mlp) } let transformer = TwoWayTransformer::new( /* depth */ 2, /* embedding_dim */ transformer_dim, /* num_heads */ 8, /* mlp_dim */ 2048, vb.pp("transformer"), )?; let span = tracing::span!(tracing::Level::TRACE, "mask-decoder"); Ok(Self { iou_token, mask_tokens, iou_prediction_head, output_upscaling_conv1, output_upscaling_ln, output_upscaling_conv2, num_mask_tokens, output_hypernetworks_mlps, transformer, span, }) } pub fn forward( &self, image_embeddings: &Tensor, image_pe: &Tensor, sparse_prompt_embeddings: &Tensor, dense_prompt_embeddings: &Tensor, multimask_output: bool, ) -> Result<(Tensor, Tensor)> { let _enter = self.span.enter(); 
let (masks, iou_pred) = self.predict_masks( image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, )?; let masks = if multimask_output { masks.i((.., 1..))? } else { masks.i((.., 0..1))? }; let iou_pred = if multimask_output { iou_pred.i((.., 1..))? } else { iou_pred.i((.., 0..1))? }; Ok((masks, iou_pred)) } fn predict_masks( &self, image_embeddings: &Tensor, image_pe: &Tensor, sparse_prompt_embeddings: &Tensor, dense_prompt_embeddings: &Tensor, ) -> Result<(Tensor, Tensor)> { // Concatenate output tokens. let output_tokens = Tensor::cat( &[self.iou_token.embeddings(), self.mask_tokens.embeddings()], 0, )?; let (d1, d2) = output_tokens.dims2()?; let output_tokens = output_tokens .unsqueeze(0)? .expand((sparse_prompt_embeddings.dim(0)?, d1, d2))?; let tokens = Tensor::cat(&[&output_tokens, sparse_prompt_embeddings], 1)?; // Expand per-image data in batch direction to be per mask let src = repeat_interleave(image_embeddings, tokens.dim(0)?, 0)?; let src = src.broadcast_add(dense_prompt_embeddings)?; let pos_src = repeat_interleave(image_pe, tokens.dim(0)?, 0)?; let (b, c, h, w) = src.dims4()?; // Run the transformer let (hs, src) = self.transformer.forward(&src, &pos_src, &tokens)?; let iou_token_out = hs.i((.., 0))?; let mask_tokens_out = hs.i((.., 1..1 + self.num_mask_tokens))?; // Upscale mask embeddings and predict masks using the masks tokens. let src = src.transpose(1, 2)?.reshape((b, c, h, w))?; let upscaled_embedding = self .output_upscaling_conv1 .forward(&src)? .apply(&self.output_upscaling_ln)? .gelu()? .apply(&self.output_upscaling_conv2)? .gelu()?; let mut hyper_in_list = Vec::with_capacity(self.num_mask_tokens); for (i, mlp) in self.output_hypernetworks_mlps.iter().enumerate() { let h = mlp.forward(&mask_tokens_out.i((.., i))?)?; hyper_in_list.push(h) } let hyper_in = Tensor::stack(hyper_in_list.as_slice(), 1)?.contiguous()?; let (b, c, h, w) = upscaled_embedding.dims4()?; let masks = hyper_in.matmul(&upscaled_embedding.reshape((b, c, h * w))?)?; let masks = masks.reshape((b, (), h, w))?; // Generate mask quality predictions. let iou_pred = self.iou_prediction_head.forward(&iou_token_out)?; Ok((masks, iou_pred)) } } // Equivalent to torch.repeat_interleave fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> { let img = img.unsqueeze(dim + 1)?; let mut dims = img.dims().to_vec(); dims[dim + 1] = repeats; img.broadcast_as(dims)?.flatten(dim, dim + 1) }
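// Shape-level check of the repeat_interleave helper above (the values are
// arbitrary). With repeats = 3 on dim 0, a (2, 4) tensor becomes (6, 4) with
// each row repeated 3 times, matching torch.repeat_interleave.
#[cfg(test)]
mod repeat_interleave_tests {
    use candle::{Device, IndexOp, Tensor};

    #[test]
    fn repeats_along_dim0() -> candle::Result<()> {
        let t = Tensor::arange(0f32, 8f32, &Device::Cpu)?.reshape((2, 4))?;
        let r = super::repeat_interleave(&t, 3, 0)?;
        assert_eq!(r.dims(), &[6, 4]);
        // The first three rows are all copies of row 0.
        assert_eq!(r.i(0)?.to_vec1::<f32>()?, r.i(2)?.to_vec1::<f32>()?);
        Ok(())
    }
}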
candle/candle-transformers/src/models/segment_anything/mask_decoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/mask_decoder.rs", "repo_id": "candle", "token_count": 4213 }
52
//! 2D UNet Building Blocks //! use super::attention::{ AttentionBlock, AttentionBlockConfig, SpatialTransformer, SpatialTransformerConfig, }; use super::resnet::{ResnetBlock2D, ResnetBlock2DConfig}; use crate::models::with_tracing::{conv2d, Conv2d}; use candle::{Module, Result, Tensor, D}; use candle_nn as nn; #[derive(Debug)] struct Downsample2D { conv: Option<Conv2d>, padding: usize, span: tracing::Span, } impl Downsample2D { fn new( vs: nn::VarBuilder, in_channels: usize, use_conv: bool, out_channels: usize, padding: usize, ) -> Result<Self> { let conv = if use_conv { let config = nn::Conv2dConfig { stride: 2, padding, ..Default::default() }; let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?; Some(conv) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "downsample2d"); Ok(Self { conv, padding, span, }) } } impl Module for Downsample2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); match &self.conv { None => xs.avg_pool2d(2), Some(conv) => { if self.padding == 0 { let xs = xs .pad_with_zeros(D::Minus1, 0, 1)? .pad_with_zeros(D::Minus2, 0, 1)?; conv.forward(&xs) } else { conv.forward(xs) } } } } } // This does not support the conv-transpose mode. #[derive(Debug)] struct Upsample2D { conv: Conv2d, span: tracing::Span, } impl Upsample2D { fn new(vs: nn::VarBuilder, in_channels: usize, out_channels: usize) -> Result<Self> { let config = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv = conv2d(in_channels, out_channels, 3, config, vs.pp("conv"))?; let span = tracing::span!(tracing::Level::TRACE, "upsample2d"); Ok(Self { conv, span }) } } impl Upsample2D { fn forward(&self, xs: &Tensor, size: Option<(usize, usize)>) -> Result<Tensor> { let _enter = self.span.enter(); let xs = match size { None => { let (_bsize, _channels, h, w) = xs.dims4()?; xs.upsample_nearest2d(2 * h, 2 * w)? } Some((h, w)) => xs.upsample_nearest2d(h, w)?, }; self.conv.forward(&xs) } } #[derive(Debug, Clone, Copy)] pub struct DownEncoderBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: usize, pub output_scale_factor: f64, pub add_downsample: bool, pub downsample_padding: usize, } impl Default for DownEncoderBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_downsample: true, downsample_padding: 1, } } } #[derive(Debug)] pub struct DownEncoderBlock2D { resnets: Vec<ResnetBlock2D>, downsampler: Option<Downsample2D>, span: tracing::Span, pub config: DownEncoderBlock2DConfig, } impl DownEncoderBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DownEncoderBlock2DConfig, ) -> Result<Self> { let resnets: Vec<_> = { let vs = vs.pp("resnets"); let conv_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, out_channels: Some(out_channels), groups: config.resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels: None, ..Default::default() }; (0..(config.num_layers)) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs.pp(i.to_string()), in_channels, conv_cfg) }) .collect::<Result<Vec<_>>>()? 
}; let downsampler = if config.add_downsample { let downsample = Downsample2D::new( vs.pp("downsamplers").pp("0"), out_channels, true, out_channels, config.downsample_padding, )?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "down-enc2d"); Ok(Self { resnets, downsampler, span, config, }) } } impl Module for DownEncoderBlock2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for resnet in self.resnets.iter() { xs = resnet.forward(&xs, None)? } match &self.downsampler { Some(downsampler) => downsampler.forward(&xs), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct UpDecoderBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: usize, pub output_scale_factor: f64, pub add_upsample: bool, } impl Default for UpDecoderBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_upsample: true, } } } #[derive(Debug)] pub struct UpDecoderBlock2D { resnets: Vec<ResnetBlock2D>, upsampler: Option<Upsample2D>, span: tracing::Span, pub config: UpDecoderBlock2DConfig, } impl UpDecoderBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: UpDecoderBlock2DConfig, ) -> Result<Self> { let resnets: Vec<_> = { let vs = vs.pp("resnets"); let conv_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), eps: config.resnet_eps, groups: config.resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels: None, ..Default::default() }; (0..(config.num_layers)) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs.pp(i.to_string()), in_channels, conv_cfg) }) .collect::<Result<Vec<_>>>()? }; let upsampler = if config.add_upsample { let upsample = Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?; Some(upsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "up-dec2d"); Ok(Self { resnets, upsampler, span, config, }) } } impl Module for UpDecoderBlock2D { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for resnet in self.resnets.iter() { xs = resnet.forward(&xs, None)? 
} match &self.upsampler { Some(upsampler) => upsampler.forward(&xs, None), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct UNetMidBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: Option<usize>, pub attn_num_head_channels: Option<usize>, // attention_type "default" pub output_scale_factor: f64, } impl Default for UNetMidBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: Some(32), attn_num_head_channels: Some(1), output_scale_factor: 1., } } } #[derive(Debug)] pub struct UNetMidBlock2D { resnet: ResnetBlock2D, attn_resnets: Vec<(AttentionBlock, ResnetBlock2D)>, span: tracing::Span, pub config: UNetMidBlock2DConfig, } impl UNetMidBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, temb_channels: Option<usize>, config: UNetMidBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let vs_attns = vs.pp("attentions"); let resnet_groups = config .resnet_groups .unwrap_or_else(|| usize::min(in_channels / 4, 32)); let resnet_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, groups: resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?; let attn_cfg = AttentionBlockConfig { num_head_channels: config.attn_num_head_channels, num_groups: resnet_groups, rescale_output_factor: config.output_scale_factor, eps: config.resnet_eps, }; let mut attn_resnets = vec![]; for index in 0..config.num_layers { let attn = AttentionBlock::new(vs_attns.pp(index.to_string()), in_channels, attn_cfg)?; let resnet = ResnetBlock2D::new( vs_resnets.pp((index + 1).to_string()), in_channels, resnet_cfg, )?; attn_resnets.push((attn, resnet)) } let span = tracing::span!(tracing::Level::TRACE, "mid2d"); Ok(Self { resnet, attn_resnets, span, config, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = self.resnet.forward(xs, temb)?; for (attn, resnet) in self.attn_resnets.iter() { xs = resnet.forward(&attn.forward(&xs)?, temb)? 
} Ok(xs) } } #[derive(Debug, Clone, Copy)] pub struct UNetMidBlock2DCrossAttnConfig { pub num_layers: usize, pub resnet_eps: f64, pub resnet_groups: Option<usize>, pub attn_num_head_channels: usize, // attention_type "default" pub output_scale_factor: f64, pub cross_attn_dim: usize, pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for UNetMidBlock2DCrossAttnConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: Some(32), attn_num_head_channels: 1, output_scale_factor: 1., cross_attn_dim: 1280, sliced_attention_size: None, // Sliced attention disabled use_linear_projection: false, transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct UNetMidBlock2DCrossAttn { resnet: ResnetBlock2D, attn_resnets: Vec<(SpatialTransformer, ResnetBlock2D)>, span: tracing::Span, pub config: UNetMidBlock2DCrossAttnConfig, } impl UNetMidBlock2DCrossAttn { pub fn new( vs: nn::VarBuilder, in_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: UNetMidBlock2DCrossAttnConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let vs_attns = vs.pp("attentions"); let resnet_groups = config .resnet_groups .unwrap_or_else(|| usize::min(in_channels / 4, 32)); let resnet_cfg = ResnetBlock2DConfig { eps: config.resnet_eps, groups: resnet_groups, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnet = ResnetBlock2D::new(vs_resnets.pp("0"), in_channels, resnet_cfg)?; let n_heads = config.attn_num_head_channels; let attn_cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, num_groups: resnet_groups, context_dim: Some(config.cross_attn_dim), sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let mut attn_resnets = vec![]; for index in 0..config.num_layers { let attn = SpatialTransformer::new( vs_attns.pp(index.to_string()), in_channels, n_heads, in_channels / n_heads, use_flash_attn, attn_cfg, )?; let resnet = ResnetBlock2D::new( vs_resnets.pp((index + 1).to_string()), in_channels, resnet_cfg, )?; attn_resnets.push((attn, resnet)) } let span = tracing::span!(tracing::Level::TRACE, "xa-mid2d"); Ok(Self { resnet, attn_resnets, span, config, }) } pub fn forward( &self, xs: &Tensor, temb: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = self.resnet.forward(xs, temb)?; for (attn, resnet) in self.attn_resnets.iter() { xs = resnet.forward(&attn.forward(&xs, encoder_hidden_states)?, temb)? 
} Ok(xs) } } #[derive(Debug, Clone, Copy)] pub struct DownBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, // resnet_time_scale_shift: "default" // resnet_act_fn: "swish" pub resnet_groups: usize, pub output_scale_factor: f64, pub add_downsample: bool, pub downsample_padding: usize, } impl Default for DownBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_downsample: true, downsample_padding: 1, } } } #[derive(Debug)] pub struct DownBlock2D { resnets: Vec<ResnetBlock2D>, downsampler: Option<Downsample2D>, span: tracing::Span, pub config: DownBlock2DConfig, } impl DownBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, temb_channels: Option<usize>, config: DownBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let resnet_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), eps: config.resnet_eps, output_scale_factor: config.output_scale_factor, temb_channels, ..Default::default() }; let resnets = (0..config.num_layers) .map(|i| { let in_channels = if i == 0 { in_channels } else { out_channels }; ResnetBlock2D::new(vs_resnets.pp(i.to_string()), in_channels, resnet_cfg) }) .collect::<Result<Vec<_>>>()?; let downsampler = if config.add_downsample { let downsampler = Downsample2D::new( vs.pp("downsamplers").pp("0"), out_channels, true, out_channels, config.downsample_padding, )?; Some(downsampler) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "down2d"); Ok(Self { resnets, downsampler, span, config, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<(Tensor, Vec<Tensor>)> { let _enter = self.span.enter(); let mut xs = xs.clone(); let mut output_states = vec![]; for resnet in self.resnets.iter() { xs = resnet.forward(&xs, temb)?; output_states.push(xs.clone()); } let xs = match &self.downsampler { Some(downsampler) => { let xs = downsampler.forward(&xs)?; output_states.push(xs.clone()); xs } None => xs, }; Ok((xs, output_states)) } } #[derive(Debug, Clone, Copy)] pub struct CrossAttnDownBlock2DConfig { pub downblock: DownBlock2DConfig, pub attn_num_head_channels: usize, pub cross_attention_dim: usize, // attention_type: "default" pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for CrossAttnDownBlock2DConfig { fn default() -> Self { Self { downblock: Default::default(), attn_num_head_channels: 1, cross_attention_dim: 1280, sliced_attention_size: None, use_linear_projection: false, transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct CrossAttnDownBlock2D { downblock: DownBlock2D, attentions: Vec<SpatialTransformer>, span: tracing::Span, pub config: CrossAttnDownBlock2DConfig, } impl CrossAttnDownBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: CrossAttnDownBlock2DConfig, ) -> Result<Self> { let downblock = DownBlock2D::new( vs.clone(), in_channels, out_channels, temb_channels, config.downblock, )?; let n_heads = config.attn_num_head_channels; let cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, context_dim: Some(config.cross_attention_dim), num_groups: config.downblock.resnet_groups, sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let vs_attn = vs.pp("attentions"); let attentions = (0..config.downblock.num_layers) .map(|i| { 
SpatialTransformer::new( vs_attn.pp(i.to_string()), out_channels, n_heads, out_channels / n_heads, use_flash_attn, cfg, ) }) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "xa-down2d"); Ok(Self { downblock, attentions, span, config, }) } pub fn forward( &self, xs: &Tensor, temb: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Vec<Tensor>)> { let _enter = self.span.enter(); let mut output_states = vec![]; let mut xs = xs.clone(); for (resnet, attn) in self.downblock.resnets.iter().zip(self.attentions.iter()) { xs = resnet.forward(&xs, temb)?; xs = attn.forward(&xs, encoder_hidden_states)?; output_states.push(xs.clone()); } let xs = match &self.downblock.downsampler { Some(downsampler) => { let xs = downsampler.forward(&xs)?; output_states.push(xs.clone()); xs } None => xs, }; Ok((xs, output_states)) } } #[derive(Debug, Clone, Copy)] pub struct UpBlock2DConfig { pub num_layers: usize, pub resnet_eps: f64, // resnet_time_scale_shift: "default" // resnet_act_fn: "swish" pub resnet_groups: usize, pub output_scale_factor: f64, pub add_upsample: bool, } impl Default for UpBlock2DConfig { fn default() -> Self { Self { num_layers: 1, resnet_eps: 1e-6, resnet_groups: 32, output_scale_factor: 1., add_upsample: true, } } } #[derive(Debug)] pub struct UpBlock2D { pub resnets: Vec<ResnetBlock2D>, upsampler: Option<Upsample2D>, span: tracing::Span, pub config: UpBlock2DConfig, } impl UpBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, prev_output_channels: usize, out_channels: usize, temb_channels: Option<usize>, config: UpBlock2DConfig, ) -> Result<Self> { let vs_resnets = vs.pp("resnets"); let resnet_cfg = ResnetBlock2DConfig { out_channels: Some(out_channels), temb_channels, eps: config.resnet_eps, output_scale_factor: config.output_scale_factor, ..Default::default() }; let resnets = (0..config.num_layers) .map(|i| { let res_skip_channels = if i == config.num_layers - 1 { in_channels } else { out_channels }; let resnet_in_channels = if i == 0 { prev_output_channels } else { out_channels }; let in_channels = resnet_in_channels + res_skip_channels; ResnetBlock2D::new(vs_resnets.pp(i.to_string()), in_channels, resnet_cfg) }) .collect::<Result<Vec<_>>>()?; let upsampler = if config.add_upsample { let upsampler = Upsample2D::new(vs.pp("upsamplers").pp("0"), out_channels, out_channels)?; Some(upsampler) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "up2d"); Ok(Self { resnets, upsampler, span, config, }) } pub fn forward( &self, xs: &Tensor, res_xs: &[Tensor], temb: Option<&Tensor>, upsample_size: Option<(usize, usize)>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (index, resnet) in self.resnets.iter().enumerate() { xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?; xs = xs.contiguous()?; xs = resnet.forward(&xs, temb)?; } match &self.upsampler { Some(upsampler) => upsampler.forward(&xs, upsample_size), None => Ok(xs), } } } #[derive(Debug, Clone, Copy)] pub struct CrossAttnUpBlock2DConfig { pub upblock: UpBlock2DConfig, pub attn_num_head_channels: usize, pub cross_attention_dim: usize, // attention_type: "default" pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, pub transformer_layers_per_block: usize, } impl Default for CrossAttnUpBlock2DConfig { fn default() -> Self { Self { upblock: Default::default(), attn_num_head_channels: 1, cross_attention_dim: 1280, sliced_attention_size: None, use_linear_projection: false, 
transformer_layers_per_block: 1, } } } #[derive(Debug)] pub struct CrossAttnUpBlock2D { pub upblock: UpBlock2D, pub attentions: Vec<SpatialTransformer>, span: tracing::Span, pub config: CrossAttnUpBlock2DConfig, } impl CrossAttnUpBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, prev_output_channels: usize, out_channels: usize, temb_channels: Option<usize>, use_flash_attn: bool, config: CrossAttnUpBlock2DConfig, ) -> Result<Self> { let upblock = UpBlock2D::new( vs.clone(), in_channels, prev_output_channels, out_channels, temb_channels, config.upblock, )?; let n_heads = config.attn_num_head_channels; let cfg = SpatialTransformerConfig { depth: config.transformer_layers_per_block, context_dim: Some(config.cross_attention_dim), num_groups: config.upblock.resnet_groups, sliced_attention_size: config.sliced_attention_size, use_linear_projection: config.use_linear_projection, }; let vs_attn = vs.pp("attentions"); let attentions = (0..config.upblock.num_layers) .map(|i| { SpatialTransformer::new( vs_attn.pp(i.to_string()), out_channels, n_heads, out_channels / n_heads, use_flash_attn, cfg, ) }) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "xa-up2d"); Ok(Self { upblock, attentions, span, config, }) } pub fn forward( &self, xs: &Tensor, res_xs: &[Tensor], temb: Option<&Tensor>, upsample_size: Option<(usize, usize)>, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for (index, resnet) in self.upblock.resnets.iter().enumerate() { xs = Tensor::cat(&[&xs, &res_xs[res_xs.len() - index - 1]], 1)?; xs = xs.contiguous()?; xs = resnet.forward(&xs, temb)?; xs = self.attentions[index].forward(&xs, encoder_hidden_states)?; } match &self.upblock.upsampler { Some(upsampler) => upsampler.forward(&xs, upsample_size), None => Ok(xs), } } }
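// Shape-level sketch of the encoder/decoder blocks above, using zero-initialized
// weights (candle_nn::VarBuilder::zeros) instead of real checkpoint tensors. The
// channel count (32) is chosen to match the default resnet_groups of 32; the
// spatial size is arbitrary.
#[cfg(test)]
mod block_shape_tests {
    use super::*;
    use candle::{DType, Device, Module, Tensor};

    #[test]
    fn down_then_up_restores_spatial_dims() -> candle::Result<()> {
        let device = Device::Cpu;
        let vb = candle_nn::VarBuilder::zeros(DType::F32, &device);
        let down = DownEncoderBlock2D::new(vb.pp("down"), 32, 32, Default::default())?;
        let up = UpDecoderBlock2D::new(vb.pp("up"), 32, 32, Default::default())?;
        let xs = Tensor::zeros((1, 32, 16, 16), DType::F32, &device)?;
        let xs = down.forward(&xs)?; // the stride-2 conv halves H and W
        assert_eq!(xs.dims(), &[1, 32, 8, 8]);
        let xs = up.forward(&xs)?; // nearest-neighbor upsampling doubles H and W
        assert_eq!(xs.dims(), &[1, 32, 16, 16]);
        Ok(())
    }
}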
candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/unet_2d_blocks.rs", "repo_id": "candle", "token_count": 13813 }
53
use candle::{Result, Tensor};

#[derive(Debug, Clone)]
pub struct DDPMWSchedulerConfig {
    scaler: f64,
    s: f64,
}

impl Default for DDPMWSchedulerConfig {
    fn default() -> Self {
        Self {
            scaler: 1f64,
            s: 0.008f64,
        }
    }
}

pub struct DDPMWScheduler {
    init_alpha_cumprod: f64,
    init_noise_sigma: f64,
    timesteps: Vec<f64>,
    pub config: DDPMWSchedulerConfig,
}

impl DDPMWScheduler {
    pub fn new(inference_steps: usize, config: DDPMWSchedulerConfig) -> Result<Self> {
        let init_alpha_cumprod = (config.s / (1. + config.s) * std::f64::consts::PI)
            .cos()
            .powi(2);
        let timesteps = (0..=inference_steps)
            .map(|i| 1. - i as f64 / inference_steps as f64)
            .collect::<Vec<_>>();
        Ok(Self {
            init_alpha_cumprod,
            init_noise_sigma: 1.0,
            timesteps,
            config,
        })
    }

    pub fn timesteps(&self) -> &[f64] {
        &self.timesteps
    }

    fn alpha_cumprod(&self, t: f64) -> f64 {
        let scaler = self.config.scaler;
        let s = self.config.s;
        let t = if scaler > 1. {
            1. - (1. - t).powf(scaler)
        } else if scaler < 1. {
            t.powf(scaler)
        } else {
            t
        };
        let alpha_cumprod = ((t + s) / (1. + s) * std::f64::consts::PI * 0.5)
            .cos()
            .powi(2)
            / self.init_alpha_cumprod;
        alpha_cumprod.clamp(0.0001, 0.9999)
    }

    fn previous_timestep(&self, ts: f64) -> f64 {
        let index = self
            .timesteps
            .iter()
            .enumerate()
            .map(|(idx, v)| (idx, (v - ts).abs()))
            .min_by(|x, y| x.1.total_cmp(&y.1))
            .unwrap()
            .0;
        self.timesteps[index + 1]
    }

    /// Ensures interchangeability with schedulers that need to scale the denoising model input
    /// depending on the current timestep.
    pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor {
        sample
    }

    pub fn step(&self, model_output: &Tensor, ts: f64, sample: &Tensor) -> Result<Tensor> {
        let prev_t = self.previous_timestep(ts);

        let alpha_cumprod = self.alpha_cumprod(ts);
        let alpha_cumprod_prev = self.alpha_cumprod(prev_t);
        let alpha = alpha_cumprod / alpha_cumprod_prev;

        let mu = (sample - model_output * ((1. - alpha) / (1. - alpha_cumprod).sqrt()))?;
        let mu = (mu * (1. / alpha).sqrt())?;

        let std_noise = mu.randn_like(0., 1.)?;
        let std =
            std_noise * ((1. - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt();
        if prev_t == 0. {
            Ok(mu)
        } else {
            mu + std
        }
    }

    pub fn init_noise_sigma(&self) -> f64 {
        self.init_noise_sigma
    }
}
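// A minimal sketch of driving the scheduler above. `denoise` stands in for the
// Wuerstchen model forward pass and the latent shape is arbitrary; both are
// placeholders for this example.
#[allow(dead_code)]
fn sampling_loop_sketch(denoise: impl Fn(&Tensor, f64) -> Result<Tensor>) -> Result<Tensor> {
    let device = candle::Device::Cpu;
    let scheduler = DDPMWScheduler::new(30, Default::default())?;
    // Start from standard Gaussian noise; init_noise_sigma is 1.0 for this scheduler.
    let mut latents = Tensor::randn(0f32, 1f32, (1, 16, 24, 24), &device)?;
    // The last timestep (0.) is the terminal state, so step over all but the last.
    let ts = scheduler.timesteps();
    for &t in &ts[..ts.len() - 1] {
        let model_output = denoise(&latents, t)?;
        latents = scheduler.step(&model_output, t, &latents)?;
    }
    Ok(latents)
}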
candle/candle-transformers/src/models/wuerstchen/ddpm.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/ddpm.rs", "repo_id": "candle", "token_count": 1537 }
54
// Load the Candle BERT wasm module.
import init, { Model } from "./build/m.js";

async function fetchArrayBuffer(url) {
  const cacheName = "bert-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Bert {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (!this.instance[modelID]) {
      await init();

      self.postMessage({ status: "loading", message: "Loading Model" });

      // The third buffer is the model config fetched from configURL.
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([
        fetchArrayBuffer(weightsURL),
        fetchArrayBuffer(tokenizerURL),
        fetchArrayBuffer(configURL),
      ]);

      this.instance[modelID] = new Model(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const {
    weightsURL,
    tokenizerURL,
    configURL,
    modelID,
    sentences,
    normalize = true,
  } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting Bert Model" });
    const model = await Bert.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "embedding",
      message: "Calculating Embeddings",
    });
    const output = model.get_embeddings({
      sentences: sentences,
      normalize_embeddings: normalize,
    });

    self.postMessage({
      status: "complete",
      message: "complete",
      output: output.data,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
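// Example main-thread usage (a sketch; the URLs and model ID below are placeholders):
//
//   const worker = new Worker("./bertWorker.js", { type: "module" });
//   worker.postMessage({
//     weightsURL: "https://example.com/model.safetensors",
//     tokenizerURL: "https://example.com/tokenizer.json",
//     configURL: "https://example.com/config.json",
//     modelID: "my-bert-model",
//     sentences: ["Hello world!"],
//     normalize: true,
//   });
//   worker.onmessage = ({ data }) => {
//     if (data.status === "complete") console.log(data.output);
//     if (data.error) console.error(data.error);
//   };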
candle/candle-wasm-examples/bert/bertWorker.js/0
{ "file_path": "candle/candle-wasm-examples/bert/bertWorker.js", "repo_id": "candle", "token_count": 779 }
55
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheModel = true) { if (!cacheModel) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "moondream-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } async function concatenateArrayBuffers(urls) { const arrayBuffers = await Promise.all( urls.map((url) => fetchArrayBuffer(url)) ); let totalLength = arrayBuffers.reduce( (acc, arrayBuffer) => acc + arrayBuffer.byteLength, 0 ); let concatenatedBuffer = new Uint8Array(totalLength); let offset = 0; arrayBuffers.forEach((buffer) => { concatenatedBuffer.set(new Uint8Array(buffer), offset); offset += buffer.byteLength; }); return concatenatedBuffer; } class Moondream { static imageArrayHash = {}; static instance = {}; static currentModelID = null; static async getInstance(weightsURL, modelID, tokenizerURL, quantized) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([ weightsURL instanceof Array ? concatenateArrayBuffers(weightsURL) : fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, quantized ); } this.currentModelID = modelID; return this.instance[modelID]; } // Remove the modelID parameter from setImageEmbeddings static setImageEmbeddings(imageArrayU8) { // check if image embeddings are already set for this image and model const imageArrayHash = this.getSimpleHash(imageArrayU8); if ( this.imageArrayHash[this.currentModelID] === imageArrayHash && this.instance[this.currentModelID] ) { self.postMessage({ status: "embedding", message: "Embeddings Already Set", }); return; } this.imageArrayHash[this.currentModelID] = imageArrayHash; this.instance[this.currentModelID].set_image_embeddings(imageArrayU8); self.postMessage({ status: "embedding", message: "Embeddings Set" }); } static getSimpleHash(imageArrayU8) { // get simple hash of imageArrayU8 let imageArrayHash = 0; for (let i = 0; i < imageArrayU8.length; i += 100) { imageArrayHash ^= imageArrayU8[i]; } return imageArrayHash.toString(16); } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, quantized, imageURL, prompt, seed, temp, top_p, repeatPenalty, maxSeqLen, verbose_prompt, } = data; try { self.postMessage({ status: "loading", message: "Starting Moondream" }); const model = await Moondream.getInstance( weightsURL, modelID, tokenizerURL, quantized ); self.postMessage({ status: "loading", message: "Initializing model" }); self.postMessage({ status: "loading", message: "Loading Image" }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); self.postMessage({ status: "embedding", message: "Creating Embeddings" }); Moondream.setImageEmbeddings(imageArrayU8); self.postMessage({ status: "complete-embedding", message: "Embeddings Complete", }); const { token, token_id } = 
model.init_with_image_prompt({ prompt, seed: BigInt(seed), temp: parseFloat(temp), top_p: parseFloat(top_p), repeat_penalty: parseFloat(repeatPenalty), repeat_last_n: 64, verbose_prompt, }); const seq_len = 2048; let sentence = token; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { console.log("Aborted"); self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const { token, token_id } = await model.next_token(); if (token_id === 50256) { // <|endoftext|> self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); return; } const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
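// Example main-thread usage (a sketch; the URLs and parameter values below are
// placeholders). Generation is started with a "start" command and can be
// interrupted with an "abort" command:
//
//   const worker = new Worker("./moondreamWorker.js", { type: "module" });
//   worker.postMessage({
//     command: "start",
//     weightsURL, modelID, tokenizerURL, quantized: true,
//     imageURL, prompt: "Describe this image.",
//     seed: 299792458, temp: 0.2, top_p: 1.0,
//     repeatPenalty: 1.1, maxSeqLen: 256, verbose_prompt: false,
//   });
//   worker.onmessage = ({ data }) => {
//     if (data.status === "generating") console.log(data.sentence);
//   };
//   // To stop generation early:
//   worker.postMessage({ command: "abort" });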
candle/candle-wasm-examples/moondream/moondreamWorker.js/0
{ "file_path": "candle/candle-wasm-examples/moondream/moondreamWorker.js", "repo_id": "candle", "token_count": 2273 }
56
import init, { run_app } from './pkg/candle_wasm_example_whisper.js';

async function main() {
  await init('/pkg/candle_wasm_example_whisper_bg.wasm');
  run_app();
}

main()
candle/candle-wasm-examples/whisper/main.js/0
{ "file_path": "candle/candle-wasm-examples/whisper/main.js", "repo_id": "candle", "token_count": 73 }
57
fn main() {
    wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
    console_error_panic_hook::set_once();
    yew::Renderer::<candle_wasm_example_yolo::App>::new().render();
}
candle/candle-wasm-examples/yolo/src/bin/app.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/app.rs", "repo_id": "candle", "token_count": 82 }
58
Dockerfile
.vscode/
.idea
.gitignore
LICENSE
README.md
node_modules/
.svelte-kit/
.env*
!.env
.env.local
chat-ui/.dockerignore/0
{ "file_path": "chat-ui/.dockerignore", "repo_id": "chat-ui", "token_count": 51 }
59
{ "useTabs": true, "trailingComma": "es5", "printWidth": 100, "plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"], "pluginSearchDirs": ["."], "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }] }
chat-ui/.prettierrc/0
{ "file_path": "chat-ui/.prettierrc", "repo_id": "chat-ui", "token_count": 104 }
60
{{- if $.Values.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels: {{ include "labels.standard" . | nindent 4 }}
  name: {{ include "name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels: {{ include "labels.standard" . | nindent 6 }}
  endpoints:
    - port: metrics
      path: /metrics
      interval: 15s
{{- end }}
chat-ui/chart/templates/service-monitor.yaml/0
{ "file_path": "chat-ui/chart/templates/service-monitor.yaml", "repo_id": "chat-ui", "token_count": 144 }
61
# Ollama

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

We also support the Ollama inference server. Spin up a model with

```bash
ollama run mistral
```

Then specify the endpoints like so:

```ini
MODELS=`[
  {
    "name": "Ollama Mistral",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.1,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "endpoints": [
      {
        "type": "ollama",
        "url" : "http://127.0.0.1:11434",
        "ollamaName" : "mistral"
      }
    ]
  }
]`
```
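If the endpoint does not respond, it can help to confirm that the Ollama server is reachable and that the model has been pulled; the `/api/tags` route lists the locally available models:

```bash
curl http://127.0.0.1:11434/api/tags
```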
chat-ui/docs/source/configuration/models/providers/ollama.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/ollama.md", "repo_id": "chat-ui", "token_count": 468 }
62
<script lang="ts"> import { afterUpdate } from "svelte"; import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte"; import DOMPurify from "isomorphic-dompurify"; export let code = ""; export let lang = ""; $: highlightedCode = ""; afterUpdate(async () => { const { default: hljs } = await import("highlight.js"); const language = hljs.getLanguage(lang); highlightedCode = hljs.highlightAuto(code, language?.aliases).value; }); </script> <div class="group relative my-4 rounded-lg"> <!-- eslint-disable svelte/no-at-html-tags --> <pre class="scrollbar-custom overflow-auto px-5 scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code class="language-{lang}" >{@html DOMPurify.sanitize(highlightedCode || code.replaceAll("<", "&lt;"))} </code></pre> <CopyToClipBoardBtn classNames="btn rounded-lg border border-gray-200 px-2 py-2 text-sm shadow-sm transition-all hover:border-gray-300 active:shadow-inner dark:border-gray-700 dark:hover:border-gray-500 absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100 dark:text-gray-700 text-gray-200" value={code} /> </div>
chat-ui/src/lib/components/CodeBlock.svelte/0
{ "file_path": "chat-ui/src/lib/components/CodeBlock.svelte", "repo_id": "chat-ui", "token_count": 442 }
63
<script lang="ts"> import CarbonRotate360 from "~icons/carbon/rotate-360"; export let classNames = ""; </script> <button type="button" on:click class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}" > <CarbonRotate360 class="mr-2 text-xs " /> Retry </button>
chat-ui/src/lib/components/RetryBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/RetryBtn.svelte", "repo_id": "chat-ui", "token_count": 157 }
64
<script lang="ts"> import { marked, type MarkedOptions } from "marked"; import markedKatex from "marked-katex-extension"; import type { Message } from "$lib/types/Message"; import { afterUpdate, createEventDispatcher, tick } from "svelte"; import { deepestChild } from "$lib/utils/deepestChild"; import { page } from "$app/stores"; import CodeBlock from "../CodeBlock.svelte"; import CopyToClipBoardBtn from "../CopyToClipBoardBtn.svelte"; import IconLoading from "../icons/IconLoading.svelte"; import CarbonRotate360 from "~icons/carbon/rotate-360"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonDownload from "~icons/carbon/download"; import CarbonThumbsUp from "~icons/carbon/thumbs-up"; import CarbonThumbsDown from "~icons/carbon/thumbs-down"; import CarbonPen from "~icons/carbon/pen"; import CarbonChevronLeft from "~icons/carbon/chevron-left"; import CarbonChevronRight from "~icons/carbon/chevron-right"; import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken"; import type { Model } from "$lib/types/Model"; import UploadedFile from "./UploadedFile.svelte"; import OpenWebSearchResults from "../OpenWebSearchResults.svelte"; import { MessageWebSearchUpdateType, type MessageToolUpdate, type MessageWebSearchSourcesUpdate, type MessageWebSearchUpdate, } from "$lib/types/MessageUpdate"; import { base } from "$app/paths"; import { useConvTreeStore } from "$lib/stores/convTree"; import ToolUpdate from "./ToolUpdate.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import DOMPurify from "isomorphic-dompurify"; import { enhance } from "$app/forms"; import { browser } from "$app/environment"; function sanitizeMd(md: string) { let ret = md .replace(/<\|[a-z]*$/, "") .replace(/<\|[a-z]+\|$/, "") .replace(/<$/, "") .replaceAll(PUBLIC_SEP_TOKEN, " ") .replaceAll(/<\|[a-z]+\|>/g, " ") .replaceAll(/<br\s?\/?>/gi, "\n") .replaceAll("<", "&lt;") .trim(); for (const stop of [...(model.parameters?.stop ?? []), "<|endoftext|>"]) { if (ret.endsWith(stop)) { ret = ret.slice(0, -stop.length).trim(); } } return ret; } function unsanitizeMd(md: string) { return md.replaceAll("&lt;", "<"); } export let model: Model; export let id: Message["id"]; export let messages: Message[]; export let loading = false; export let isAuthor = true; export let readOnly = false; export let isTapped = false; $: message = messages.find((m) => m.id === id) ?? ({} as Message); $: urlNotTrailing = $page.url.pathname.replace(/\/$/, ""); const dispatch = createEventDispatcher<{ retry: { content?: string; id: Message["id"] }; vote: { score: Message["score"]; id: Message["id"] }; }>(); let contentEl: HTMLElement; let loadingEl: IconLoading; let pendingTimeout: ReturnType<typeof setTimeout>; let isCopied = false; let initialized = false; const renderer = new marked.Renderer(); // For code blocks with simple backticks renderer.codespan = (code) => { // Unsanitize double-sanitized code return `<code>${code.replaceAll("&amp;", "&")}</code>`; }; renderer.link = (href, title, text) => { return `<a href="${href?.replace(/>$/, "")}" target="_blank" rel="noreferrer">${text}</a>`; }; // eslint-disable-next-line @typescript-eslint/no-unused-vars const { extensions, ...defaults } = marked.getDefaults() as MarkedOptions & { // eslint-disable-next-line @typescript-eslint/no-explicit-any extensions: any; }; const options: MarkedOptions = { ...defaults, gfm: true, breaks: true, renderer, }; marked.use( markedKatex({ throwOnError: false, }) ); $: tokens = marked.lexer(sanitizeMd(message.content ?? 
"")); $: emptyLoad = !message.content && (webSearchIsDone || (searchUpdates && searchUpdates.length === 0)); const settings = useSettingsStore(); afterUpdate(() => { if ($settings.disableStream) { return; } loadingEl?.$destroy(); clearTimeout(pendingTimeout); // Add loading animation to the last message if update takes more than 600ms if (isLast && loading && emptyLoad) { pendingTimeout = setTimeout(() => { if (contentEl) { loadingEl = new IconLoading({ target: deepestChild(contentEl), props: { classNames: "loading inline ml-2 first:ml-0" }, }); } }, 600); } }); function handleKeyDown(e: KeyboardEvent) { if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) { editFormEl.requestSubmit(); } } $: searchUpdates = (message.updates?.filter(({ type }) => type === "webSearch") ?? []) as MessageWebSearchUpdate[]; // filter all updates with type === "tool" then group them by uuid field $: toolUpdates = message.updates ?.filter(({ type }) => type === "tool") .reduce((acc, update) => { if (update.type !== "tool") { return acc; } acc[update.uuid] = acc[update.uuid] ?? []; acc[update.uuid].push(update); return acc; }, {} as Record<string, MessageToolUpdate[]>); $: downloadLink = urlNotTrailing + `/message/${message.id}/prompt`; let webSearchIsDone = true; $: webSearchIsDone = searchUpdates.some( (update) => update.subtype === MessageWebSearchUpdateType.Finished ); $: webSearchSources = searchUpdates?.find( (update): update is MessageWebSearchSourcesUpdate => update.subtype === MessageWebSearchUpdateType.Sources )?.sources; $: if (isCopied) { setTimeout(() => { isCopied = false; }, 1000); } $: editMode = $convTreeStore.editing === message.id; let editContentEl: HTMLTextAreaElement; let editFormEl: HTMLFormElement; $: if (editMode) { tick(); if (editContentEl) { editContentEl.value = message.content; editContentEl?.focus(); } } $: isLast = (message && message.children?.length === 0) ?? false; $: childrenToRender = 0; $: nChildren = message?.children?.length ?? 
0; $: { if (initialized) { childrenToRender = Math.max(0, nChildren - 1); } else { childrenToRender = 0; initialized = true; } } const convTreeStore = useConvTreeStore(); $: if (message.children?.length === 0) { $convTreeStore.leaf = message.id; // Check if the code is running in a browser if (browser) { // Remember the last message viewed or interacted by the user localStorage.setItem("leafId", message.id); } } let isRun = false; $: { if (message.id && !isRun) { if (message.currentChildIndex) childrenToRender = message.currentChildIndex; isRun = true; } } $: if (message.children?.length === 0) $convTreeStore.leaf = message.id; </script> {#if message.from === "assistant"} <div class="group relative -mb-4 flex items-start justify-start gap-4 pb-4 leading-relaxed" role="presentation" on:click={() => (isTapped = !isTapped)} on:keydown={() => (isTapped = !isTapped)} > {#if $page.data?.assistant?.avatar} <img src="{base}/settings/assistants/{$page.data.assistant._id}/avatar.jpg" alt="Avatar" class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg" /> {:else} <img alt="" src="https://huggingface.co/avatars/2edb18bd0206c16b433841a47f53fa8e.svg" class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg" /> {/if} <div class="relative min-h-[calc(2rem+theme(spacing[3.5])*2)] min-w-[60px] break-words rounded-2xl border border-gray-100 bg-gradient-to-br from-gray-50 px-5 py-3.5 text-gray-600 prose-pre:my-2 dark:border-gray-800 dark:from-gray-800/40 dark:text-gray-300" > {#if message.files?.length} <div class="flex h-fit flex-wrap gap-x-5 gap-y-2"> {#each message.files as file} <UploadedFile {file} canClose={false} isPreview={false} /> {/each} </div> {/if} {#if searchUpdates && searchUpdates.length > 0} <OpenWebSearchResults classNames={tokens.length ? "mb-3.5" : ""} webSearchMessages={searchUpdates} /> {/if} {#if toolUpdates} {#each Object.values(toolUpdates) as tool} {#if tool.length} {#key tool[0].uuid} <ToolUpdate {tool} {loading} /> {/key} {/if} {/each} {/if} <div class="prose max-w-none dark:prose-invert max-sm:prose-sm prose-headings:font-semibold prose-h1:text-lg prose-h2:text-base prose-h3:text-base prose-pre:bg-gray-800 dark:prose-pre:bg-gray-900" bind:this={contentEl} > {#if isLast && loading && $settings.disableStream} <IconLoading classNames="loading inline ml-2 first:ml-0" /> {/if} {#each tokens as token} {#if token.type === "code"} <CodeBlock lang={token.lang} code={unsanitizeMd(token.text)} /> {:else} {#await marked.parse(token.raw, options) then parsed} <!-- eslint-disable-next-line svelte/no-at-html-tags --> {@html DOMPurify.sanitize(parsed)} {/await} {/if} {/each} </div> <!-- Web Search sources --> {#if webSearchSources?.length} <div class="mt-4 flex flex-wrap items-center gap-x-2 gap-y-1.5 text-sm"> <div class="text-gray-400">Sources:</div> {#each webSearchSources as { link, title }} <a class="flex items-center gap-2 whitespace-nowrap rounded-lg border bg-white px-2 py-1.5 leading-none hover:border-gray-300 dark:border-gray-800 dark:bg-gray-900 dark:hover:border-gray-700" href={link} target="_blank" > <img class="h-3.5 w-3.5 rounded" src="https://www.google.com/s2/favicons?sz=64&domain_url={new URL(link).hostname}" alt="{title} favicon" /> <div>{new URL(link).hostname.replace(/^www\./, "")}</div> </a> {/each} </div> {/if} </div> {#if !loading && (message.content || toolUpdates)} <div class="absolute -bottom-4 right-0 flex max-md:transition-all md:group-hover:visible md:group-hover:opacity-100 {message.score ? 
'visible opacity-100' : 'invisible max-md:-translate-y-4 max-md:opacity-0'} {isTapped || isCopied ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''} " > {#if isAuthor} <button class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300 {message.score && message.score > 0 ? 'text-green-500 hover:text-green-500 dark:text-green-400 hover:dark:text-green-400' : ''}" title={message.score === 1 ? "Remove +1" : "+1"} type="button" on:click={() => dispatch("vote", { score: message.score === 1 ? 0 : 1, id: message.id })} > <CarbonThumbsUp class="h-[1.14em] w-[1.14em]" /> </button> <button class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300 {message.score && message.score < 0 ? 'text-red-500 hover:text-red-500 dark:text-red-400 hover:dark:text-red-400' : ''}" title={message.score === -1 ? "Remove -1" : "-1"} type="button" on:click={() => dispatch("vote", { score: message.score === -1 ? 0 : -1, id: message.id })} > <CarbonThumbsDown class="h-[1.14em] w-[1.14em]" /> </button> {/if} <button class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" title="Retry" type="button" on:click={() => { dispatch("retry", { id: message.id }); }} > <CarbonRotate360 /> </button> <CopyToClipBoardBtn on:click={() => { isCopied = true; }} classNames="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" value={message.content} /> </div> {/if} </div> <slot name="childrenNav" /> {/if} {#if message.from === "user"} <div class="group relative w-full items-start justify-start gap-4 max-sm:text-sm" role="presentation" on:click={() => (isTapped = !isTapped)} on:keydown={() => (isTapped = !isTapped)} > <div class="flex w-full flex-col gap-2"> {#if message.files?.length} <div class="flex w-fit gap-4 px-5"> {#each message.files as file} <UploadedFile {file} canClose={false} isPreview={false} /> {/each} </div> {/if} <div class="flex w-full flex-row flex-nowrap"> {#if !editMode} <p class="disabled w-full appearance-none whitespace-break-spaces text-wrap break-words bg-inherit px-5 py-3.5 text-gray-500 dark:text-gray-400" > {message.content.trim()} </p> {:else} <form class="flex w-full flex-col" bind:this={editFormEl} on:submit|preventDefault={() => { dispatch("retry", { content: editContentEl.value, id: message.id }); $convTreeStore.editing = null; }} > <textarea class="w-full whitespace-break-spaces break-words rounded-xl bg-gray-100 px-5 py-3.5 text-gray-500 *:h-max dark:bg-gray-800 dark:text-gray-400" rows="5" bind:this={editContentEl} value={message.content.trim()} on:keydown={handleKeyDown} required /> <div class="flex w-full flex-row flex-nowrap items-center justify-center gap-2 pt-2"> <button type="submit" class="btn rounded-lg px-3 py-1.5 text-sm {loading ? 
'bg-gray-300 text-gray-400 dark:bg-gray-700 dark:text-gray-600' : 'bg-gray-200 text-gray-600 hover:text-gray-800 focus:ring-0 dark:bg-gray-800 dark:text-gray-300 dark:hover:text-gray-200'} " disabled={loading} > Submit </button> <button type="button" class="btn rounded-sm p-2 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" on:click={() => { $convTreeStore.editing = null; }} > Cancel </button> </div> </form> {/if} {#if !loading && !editMode} <div class="max-md:opacity-0 invisible absolute right-0 top-3.5 z-10 h-max max-md:-translate-y-4 max-md:transition-all md:bottom-0 md:group-hover:visible md:group-hover:opacity-100 {isTapped || isCopied ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''}" > <div class="mx-auto flex flex-row flex-nowrap gap-2"> {#if downloadLink} <a class="rounded-lg border border-gray-100 bg-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:text-gray-300 max-sm:!hidden md:hidden" title="Download prompt and parameters" type="button" target="_blank" href={downloadLink} > <CarbonDownload /> </a> {/if} {#if !readOnly} <button class="cursor-pointer rounded-lg border border-gray-100 bg-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:text-gray-300 md:hidden lg:-right-2" title="Branch" type="button" on:click={() => ($convTreeStore.editing = message.id)} > <CarbonPen /> </button> {/if} </div> </div> {/if} </div> <slot name="childrenNav" /> </div> </div> {/if} {#if nChildren > 0} <svelte:self {loading} {messages} {isAuthor} {readOnly} {model} id={messages.find((m) => m.id === id)?.children?.[childrenToRender]} on:retry on:vote on:continue > <svelte:fragment slot="childrenNav"> {#if nChildren > 1 && $convTreeStore.editing === null} <div class="font-white group/navbranch z-10 -mt-1 ml-3.5 mr-auto flex h-6 w-fit select-none flex-row items-center justify-center gap-1 text-sm" > <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" on:click={() => (childrenToRender = Math.max(0, childrenToRender - 1))} disabled={childrenToRender === 0 || loading} > <CarbonChevronLeft class="text-sm" /> </button> <span class="text-gray-400 dark:text-gray-500"> {childrenToRender + 1} / {nChildren} </span> <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" on:click={() => (childrenToRender = Math.min( (message?.children?.length ?? 1) - 1, childrenToRender + 1 ))} disabled={childrenToRender === nChildren - 1 || loading} > <CarbonChevronRight class="text-sm" /> </button> {#if !loading && message.children}<form method="POST" action="?/deleteBranch" class="hidden group-hover/navbranch:block" use:enhance={({ cancel }) => { if (!confirm("Are you sure you want to delete this branch?")) { cancel(); } }} > <input name="messageId" value={message.children[childrenToRender]} type="hidden" /> <button class="flex items-center justify-center text-xs text-gray-400 hover:text-gray-800 dark:text-gray-500 dark:hover:text-gray-200" type="submit" ><CarbonTrashCan /> </button> </form> {/if} </div> {/if} </svelte:fragment> </svelte:self> {/if} <style> @keyframes loading { to { stroke-dashoffset: 122.9; } } </style>
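The branch navigation above is easy to get wrong around the index bounds; here is a minimal, self-contained sketch of the same clamping logic, with a reduced stand-in for the component's real message type (illustrative only, not the actual `$lib/types/Message` shape):

```ts
// Reduced stand-in for the component's Message type (assumption for this sketch).
interface MessageLike {
	id: string;
	children?: string[];
}

// Mirrors the prev/next buttons: keep the child index within [0, nChildren - 1].
function clampChildIndex(message: MessageLike, current: number, delta: number): number {
	const nChildren = message.children?.length ?? 0;
	if (nChildren === 0) return 0;
	return Math.min(nChildren - 1, Math.max(0, current + delta));
}

// clampChildIndex({ id: "m", children: ["a", "b"] }, 1, +1) === 1  (stays in bounds)
```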
chat-ui/src/lib/components/chat/ChatMessage.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatMessage.svelte", "repo_id": "chat-ui", "token_count": 7678 }
65
import { Database } from "$lib/server/database"; import { acquireLock, refreshLock } from "$lib/migrations/lock"; import type { ObjectId } from "mongodb"; import { subDays } from "date-fns"; import { logger } from "$lib/server/logger"; const LOCK_KEY = "assistants.count"; let hasLock = false; let lockId: ObjectId | null = null; async function refreshAssistantsCountsHelper() { if (!hasLock) { return; } try { await Database.getInstance() .getClient() .withSession((session) => session.withTransaction(async () => { await Database.getInstance() .getCollections() .assistants.aggregate([ { $project: { _id: 1 } }, { $set: { last24HoursCount: 0 } }, { $unionWith: { coll: "assistants.stats", pipeline: [ { $match: { "date.at": { $gte: subDays(new Date(), 1) }, "date.span": "hour" }, }, { $group: { _id: "$assistantId", last24HoursCount: { $sum: "$count" }, }, }, ], }, }, { $group: { _id: "$_id", last24HoursCount: { $sum: "$last24HoursCount" }, }, }, { $merge: { into: "assistants", on: "_id", whenMatched: "merge", whenNotMatched: "discard", }, }, ]) .next(); }) ); } catch (e) { logger.error(e, "Refresh assistants counter failed!"); } } async function maintainLock() { if (hasLock && lockId) { hasLock = await refreshLock(LOCK_KEY, lockId); if (!hasLock) { lockId = null; } } else if (!hasLock) { lockId = (await acquireLock(LOCK_KEY)) || null; hasLock = !!lockId; } setTimeout(maintainLock, 10_000); } export function refreshAssistantsCounts() { const ONE_HOUR_MS = 3_600_000; maintainLock().then(() => { refreshAssistantsCountsHelper(); setInterval(refreshAssistantsCountsHelper, ONE_HOUR_MS); }); }
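As a reading aid, here is the lock-keepalive pattern from this job in isolation, a sketch with in-memory stubs standing in for the MongoDB-backed `acquireLock`/`refreshLock` helpers, whose exact semantics are assumed:

```ts
type LockId = string;

// Stubs: the real helpers persist the lock in MongoDB and can genuinely fail.
async function acquireLock(key: string): Promise<LockId | undefined> {
	return `lock:${key}:${Date.now()}`;
}
async function refreshLock(_key: string, _id: LockId): Promise<boolean> {
	return true;
}

// Refresh while held, re-acquire when lost, re-check every `intervalMs`.
async function maintainLockSketch(key: string, intervalMs = 10_000) {
	let lockId: LockId | null = null;
	const tick = async () => {
		if (lockId) {
			if (!(await refreshLock(key, lockId))) lockId = null; // lost; retry next tick
		} else {
			lockId = (await acquireLock(key)) ?? null;
		}
		setTimeout(tick, intervalMs);
	};
	await tick();
}
```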
chat-ui/src/lib/jobs/refresh-assistants-counts.ts/0
{ "file_path": "chat-ui/src/lib/jobs/refresh-assistants-counts.ts", "repo_id": "chat-ui", "token_count": 970 }
66
import { z } from "zod"; import { embeddingEndpointTei, embeddingEndpointTeiParametersSchema, } from "./tei/embeddingEndpoints"; import { embeddingEndpointTransformersJS, embeddingEndpointTransformersJSParametersSchema, } from "./transformersjs/embeddingEndpoints"; import { embeddingEndpointOpenAI, embeddingEndpointOpenAIParametersSchema, } from "./openai/embeddingEndpoints"; import { embeddingEndpointHfApi, embeddingEndpointHfApiSchema } from "./hfApi/embeddingHfApi"; // parameters passed when generating text interface EmbeddingEndpointParameters { inputs: string[]; } export type Embedding = number[]; // type signature for the endpoint export type EmbeddingEndpoint = (params: EmbeddingEndpointParameters) => Promise<Embedding[]>; export const embeddingEndpointSchema = z.discriminatedUnion("type", [ embeddingEndpointTeiParametersSchema, embeddingEndpointTransformersJSParametersSchema, embeddingEndpointOpenAIParametersSchema, embeddingEndpointHfApiSchema, ]); type EmbeddingEndpointTypeOptions = z.infer<typeof embeddingEndpointSchema>["type"]; // generator function that takes in type discrimantor value for defining the endpoint and return the endpoint export type EmbeddingEndpointGenerator<T extends EmbeddingEndpointTypeOptions> = ( inputs: Extract<z.infer<typeof embeddingEndpointSchema>, { type: T }> ) => EmbeddingEndpoint | Promise<EmbeddingEndpoint>; // list of all endpoint generators export const embeddingEndpoints: { [Key in EmbeddingEndpointTypeOptions]: EmbeddingEndpointGenerator<Key>; } = { tei: embeddingEndpointTei, transformersjs: embeddingEndpointTransformersJS, openai: embeddingEndpointOpenAI, hfapi: embeddingEndpointHfApi, }; export default embeddingEndpoints;
chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 544 }
67
import type { Sharp } from "sharp"; import sharp from "sharp"; import type { MessageFile } from "$lib/types/Message"; import { z, type util } from "zod"; export interface ImageProcessorOptions<TMimeType extends string = string> { supportedMimeTypes: TMimeType[]; preferredMimeType: TMimeType; maxSizeInMB: number; maxWidth: number; maxHeight: number; } export type ImageProcessor<TMimeType extends string = string> = (file: MessageFile) => Promise<{ image: Buffer; mime: TMimeType; }>; export function createImageProcessorOptionsValidator<TMimeType extends string = string>( defaults: ImageProcessorOptions<TMimeType> ) { return z .object({ supportedMimeTypes: z .array( z.enum<string, [TMimeType, ...TMimeType[]]>([ defaults.supportedMimeTypes[0], ...defaults.supportedMimeTypes.slice(1), ]) ) .default(defaults.supportedMimeTypes), preferredMimeType: z .enum([defaults.supportedMimeTypes[0], ...defaults.supportedMimeTypes.slice(1)]) .default(defaults.preferredMimeType as util.noUndefined<TMimeType>), maxSizeInMB: z.number().positive().default(defaults.maxSizeInMB), maxWidth: z.number().int().positive().default(defaults.maxWidth), maxHeight: z.number().int().positive().default(defaults.maxHeight), }) .default(defaults); } export function makeImageProcessor<TMimeType extends string = string>( options: ImageProcessorOptions<TMimeType> ): ImageProcessor<TMimeType> { return async (file) => { const { supportedMimeTypes, preferredMimeType, maxSizeInMB, maxWidth, maxHeight } = options; const { mime, value } = file; const buffer = Buffer.from(value, "base64"); let sharpInst = sharp(buffer); const metadata = await sharpInst.metadata(); if (!metadata) throw Error("Failed to read image metadata"); const { width, height } = metadata; if (width === undefined || height === undefined) throw Error("Failed to read image size"); const tooLargeInSize = width > maxWidth || height > maxHeight; const tooLargeInBytes = buffer.byteLength > maxSizeInMB * 1000 * 1000; const outputMime = chooseMimeType(supportedMimeTypes, preferredMimeType, mime, { preferSizeReduction: tooLargeInBytes, }); // Resize if necessary if (tooLargeInSize || tooLargeInBytes) { const size = chooseImageSize({ mime: outputMime, width, height, maxWidth, maxHeight, maxSizeInMB, }); if (size.width !== width || size.height !== height) { sharpInst = resizeImage(sharpInst, size.width, size.height); } } // Convert format if necessary // We always want to convert the image when the file was too large in bytes // so we can guarantee that ideal options are used, which are expected when // choosing the image size if (outputMime !== mime || tooLargeInBytes) { sharpInst = convertImage(sharpInst, outputMime); } const processedImage = await sharpInst.toBuffer(); return { image: processedImage, mime: outputMime }; }; } const outputFormats = ["png", "jpeg", "webp", "avif", "tiff", "gif"] as const; type OutputImgFormat = (typeof outputFormats)[number]; const isOutputFormat = (format: string): format is (typeof outputFormats)[number] => outputFormats.includes(format as OutputImgFormat); export function convertImage(sharpInst: Sharp, outputMime: string): Sharp { const [type, format] = outputMime.split("/"); if (type !== "image") throw Error(`Requested non-image mime type: ${outputMime}`); if (!isOutputFormat(format)) { throw Error(`Requested to convert to an unsupported format: ${format}`); } return sharpInst[format](); } // heic/heif requires proprietary license // TODO: blocking heif may be incorrect considering it also supports av1, so we should instead // detect the 
compression method used via sharp().metadata().compression // TODO: consider what to do about animated formats: apng, gif, animated webp, ... const blocklistedMimes = ["image/heic", "image/heif"]; /** Sorted from largest to smallest */ const mimesBySizeDesc = [ "image/png", "image/tiff", "image/gif", "image/jpeg", "image/webp", "image/avif", ]; /** * Defaults to preferred format or uses existing mime if supported * When preferSizeReduction is true, it will choose the smallest format that is supported **/ function chooseMimeType<T extends readonly string[]>( supportedMimes: T, preferredMime: string, mime: string, { preferSizeReduction }: { preferSizeReduction: boolean } ): T[number] { if (!supportedMimes.includes(preferredMime)) { const supportedMimesStr = supportedMimes.join(", "); throw Error( `Preferred format "${preferredMime}" not found in supported mimes: ${supportedMimesStr}` ); } const [type] = mime.split("/"); if (type !== "image") throw Error(`Received non-image mime type: ${mime}`); if (supportedMimes.includes(mime) && !preferSizeReduction) return mime; if (blocklistedMimes.includes(mime)) throw Error(`Received blocklisted mime type: ${mime}`); const smallestMime = mimesBySizeDesc.findLast((m) => supportedMimes.includes(m)); return smallestMime ?? preferredMime; } interface ImageSizeOptions { mime: string; width: number; height: number; maxWidth: number; maxHeight: number; maxSizeInMB: number; } /** Resizes the image to fit within the specified size in MB by guessing the output size */ export function chooseImageSize({ mime, width, height, maxWidth, maxHeight, maxSizeInMB, }: ImageSizeOptions): { width: number; height: number } { const biggestDiscrepency = Math.max(1, width / maxWidth, height / maxHeight); let selectedWidth = Math.ceil(width / biggestDiscrepency); let selectedHeight = Math.ceil(height / biggestDiscrepency); do { const estimatedSize = estimateImageSizeInBytes(mime, selectedWidth, selectedHeight); if (estimatedSize < maxSizeInMB * 1024 * 1024) { return { width: selectedWidth, height: selectedHeight }; } selectedWidth = Math.floor(selectedWidth / 1.1); selectedHeight = Math.floor(selectedHeight / 1.1); } while (selectedWidth > 1 && selectedHeight > 1); throw Error(`Failed to resize image to fit within ${maxSizeInMB}MB`); } const mimeToCompressionRatio: Record<string, number> = { "image/png": 1 / 2, "image/jpeg": 1 / 10, "image/webp": 1 / 4, "image/avif": 1 / 5, "image/tiff": 1, "image/gif": 1 / 5, }; /** * Guesses the size of an image in bytes based on its format and dimensions * Should guess the worst case **/ function estimateImageSizeInBytes(mime: string, width: number, height: number): number { const compressionRatio = mimeToCompressionRatio[mime]; if (!compressionRatio) throw Error(`Unsupported image format: ${mime}`); const bitsPerPixel = 32; // Assuming 32-bit color depth for 8-bit R G B A const bytesPerPixel = bitsPerPixel / 8; const uncompressedSize = width * height * bytesPerPixel; return uncompressedSize * compressionRatio; } export function resizeImage(sharpInst: Sharp, maxWidth: number, maxHeight: number): Sharp { return sharpInst.resize({ width: maxWidth, height: maxHeight, fit: "inside" }); }
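A usage sketch under assumptions: the import path and the `MessageFile` fields (`type`, `name`, `mime`, `value`) mirror how the processor above is presumably wired up elsewhere in the app, and the base64 payload is a truncated placeholder:

```ts
import { makeImageProcessor } from "./images"; // path assumed

const processImage = makeImageProcessor({
	supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"],
	preferredMimeType: "image/webp",
	maxSizeInMB: 5,
	maxWidth: 2048,
	maxHeight: 2048,
});

async function demo() {
	// `value` is base64 without a data-URL prefix.
	const { image, mime } = await processImage({
		type: "base64",
		name: "photo.png",
		mime: "image/png",
		value: "iVBORw0KGgo...", // truncated placeholder
	});
	console.log(mime, image.byteLength);
}
```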
chat-ui/src/lib/server/endpoints/images.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/images.ts", "repo_id": "chat-ui", "token_count": 2311 }
68
import { collectDefaultMetrics, Registry, Counter, Summary } from "prom-client"; import express from "express"; import { logger } from "$lib/server/logger"; import { env } from "$env/dynamic/private"; import type { Model } from "$lib/types/Model"; import { onExit } from "./exitHandler"; import { promisify } from "util"; interface Metrics { model: { conversationsTotal: Counter<Model["id"]>; messagesTotal: Counter<Model["id"]>; tokenCountTotal: Counter<Model["id"]>; timePerOutputToken: Summary<Model["id"]>; timeToFirstToken: Summary<Model["id"]>; latency: Summary<Model["id"]>; }; webSearch: { requestCount: Counter; pageFetchCount: Counter; pageFetchCountError: Counter; pageFetchDuration: Summary; embeddingDuration: Summary; }; tool: { toolUseCount: Counter<string>; toolUseCountError: Counter<string>; toolUseDuration: Summary<string>; timeToChooseTools: Summary; }; } export class MetricsServer { private static instance: MetricsServer; private metrics: Metrics; private constructor() { const app = express(); const port = Number(env.METRICS_PORT || "5565"); if (isNaN(port) || port < 0 || port > 65535) { logger.warn(`Invalid value for METRICS_PORT: ${env.METRICS_PORT}`); } if (env.METRICS_ENABLED !== "false" && env.METRICS_ENABLED !== "true") { logger.warn(`Invalid value for METRICS_ENABLED: ${env.METRICS_ENABLED}`); } if (env.METRICS_ENABLED === "true") { const server = app.listen(port, () => { logger.info(`Metrics server listening on port ${port}`); }); const closeServer = promisify(server.close.bind(server)); onExit(async () => { logger.info("Disconnecting metrics server ..."); await closeServer(); logger.info("Server stopped ..."); }); } const register = new Registry(); collectDefaultMetrics({ register }); this.metrics = { model: { conversationsTotal: new Counter({ name: "model_conversations_total", help: "Total number of conversations", labelNames: ["model"], registers: [register], }), messagesTotal: new Counter({ name: "model_messages_total", help: "Total number of messages", labelNames: ["model"], registers: [register], }), tokenCountTotal: new Counter({ name: "model_token_count_total", help: "Total number of tokens", labelNames: ["model"], registers: [register], }), timePerOutputToken: new Summary({ name: "model_time_per_output_token_ms", help: "Time per output token in ms", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), timeToFirstToken: new Summary({ name: "model_time_to_first_token_ms", help: "Time to first token", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), latency: new Summary({ name: "model_latency_ms", help: "Total latency until end of answer", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), }, webSearch: { requestCount: new Counter({ name: "web_search_request_count", help: "Total number of web search requests", registers: [register], }), pageFetchCount: new Counter({ name: "web_search_page_fetch_count", help: "Total number of web search page fetches", registers: [register], }), pageFetchCountError: new Counter({ name: "web_search_page_fetch_count_error", help: "Total number of web search page fetch errors", registers: [register], }), pageFetchDuration: new Summary({ name: "web_search_page_fetch_duration_ms", help: "Web search page fetch duration", registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), embeddingDuration: new Summary({ name: "web_search_embedding_duration_ms", help: "Web search embedding duration", registers: [register], maxAgeSeconds: 5 * 60,
ageBuckets: 5, }), }, tool: { toolUseCount: new Counter({ name: "tool_use_count", help: "Total number of tool uses", labelNames: ["tool"], registers: [register], }), toolUseCountError: new Counter({ name: "tool_use_count_error", help: "Total number of tool use errors", labelNames: ["tool"], registers: [register], }), toolUseDuration: new Summary({ name: "tool_use_duration_ms", help: "Tool use duration", labelNames: ["tool"], registers: [register], maxAgeSeconds: 30 * 60, // longer duration since we use this to give feedback to the user ageBuckets: 5, }), timeToChooseTools: new Summary({ name: "time_to_choose_tools_ms", help: "Time to choose tools", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), }, }; app.get("/metrics", (req, res) => { register.metrics().then((metrics) => { res.set("Content-Type", "text/plain"); res.send(metrics); }); }); } public static getInstance(): MetricsServer { if (!MetricsServer.instance) { MetricsServer.instance = new MetricsServer(); } return MetricsServer.instance; } public static getMetrics(): Metrics { return MetricsServer.getInstance().metrics; } }
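Recording through the singleton is a one-liner once the server has been constructed at startup; a small sketch (the label value and numbers are illustrative, and the import path is assumed):

```ts
import { MetricsServer } from "$lib/server/metrics"; // path assumed

const metrics = MetricsServer.getMetrics();
metrics.model.conversationsTotal.inc({ model: "example-model" });
metrics.model.latency.observe({ model: "example-model" }, 1234); // milliseconds
metrics.webSearch.requestCount.inc();
```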
chat-ui/src/lib/server/metrics.ts/0
{ "file_path": "chat-ui/src/lib/server/metrics.ts", "repo_id": "chat-ui", "token_count": 2179 }
69
import { z } from "zod"; import { env } from "$env/dynamic/private"; import JSON5 from "json5"; // RATE_LIMIT is the legacy way to define messages per minute limit export const usageLimitsSchema = z .object({ conversations: z.coerce.number().optional(), // how many conversations messages: z.coerce.number().optional(), // how many messages in a conversation assistants: z.coerce.number().optional(), // how many assistants messageLength: z.coerce.number().optional(), // how long can a message be before we cut it off messagesPerMinute: z .preprocess((val) => { if (val === undefined) { return env.RATE_LIMIT; } return val; }, z.coerce.number().optional()) .optional(), // how many messages per minute tools: z.coerce.number().optional(), // how many tools }) .optional(); export const usageLimits = usageLimitsSchema.parse(JSON5.parse(env.USAGE_LIMITS));
chat-ui/src/lib/server/usageLimits.ts/0
{ "file_path": "chat-ui/src/lib/server/usageLimits.ts", "repo_id": "chat-ui", "token_count": 309 }
70
import type { WebSearchSource } from "$lib/types/WebSearch"; import { env } from "$env/dynamic/private"; export default async function search(query: string): Promise<WebSearchSource[]> { // const params = { // q: query, // // You can add other parameters if needed, like 'count', 'offset', etc. // }; const response = await fetch( "https://api.bing.microsoft.com/v7.0/search" + "?q=" + encodeURIComponent(query), { method: "GET", headers: { "Ocp-Apim-Subscription-Key": env.BING_SUBSCRIPTION_KEY, "Content-type": "application/json", }, } ); /* eslint-disable @typescript-eslint/no-explicit-any */ const data = (await response.json()) as Record<string, any>; if (!response.ok) { throw new Error( data["message"] ?? `Bing API returned error code ${response.status} - ${response.statusText}` ); } // Adapt the data structure from the Bing response to match the WebSearchSource type const webPages = data["webPages"]?.["value"] ?? []; return webPages.map((page: any) => ({ title: page.name, link: page.url, text: page.snippet, displayLink: page.displayUrl, })); }
chat-ui/src/lib/server/websearch/search/endpoints/bing.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/bing.ts", "repo_id": "chat-ui", "token_count": 425 }
71
import { browser } from "$app/environment"; import { invalidate } from "$app/navigation"; import { base } from "$app/paths"; import { UrlDependency } from "$lib/types/UrlDependency"; import type { ObjectId } from "mongodb"; import { getContext, setContext } from "svelte"; import { type Writable, writable, get } from "svelte/store"; type SettingsStore = { shareConversationsWithModelAuthors: boolean; hideEmojiOnSidebar: boolean; ethicsModalAccepted: boolean; ethicsModalAcceptedAt: Date | null; activeModel: string; customPrompts: Record<string, string>; recentlySaved: boolean; assistants: Array<ObjectId | string>; tools?: Array<string>; disableStream: boolean; }; type SettingsStoreWritable = Writable<SettingsStore> & { instantSet: (settings: Partial<SettingsStore>) => Promise<void>; }; export function useSettingsStore() { return getContext<SettingsStoreWritable>("settings"); } export function createSettingsStore(initialValue: Omit<SettingsStore, "recentlySaved">) { const baseStore = writable({ ...initialValue, recentlySaved: false }); let timeoutId: NodeJS.Timeout; async function setSettings(settings: Partial<SettingsStore>) { baseStore.update((s) => ({ ...s, ...settings, })); clearTimeout(timeoutId); if (browser) { timeoutId = setTimeout(async () => { await fetch(`${base}/settings`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ ...get(baseStore), ...settings, }), }); invalidate(UrlDependency.ConversationList); // set savedRecently to true for 3s baseStore.update((s) => ({ ...s, recentlySaved: true, })); setTimeout(() => { baseStore.update((s) => ({ ...s, recentlySaved: false, })); }, 3000); invalidate(UrlDependency.ConversationList); }, 300); // debounce server calls by 300ms } } async function instantSet(settings: Partial<SettingsStore>) { baseStore.update((s) => ({ ...s, ...settings, })); if (browser) { await fetch(`${base}/settings`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ ...get(baseStore), ...settings, }), }); invalidate(UrlDependency.ConversationList); } } const newStore = { subscribe: baseStore.subscribe, set: setSettings, instantSet, update: (fn: (s: SettingsStore) => SettingsStore) => { setSettings(fn(get(baseStore))); }, } satisfies SettingsStoreWritable; setContext("settings", newStore); return newStore; }
chat-ui/src/lib/stores/settings.ts/0
{ "file_path": "chat-ui/src/lib/stores/settings.ts", "repo_id": "chat-ui", "token_count": 999 }
72
import type { Timestamps } from "./Timestamps"; export interface Semaphore extends Timestamps { key: string; }
chat-ui/src/lib/types/Semaphore.ts/0
{ "file_path": "chat-ui/src/lib/types/Semaphore.ts", "repo_id": "chat-ui", "token_count": 35 }
73
export function formatUserCount(userCount: number): string { const userCountRanges: { min: number; max: number; label: string }[] = [ { min: 0, max: 1, label: "1" }, { min: 2, max: 9, label: "1-10" }, { min: 10, max: 49, label: "10+" }, { min: 50, max: 99, label: "50+" }, { min: 100, max: 299, label: "100+" }, { min: 300, max: 499, label: "300+" }, { min: 500, max: 999, label: "500+" }, { min: 1_000, max: 2_999, label: "1k+" }, { min: 3_000, max: 4_999, label: "3k+" }, { min: 5_000, max: 9_999, label: "5k+" }, { min: 10_000, max: 19_999, label: "10k+" }, { min: 20_000, max: 29_999, label: "20k+" }, { min: 30_000, max: 39_999, label: "30k+" }, { min: 40_000, max: 49_999, label: "40k+" }, { min: 50_000, max: Infinity, label: "50k+" }, ]; const range = userCountRanges.find(({ min, max }) => userCount >= min && userCount <= max); return range?.label ?? ""; }
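A few spot checks of the bucketing, matching the ranges above:

```ts
formatUserCount(1); // "1"
formatUserCount(7); // "1-10"
formatUserCount(450); // "300+"
formatUserCount(12_000); // "10k+"
```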
chat-ui/src/lib/utils/formatUserCount.ts/0
{ "file_path": "chat-ui/src/lib/utils/formatUserCount.ts", "repo_id": "chat-ui", "token_count": 404 }
74
import { browser } from "$app/environment"; export async function share(url: string, title: string, appendLeafId: boolean = false) { if (!browser) return; // Retrieve the leafId from localStorage const leafId = localStorage.getItem("leafId"); if (appendLeafId && leafId) { // Use URL and URLSearchParams to add the leafId parameter const shareUrl = new URL(url); shareUrl.searchParams.append("leafId", leafId); url = shareUrl.toString(); } if (navigator.share) { navigator.share({ url, title }); } else { if (document.hasFocus()) { await navigator.clipboard.writeText(url); } else { alert("Document is not focused. Please try again."); } } }
chat-ui/src/lib/utils/share.ts/0
{ "file_path": "chat-ui/src/lib/utils/share.ts", "repo_id": "chat-ui", "token_count": 231 }
75
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; // function used to insert conversations used for testing export const insertLegacyConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "legacy conversation", model: "", embeddingModel: "", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", }, ], }); return res.insertedId; }; export const insertLinearBranchConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "linear branch conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", ancestors: [], children: ["1-1-1-1-2"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, ], }); return res.insertedId; }; export const insertSideBranchesConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "side branches conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world, root message!", ancestors: [], children: ["1-1-1-1-2", "1-1-1-1-5"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, response to root message!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, response from follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, { id: "1-1-1-1-5", from: "assistant", content: "Hello, alternative assistant answer!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-6", "1-1-1-1-7"], }, { id: "1-1-1-1-6", from: "user", content: "Hello, follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, { id: "1-1-1-1-7", from: "user", content: "Hello, alternative follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, ], }); return res.insertedId; }; describe("inserting conversations", () => { it("should insert a legacy conversation", async () => { const id = await insertLegacyConversation(); expect(id).toBeDefined(); }); it("should insert a linear branch conversation", async () => { const id = await insertLinearBranchConversation(); expect(id).toBeDefined(); }); it("should insert a side branches conversation", async () => { const id = await insertSideBranchesConversation(); expect(id).toBeDefined(); }); });
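The fixtures above encode the tree through redundant `ancestors`/`children` id lists; a small sketch of how a root-to-leaf path falls out of repeated child lookups (the reduced message shape is illustrative):

```ts
interface TreeMessage {
	id: string;
	children?: string[];
}

// Follow the `pick`-th child at each level until a leaf is reached.
function pathToLeaf(messages: TreeMessage[], rootId: string, pick = 0): string[] {
	const byId = new Map(messages.map((m) => [m.id, m]));
	const path: string[] = [];
	let current = byId.get(rootId);
	while (current) {
		path.push(current.id);
		current = byId.get(current.children?.[pick] ?? "");
	}
	return path;
}

// For the side-branch fixture above, pick = 0 yields:
// ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3", "1-1-1-1-4"]
```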
chat-ui/src/lib/utils/tree/treeHelpers.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/treeHelpers.spec.ts", "repo_id": "chat-ui", "token_count": 1864 }
76
import { authCondition } from "$lib/server/auth"; import type { Conversation } from "$lib/types/Conversation"; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; export async function GET({ locals }) { if (locals.user?._id || locals.sessionId) { const settings = await collections.settings.findOne(authCondition(locals)); const conversations = await collections.conversations .find(authCondition(locals)) .sort({ updatedAt: -1 }) .project<Pick<Conversation, "assistantId">>({ assistantId: 1, }) .limit(300) .toArray(); const userAssistants = settings?.assistants?.map((assistantId) => assistantId.toString()) ?? []; const userAssistantsSet = new Set(userAssistants); const assistantIds = [ ...userAssistants.map((el) => new ObjectId(el)), ...(conversations.map((conv) => conv.assistantId).filter((el) => !!el) as ObjectId[]), ]; const assistants = await collections.assistants.find({ _id: { $in: assistantIds } }).toArray(); const res = assistants .filter((el) => userAssistantsSet.has(el._id.toString())) .map((el) => ({ ...el, _id: el._id.toString(), createdById: undefined, createdByMe: el.createdById.toString() === (locals.user?._id ?? locals.sessionId).toString(), })); return Response.json(res); } else { return Response.json({ message: "Must have session cookie" }, { status: 401 }); } }
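A hypothetical client-side call against this route; the response entry shape follows the projection built in the handler above:

```ts
import { base } from "$app/paths";

async function loadMyAssistants() {
	const res = await fetch(`${base}/api/user/assistants`);
	if (!res.ok) throw new Error((await res.json()).message); // 401 without a session cookie
	const assistants: Array<{ _id: string; createdByMe: boolean }> = await res.json();
	return assistants.filter((a) => a.createdByMe);
}
```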
chat-ui/src/routes/api/user/assistants/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/user/assistants/+server.ts", "repo_id": "chat-ui", "token_count": 509 }
77
import { base } from "$app/paths"; import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { redirect } from "@sveltejs/kit"; export const actions = { async delete({ locals }) { // double check we have a user to delete conversations for if (locals.user?._id || locals.sessionId) { await collections.conversations.deleteMany({ ...authCondition(locals), }); } redirect(303, `${base}/`); }, };
chat-ui/src/routes/conversations/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/conversations/+page.server.ts", "repo_id": "chat-ui", "token_count": 158 }
78
import { collections } from "$lib/server/database"; import { z } from "zod"; import { authCondition } from "$lib/server/auth"; import { DEFAULT_SETTINGS, type SettingsEditable } from "$lib/types/Settings"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import { ObjectId } from "mongodb"; export async function POST({ request, locals }) { const body = await request.json(); const { ethicsModalAccepted, ...settings } = z .object({ shareConversationsWithModelAuthors: z .boolean() .default(DEFAULT_SETTINGS.shareConversationsWithModelAuthors), hideEmojiOnSidebar: z.boolean().default(DEFAULT_SETTINGS.hideEmojiOnSidebar), ethicsModalAccepted: z.boolean().optional(), activeModel: z.string().default(DEFAULT_SETTINGS.activeModel), customPrompts: z.record(z.string()).default({}), tools: z.array(z.string()).optional(), disableStream: z.boolean().default(false), }) .parse(body) satisfies SettingsEditable; // only allow tools to be set to community tools if user is early access // XXX: feature_flag_tools if (!locals.user?.isEarlyAccess) { settings.tools = settings.tools?.filter((toolId) => { return toolFromConfigs.some((tool) => tool._id.toString() === toolId); }); } // make sure all tools exist // either in db or in config if (settings.tools) { const newTools = [ ...(await collections.tools .find({ _id: { $in: settings.tools.map((toolId) => new ObjectId(toolId)) } }) .project({ _id: 1 }) .toArray() .then((tools) => tools.map((tool) => tool._id.toString()))), ...toolFromConfigs .filter((el) => (settings?.tools ?? []).includes(el._id.toString())) .map((el) => el._id.toString()), ]; settings.tools = newTools; } await collections.settings.updateOne( authCondition(locals), { $set: { ...settings, ...(ethicsModalAccepted && { ethicsModalAcceptedAt: new Date() }), updatedAt: new Date(), }, $setOnInsert: { createdAt: new Date(), }, }, { upsert: true, } ); // return ok response return new Response(); }
chat-ui/src/routes/settings/(nav)/+server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+server.ts", "repo_id": "chat-ui", "token_count": 772 }
79
import { authCondition } from "$lib/server/auth.js"; import { Database, collections } from "$lib/server/database.js"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import { SortKey } from "$lib/types/Assistant.js"; import type { CommunityToolDB } from "$lib/types/Tool.js"; import type { User } from "$lib/types/User.js"; import { generateQueryTokens, generateSearchTokens } from "$lib/utils/searchTokens.js"; import { error } from "@sveltejs/kit"; import { ObjectId, type Filter } from "mongodb"; const NUM_PER_PAGE = 16; export const load = async ({ url, locals }) => { // XXX: feature_flag_tools if (!locals.user?.isEarlyAccess) { error(403, "You need to be an early access user to view tools"); } const username = url.searchParams.get("user"); const query = url.searchParams.get("q")?.trim() ?? null; const pageIndex = parseInt(url.searchParams.get("p") ?? "0"); const sort = url.searchParams.get("sort")?.trim() ?? SortKey.TRENDING; const createdByCurrentUser = locals.user?.username && locals.user.username === username; const activeOnly = url.searchParams.get("active") === "true"; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { error(404, `User "${username}" doesn't exist`); } } const settings = await collections.settings.findOne(authCondition(locals)); if (!settings && activeOnly) { error(404, "No user settings found"); } const queryTokens = !!query && generateQueryTokens(query); const filter: Filter<CommunityToolDB> = { ...(!createdByCurrentUser && !activeOnly && !locals.user?.isAdmin && { featured: true }), ...(user && { createdById: user._id }), ...(queryTokens && { searchTokens: { $all: queryTokens } }), ...(activeOnly && { _id: { $in: (settings?.tools ?? []).map((key) => { return new ObjectId(key); }), }, }), }; const communityTools = await Database.getInstance() .getCollections() .tools.find(filter) .skip(NUM_PER_PAGE * pageIndex) .sort({ ...(sort === SortKey.TRENDING && { last24HoursUseCount: -1 }), useCount: -1, }) .limit(NUM_PER_PAGE) .toArray(); const configTools = toolFromConfigs .filter((tool) => !tool?.isHidden) .filter((tool) => { if (queryTokens) { return generateSearchTokens(tool.displayName).some((token) => queryTokens.some((queryToken) => queryToken.test(token)) ); } return true; }); const tools = [...(pageIndex == 0 && !username ? configTools : []), ...communityTools]; const numTotalItems = (await Database.getInstance().getCollections().tools.countDocuments(filter)) + toolFromConfigs.length; return { tools: JSON.parse(JSON.stringify(tools)) as CommunityToolDB[], numTotalItems, numItemsPerPage: NUM_PER_PAGE, query, sort, }; };
chat-ui/src/routes/tools/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/tools/+page.server.ts", "repo_id": "chat-ui", "token_count": 1028 }
80
import adapter from "@sveltejs/adapter-node"; import { vitePreprocess } from "@sveltejs/vite-plugin-svelte"; import dotenv from "dotenv"; dotenv.config({ path: "./.env.local" }); dotenv.config({ path: "./.env" }); process.env.PUBLIC_VERSION ??= process.env.npm_package_version; /** @type {import('@sveltejs/kit').Config} */ const config = { // Consult https://kit.svelte.dev/docs/integrations#preprocessors // for more information about preprocessors preprocess: vitePreprocess(), kit: { adapter: adapter(), paths: { base: process.env.APP_BASE || "", }, csrf: { // handled in hooks.server.ts, because we can have multiple valid origins checkOrigin: false, }, }, }; export default config;
chat-ui/svelte.config.js/0
{ "file_path": "chat-ui/svelte.config.js", "repo_id": "chat-ui", "token_count": 258 }
81
import json import os from dataclasses import dataclass import numpy as np import pyarrow as pa import datasets from utils import get_duration SPEED_TEST_N_EXAMPLES = 100_000_000_000 SPEED_TEST_CHUNK_SIZE = 10_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) def generate_100B_dataset(num_examples: int, chunk_size: int) -> datasets.Dataset: table = pa.Table.from_pydict({"col": [0] * chunk_size}) table = pa.concat_tables([table] * (num_examples // chunk_size)) return datasets.Dataset(table, fingerprint="table_100B") @dataclass class RandIter: low: int high: int size: int seed: int def __post_init__(self): rng = np.random.default_rng(self.seed) self._sampled_values = rng.integers(low=self.low, high=self.high, size=self.size).tolist() def __iter__(self): return iter(self._sampled_values) def __len__(self): return self.size @get_duration def get_first_row(dataset: datasets.Dataset): _ = dataset[0] @get_duration def get_last_row(dataset: datasets.Dataset): _ = dataset[-1] @get_duration def get_batch_of_1024_rows(dataset: datasets.Dataset): _ = dataset[range(len(dataset) // 2, len(dataset) // 2 + 1024)] @get_duration def get_batch_of_1024_random_rows(dataset: datasets.Dataset): _ = dataset[RandIter(0, len(dataset), 1024, seed=42)] def benchmark_table_100B(): times = {"num examples": SPEED_TEST_N_EXAMPLES} functions = (get_first_row, get_last_row, get_batch_of_1024_rows, get_batch_of_1024_random_rows) print("generating dataset") dataset = generate_100B_dataset(num_examples=SPEED_TEST_N_EXAMPLES, chunk_size=SPEED_TEST_CHUNK_SIZE) print("Functions") for func in functions: print(func.__name__) times[func.__name__] = func(dataset) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_table_100B()
datasets/benchmarks/benchmark_getitem_100B.py/0
{ "file_path": "datasets/benchmarks/benchmark_getitem_100B.py", "repo_id": "datasets", "token_count": 867 }
82
# Datasets 🤝 Arrow ## What is Arrow? [Arrow](https://arrow.apache.org/) enables large amounts of data to be processed and moved quickly. It is a specific data format that stores data in a columnar memory layout. This provides several significant advantages: * Arrow's standard format allows [zero-copy reads](https://en.wikipedia.org/wiki/Zero-copy) which removes virtually all serialization overhead. * Arrow is language-agnostic so it supports different programming languages. * Arrow is column-oriented so it is faster at querying and processing slices or columns of data. * Arrow allows for copy-free hand-offs to standard machine learning tools such as NumPy, Pandas, PyTorch, and TensorFlow. * Arrow supports many, possibly nested, column types. ## Memory-mapping 🤗 Datasets uses Arrow for its local caching system. It allows datasets to be backed by an on-disk cache, which is memory-mapped for fast lookup. This architecture allows for large datasets to be used on machines with relatively small device memory. For example, loading the full English Wikipedia dataset only takes a few MB of RAM: ```python >>> import os; import psutil; import timeit >>> from datasets import load_dataset # Process.memory_info is expressed in bytes, so convert to megabytes >>> mem_before = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) >>> wiki = load_dataset("wikipedia", "20220301.en", split="train") >>> mem_after = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) >>> print(f"RAM memory used: {(mem_after - mem_before)} MB") RAM memory used: 50 MB ``` This is possible because the Arrow data is actually memory-mapped from disk, and not loaded in memory. Memory-mapping allows access to data on disk, and leverages virtual memory capabilities for fast lookups. ## Performance Iterating over a memory-mapped dataset using Arrow is fast. Iterating over Wikipedia on a laptop gives you speeds of 1-3 Gbit/s: ```python >>> s = """batch_size = 1000 ... for batch in wiki.iter(batch_size): ... ... ... """ >>> elapsed_time = timeit.timeit(stmt=s, number=1, globals=globals()) >>> print(f"Time to iterate over the {wiki.dataset_size >> 30} GB dataset: {elapsed_time:.1f} sec, " ... f"ie. {float(wiki.dataset_size >> 27)/elapsed_time:.1f} Gb/s") Time to iterate over the 18 GB dataset: 31.8 sec, ie. 4.8 Gb/s ```
datasets/docs/source/about_arrow.md/0
{ "file_path": "datasets/docs/source/about_arrow.md", "repo_id": "datasets", "token_count": 682 }
83
# Search index [FAISS](https://github.com/facebookresearch/faiss) and [Elasticsearch](https://www.elastic.co/elasticsearch/) enables searching for examples in a dataset. This can be useful when you want to retrieve specific examples from a dataset that are relevant to your NLP task. For example, if you are working on an Open Domain Question Answering task, you may want to only return examples that are relevant to answering your question. This guide will show you how to build an index for your dataset that will allow you to search it. ## FAISS FAISS retrieves documents based on the similarity of their vector representations. In this example, you will generate the vector representations with the [DPR](https://huggingface.co/transformers/model_doc/dpr.html) model. 1. Download the DPR model from 🤗 Transformers: ```py >>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer >>> import torch >>> torch.set_grad_enabled(False) >>> ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base") >>> ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base") ``` 2. Load your dataset and compute the vector representations: ```py >>> from datasets import load_dataset >>> ds = load_dataset('crime_and_punish', split='train[:100]') >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': ctx_encoder(**ctx_tokenizer(example["line"], return_tensors="pt"))[0][0].numpy()}) ``` 3. Create the index with [`Dataset.add_faiss_index`]: ```py >>> ds_with_embeddings.add_faiss_index(column='embeddings') ``` 4. Now you can query your dataset with the `embeddings` index. Load the DPR Question Encoder, and search for a question with [`Dataset.get_nearest_examples`]: ```py >>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer >>> q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base") >>> q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base") >>> question = "Is it serious ?" >>> question_embedding = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy() >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', question_embedding, k=10) >>> retrieved_examples["line"][0] '_that_ serious? It is not serious at all. It’s simply a fantasy to amuse\r\n' ``` 5. You can access the index with [`Dataset.get_index`] and use it for special operations, e.g. query it using `range_search`: ```py >>> faiss_index = ds_with_embeddings.get_index('embeddings').faiss_index >>> limits, distances, indices = faiss_index.range_search(x=question_embedding.reshape(1, -1), thresh=0.95) ``` 6. When you are done querying, save the index on disk with [`Dataset.save_faiss_index`]: ```py >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss') ``` 7. Reload it at a later time with [`Dataset.load_faiss_index`]: ```py >>> ds = load_dataset('crime_and_punish', split='train[:100]') >>> ds.load_faiss_index('embeddings', 'my_index.faiss') ``` ## Elasticsearch Unlike FAISS, Elasticsearch retrieves documents based on exact matches. Start Elasticsearch on your machine, or see the [Elasticsearch installation guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html) if you don't already have it installed. 1. Load the dataset you want to index: ```py >>> from datasets import load_dataset >>> squad = load_dataset('squad', split='validation') ``` 2. 
Build the index with [`Dataset.add_elasticsearch_index`]: ```py >>> squad.add_elasticsearch_index("context", host="localhost", port="9200") ``` 3. Then you can query the `context` index with [`Dataset.get_nearest_examples`]: ```py >>> query = "machine" >>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10) >>> retrieved_examples["title"][0] 'Computational_complexity_theory' ``` 4. If you want to reuse the index, define the `es_index_name` parameter when you build the index: ```py >>> from datasets import load_dataset >>> squad = load_dataset('squad', split='validation') >>> squad.add_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context") >>> squad.get_index("context").es_index_name hf_squad_val_context ``` 5. Reload it later with the index name when you call [`Dataset.load_elasticsearch_index`]: ```py >>> from datasets import load_dataset >>> squad = load_dataset('squad', split='validation') >>> squad.load_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context") >>> query = "machine" >>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10) ``` For more advanced Elasticsearch usage, you can specify your own configuration with custom settings: ```py >>> import elasticsearch as es >>> import elasticsearch.helpers >>> from elasticsearch import Elasticsearch >>> es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) # default client >>> es_config = { ... "settings": { ... "number_of_shards": 1, ... "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}}, ... }, ... "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}}, ... } # default config >>> es_index_name = "hf_squad_context" # name of the index in Elasticsearch >>> squad.add_elasticsearch_index("context", es_client=es_client, es_config=es_config, es_index_name=es_index_name) ```
datasets/docs/source/faiss_es.mdx/0
{ "file_path": "datasets/docs/source/faiss_es.mdx", "repo_id": "datasets", "token_count": 1830 }
84
# Builder classes ## Builders 🤗 Datasets relies on two main classes during the dataset building process: [`DatasetBuilder`] and [`BuilderConfig`]. [[autodoc]] datasets.DatasetBuilder [[autodoc]] datasets.GeneratorBasedBuilder [[autodoc]] datasets.ArrowBasedBuilder [[autodoc]] datasets.BuilderConfig ## Download [[autodoc]] datasets.DownloadManager [[autodoc]] datasets.StreamingDownloadManager [[autodoc]] datasets.DownloadConfig [[autodoc]] datasets.DownloadMode ## Verification [[autodoc]] datasets.VerificationMode ## Splits [[autodoc]] datasets.SplitGenerator [[autodoc]] datasets.Split [[autodoc]] datasets.NamedSplit [[autodoc]] datasets.NamedSplitAll [[autodoc]] datasets.ReadInstruction ## Version [[autodoc]] datasets.utils.Version
datasets/docs/source/package_reference/builder_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/builder_classes.mdx", "repo_id": "datasets", "token_count": 240 }
85
# Use with JAX This document is a quick introduction to using `datasets` with JAX, with a particular focus on how to get `jax.Array` objects out of our datasets, and how to use them to train JAX models. <Tip> `jax` and `jaxlib` are required to reproduce the code above, so please make sure you install them as `pip install datasets[jax]`. </Tip> ## Dataset format By default, datasets return regular Python objects: integers, floats, strings, lists, etc. To get JAX arrays (numpy-like) instead, you can set the format of the dataset to `jax`: ```py >>> from datasets import Dataset >>> data = [[1, 2], [3, 4]] >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("jax") >>> ds[0] {'data': DeviceArray([1, 2], dtype=int32)} >>> ds[:2] {'data': DeviceArray([ [1, 2], [3, 4]], dtype=int32)} ``` <Tip> A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to JAX arrays. </Tip> Note that the exact same procedure applies to `DatasetDict` objects, so that when setting the format of a `DatasetDict` to `jax`, all the `Dataset`s there will be formatted as `jax`: ```py >>> from datasets import DatasetDict >>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}} >>> dds = DatasetDict.from_dict(data) >>> dds = dds.with_format("jax") >>> dds["train"][:2] {'data': DeviceArray([ [1, 2], [3, 4]], dtype=int32)} ``` Another thing you'll need to take into consideration is that the formatting is not applied until you actually access the data. So if you want to get a JAX array out of a dataset, you'll need to access the data first, otherwise the format will remain the same. Finally, to load the data in the device of your choice, you can specify the `device` argument, but note that `jaxlib.xla_extension.Device` is not supported as it's not serializable with either `pickle` or `dill`, so you'll need to use its string identifier instead: ```py >>> import jax >>> from datasets import Dataset >>> data = [[1, 2], [3, 4]] >>> ds = Dataset.from_dict({"data": data}) >>> device = str(jax.devices()[0]) # Not casting to `str` before passing it to `with_format` will raise a `ValueError` >>> ds = ds.with_format("jax", device=device) >>> ds[0] {'data': DeviceArray([1, 2], dtype=int32)} >>> ds[0]["data"].device() TFRT_CPU_0 >>> assert ds[0]["data"].device() == jax.devices()[0] True ``` Note that if the `device` argument is not provided to `with_format` then it will use the default device which is `jax.devices()[0]`. ### N-dimensional arrays If your dataset consists of N-dimensional arrays, you will see that by default they are considered as the same tensor if the shape is fixed: ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]] # fixed shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("jax") >>> ds[0] {'data': Array([[1, 2], [3, 4]], dtype=int32)} ``` ```py >>> from datasets import Dataset >>> data = [[[1, 2],[3]], [[4, 5, 6],[7, 8]]] # varying shape >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("jax") >>> ds[0] {'data': [Array([1, 2], dtype=int32), Array([3], dtype=int32)]} ``` However, this logic often requires slow shape comparisons and data copies.
To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors: ```py >>> from datasets import Dataset, Features, Array2D >>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] >>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')}) >>> ds = Dataset.from_dict({"data": data}, features=features) >>> ds = ds.with_format("jax") >>> ds[0] {'data': Array([[1, 2], [3, 4]], dtype=int32)} >>> ds[:2] {'data': Array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=int32)} ``` ### Other feature types [`ClassLabel`] data is properly converted to arrays: ```py >>> from datasets import Dataset, Features, ClassLabel >>> labels = [0, 0, 1] >>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) >>> ds = Dataset.from_dict({"label": labels}, features=features) >>> ds = ds.with_format("jax") >>> ds[:3] {'label': DeviceArray([0, 0, 1], dtype=int32)} ``` String and binary objects are unchanged, since JAX only supports numbers. The [`Image`] and [`Audio`] feature types are also supported. <Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. </Tip> ```py >>> from datasets import Dataset, Features, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("jax") >>> ds[0]["image"].shape (512, 512, 3) >>> ds[0] {'image': DeviceArray([[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]], dtype=uint8)} >>> ds[:2]["image"].shape (2, 512, 512, 3) >>> ds[:2] {'image': DeviceArray([[[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]]], dtype=uint8)} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("jax") >>> ds[0]["audio"]["array"] DeviceArray([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 , 0.01809692, 0.00268555], dtype=float32) >>> ds[0]["audio"]["sampling_rate"] DeviceArray(44100, dtype=int32, weak_type=True) ``` ## Data loading JAX doesn't have any built-in data loading capabilities, so you'll need to use a library such as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader` or [TensorFlow](https://www.tensorflow.org/) using a `tf.data.Dataset`. Citing the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Neural_Network_and_Data_Loading.html#data-loading-with-pytorch) on this topic: "JAX is laser-focused on program transformations and accelerator-backed NumPy, so we don’t include data loading or munging in the JAX library. There are already a lot of great data loaders out there, so let’s just use them instead of reinventing anything. We’ll grab PyTorch’s data loader, and make a tiny shim to make it work with NumPy arrays.". So that's the reason why JAX-formatting in `datasets` is so useful, because it lets you use any model from the HuggingFace Hub with JAX, without having to worry about the data loading part. ### Using `with_format('jax')` The easiest way to get JAX arrays out of a dataset is to use the `with_format('jax')` method.
Let's assume that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available at the HuggingFace Hub at https://huggingface.co/datasets/mnist. ```py >>> from datasets import load_dataset >>> ds = load_dataset("mnist") >>> ds = ds.with_format("jax") >>> ds["train"][0] {'image': DeviceArray([[ 0, 0, 0, ...], [ 0, 0, 0, ...], ..., [ 0, 0, 0, ...], [ 0, 0, 0, ...]], dtype=uint8), 'label': DeviceArray(5, dtype=int32)} ``` Once the format is set, we can feed the dataset to the JAX model in batches using the `Dataset.iter()` method: ```py >>> for epoch in range(epochs): ... for batch in ds["train"].iter(batch_size=32): ... x, y = batch["image"], batch["label"] ... ... ```
datasets/docs/source/use_with_jax.mdx/0
{ "file_path": "datasets/docs/source/use_with_jax.mdx", "repo_id": "datasets", "token_count": 2969 }
86
from argparse import ArgumentParser from typing import Optional from datasets.commands import BaseDatasetsCLICommand from datasets.hub import convert_to_parquet def _command_factory(args): return ConvertToParquetCommand( args.dataset_id, args.token, args.revision, args.trust_remote_code, ) class ConvertToParquetCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser): parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet") parser.add_argument( "dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME" ) parser.add_argument("--token", help="access token to the Hugging Face Hub (defaults to logged-in user's one)") parser.add_argument("--revision", help="source revision") parser.add_argument( "--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script" ) parser.set_defaults(func=_command_factory) def __init__( self, dataset_id: str, token: Optional[str], revision: Optional[str], trust_remote_code: bool, ): self._dataset_id = dataset_id self._token = token self._revision = revision self._trust_remote_code = trust_remote_code def run(self) -> None: _ = convert_to_parquet( self._dataset_id, revision=self._revision, token=self._token, trust_remote_code=self._trust_remote_code )
datasets/src/datasets/commands/convert_to_parquet.py/0
{ "file_path": "datasets/src/datasets/commands/convert_to_parquet.py", "repo_id": "datasets", "token_count": 652 }
87
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""This module handles feature definitions in datasets and some utilities to display table types."""

import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing import Sequence as Sequence_

import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype

from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import experimental, logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .translation import Translation, TranslationVariableLanguages


logger = logging.get_logger(__name__)


def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
    """
    _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
    """
    if pyarrow.types.is_null(arrow_type):
        return "null"
    elif pyarrow.types.is_boolean(arrow_type):
        return "bool"
    elif pyarrow.types.is_int8(arrow_type):
        return "int8"
    elif pyarrow.types.is_int16(arrow_type):
        return "int16"
    elif pyarrow.types.is_int32(arrow_type):
        return "int32"
    elif pyarrow.types.is_int64(arrow_type):
        return "int64"
    elif pyarrow.types.is_uint8(arrow_type):
        return "uint8"
    elif pyarrow.types.is_uint16(arrow_type):
        return "uint16"
    elif pyarrow.types.is_uint32(arrow_type):
        return "uint32"
    elif pyarrow.types.is_uint64(arrow_type):
        return "uint64"
    elif pyarrow.types.is_float16(arrow_type):
        return "float16"  # pyarrow dtype is "halffloat"
    elif pyarrow.types.is_float32(arrow_type):
        return "float32"  # pyarrow dtype is "float"
    elif pyarrow.types.is_float64(arrow_type):
        return "float64"  # pyarrow dtype is "double"
    elif pyarrow.types.is_time32(arrow_type):
        return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
    elif pyarrow.types.is_time64(arrow_type):
        return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
    elif pyarrow.types.is_timestamp(arrow_type):
        if arrow_type.tz is None:
            return f"timestamp[{arrow_type.unit}]"
        elif arrow_type.tz:
            return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
        else:
            raise ValueError(f"Unexpected timestamp object {arrow_type}.")
    elif pyarrow.types.is_date32(arrow_type):
        return "date32"  # pyarrow dtype is "date32[day]"
    elif pyarrow.types.is_date64(arrow_type):
        return "date64"  # pyarrow dtype is "date64[ms]"
    elif pyarrow.types.is_duration(arrow_type):
        return f"duration[{arrow_type.unit}]"
    elif pyarrow.types.is_decimal128(arrow_type):
        return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
    elif pyarrow.types.is_decimal256(arrow_type):
        return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
    elif pyarrow.types.is_binary(arrow_type):
        return "binary"
    elif pyarrow.types.is_large_binary(arrow_type):
        return "large_binary"
    elif pyarrow.types.is_string(arrow_type):
        return "string"
    elif pyarrow.types.is_large_string(arrow_type):
        return "large_string"
    elif pyarrow.types.is_dictionary(arrow_type):
        return _arrow_to_datasets_dtype(arrow_type.value_type)
    else:
        raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")


def string_to_arrow(datasets_dtype: str) -> pa.DataType:
    """
    string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`

    This is necessary because the datasets.Value() primitive type is constructed using a string dtype

    Value(dtype=str)

    But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema, which means
    that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this
    function.
    """

    def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
        msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
        if examples:
            examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
            msg += f"\nValid examples include: {examples}."
        if urls:
            urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
            msg += f"\nFor more information, see: {urls}."
return msg if datasets_dtype in pa.__dict__: return pa.__dict__[datasets_dtype]() if (datasets_dtype + "_") in pa.__dict__: return pa.__dict__[datasets_dtype + "_"]() timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype) if timestamp_matches: timestamp_internals = timestamp_matches.group(1) internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals) if timestamp_internals in ["s", "ms", "us", "ns"]: return pa.timestamp(timestamp_internals) elif internals_matches: return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "timestamp", examples=["timestamp[us]", "timestamp[us, tz=America/New_York"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"], ) ) duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype) if duration_matches: duration_internals = duration_matches.group(1) if duration_internals in ["s", "ms", "us", "ns"]: return pa.duration(duration_internals) else: raise ValueError( _dtype_error_msg( datasets_dtype, "duration", examples=["duration[s]", "duration[us]"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"], ) ) time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype) if time_matches: time_internals_bits = time_matches.group(1) if time_internals_bits == "32": time_internals_unit = time_matches.group(2) if time_internals_unit in ["s", "ms"]: return pa.time32(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)." ) elif time_internals_bits == "64": time_internals_unit = time_matches.group(2) if time_internals_unit in ["us", "ns"]: return pa.time64(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)." 
) else: raise ValueError( _dtype_error_msg( datasets_dtype, "time", examples=["time32[s]", "time64[us]"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html", "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html", ], ) ) decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype) if decimal_matches: decimal_internals_bits = decimal_matches.group(1) if decimal_internals_bits == "128": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal128(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal128", examples=["decimal128(10, 2)", "decimal128(4, -2)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"], ) ) elif decimal_internals_bits == "256": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal256(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal256", examples=["decimal256(30, 2)", "decimal256(38, -4)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"], ) ) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal", examples=["decimal128(12, 3)", "decimal256(40, 6)"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html", "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html", ], ) ) raise ValueError( f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " f"Please make sure to use a correct data type, see: " f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions" ) def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]: """ Cast pytorch/tensorflow/pandas objects to python numpy array/lists. It works recursively. If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast. only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. 
Returns: casted_obj: the casted object has_changed (bool): True if the object has been changed, False if it is identical """ if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(obj, np.ndarray): if obj.ndim == 0: return obj[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj, False else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj ], True, ) elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor): if obj.dtype == torch.bfloat16: return _cast_to_python_objects( obj.detach().to(torch.float).cpu().numpy(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting, )[0], True if obj.ndim == 0: return obj.detach().cpu().numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.detach().cpu().numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.detach().cpu().numpy() ], True, ) elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor): if obj.ndim == 0: return obj.numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.numpy() ], True, ) elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray): if obj.ndim == 0: return np.asarray(obj)[()], True elif not only_1d_for_numpy or obj.ndim == 1: return np.asarray(obj), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in np.asarray(obj) ], True, ) elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image): return encode_pil_image(obj), True elif isinstance(obj, pd.Series): return ( _cast_to_python_objects( obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, pd.DataFrame): return ( { key: _cast_to_python_objects( value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for key, value in obj.to_dict("series").items() }, True, ) elif isinstance(obj, pd.Timestamp): return obj.to_pydatetime(), True elif isinstance(obj, pd.Timedelta): return obj.to_pytimedelta(), True elif isinstance(obj, Mapping): has_changed = not isinstance(obj, dict) output = {} for k, v in obj.items(): casted_v, has_changed_v = _cast_to_python_objects( v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) has_changed |= has_changed_v output[k] = casted_v return output if has_changed else obj, has_changed elif hasattr(obj, "__array__"): return ( _cast_to_python_objects( obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, (list, tuple)): if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt): break casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects( first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) 
if has_changed_first_elmt or not optimize_list_casting: return ( [ _cast_to_python_objects( elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for elmt in obj ], True, ) else: if isinstance(obj, (list, tuple)): return obj, False else: return list(obj), True else: return obj, False else: return obj, False def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] @dataclass class Value: """ Scalar feature value of a particular data type. The possible dtypes of `Value` are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` (alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Args: dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class _ArrayXD: def __post_init__(self): self.shape = tuple(self.shape) def __call__(self): pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype) return pa_type def encode_example(self, value): return value @dataclass class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): Size of each dimension. 
dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False) @dataclass class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False) @dataclass class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False) @dataclass class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False) class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. 
Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) class Array2DExtensionType(_ArrayXDExtensionType): ndims = 2 class Array3DExtensionType(_ArrayXDExtensionType): ndims = 3 class Array4DExtensionType(_ArrayXDExtensionType): ndims = 4 class Array5DExtensionType(_ArrayXDExtensionType): ndims = 5 # Register the extension types for deserialization pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool: """ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. 
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 """ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: if pa.types.is_list(pa_type): return _unnest_pa_type(pa_type.value_type) return pa_type if unnest: pa_type = _unnest_pa_type(pa_type) return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type)) class ArrayExtensionArray(pa.ExtensionArray): def __array__(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) return self.to_numpy(zero_copy_only=zero_copy_only) def __getitem__(self, i): return self.storage[i] def to_numpy(self, zero_copy_only=True): storage: pa.ListArray = self.storage null_mask = storage.is_null().to_numpy(zero_copy_only=False) if self.type.shape[0] is not None: size = 1 null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) for i in range(self.type.ndims): size *= self.type.shape[i] storage = storage.flatten() numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) if len(null_indices): numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) else: shape = self.type.shape ndims = self.type.ndims arrays = [] first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) for i, is_null in enumerate(null_mask): if is_null: arrays.append(np.nan) else: storage_el = storage[i : i + 1] first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] # flatten storage for _ in range(ndims): storage_el = storage_el.flatten() numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) if len(np.unique(np.diff(first_dim_offsets))) > 1: # ragged numpy_arr = np.empty(len(arrays), dtype=object) numpy_arr[:] = arrays else: numpy_arr = np.array(arrays) return numpy_arr def to_pylist(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) if self.type.shape[0] is None and numpy_arr.dtype == object: return [arr.tolist() for arr in numpy_arr.tolist()] else: return numpy_arr.tolist() class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) @classmethod def construct_array_type(cls): return PandasArrayExtensionArray @property def type(self) -> type: return np.ndarray @property def kind(self) -> str: return "O" @property def name(self) -> str: return f"array[{self.value_type}]" @property def value_type(self) -> np.dtype: return self._value_type class PandasArrayExtensionArray(PandasExtensionArray): def 
__init__(self, data: np.ndarray, copy: bool = False): self._data = data if not copy else np.array(data) self._dtype = PandasArrayExtensionDtype(data.dtype) def __array__(self, dtype=None): """ Convert to NumPy Array. Note that Pandas expects a 1D array when dtype is set to object. But for other dtypes, the returned shape is the same as the one of ``data``. More info about pandas 1D requirement for PandasExtensionArray here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray """ if dtype == np.dtype(object): out = np.empty(len(self._data), dtype=object) for i in range(len(self._data)): out[i] = self._data[i] return out if dtype is None: return self._data else: return self._data.astype(dtype) def copy(self, deep: bool = False) -> "PandasArrayExtensionArray": return PandasArrayExtensionArray(self._data, copy=True) @classmethod def _from_sequence( cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False ) -> "PandasArrayExtensionArray": if len(scalars) > 1 and all( isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars ): data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) else: data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data, copy=copy) @classmethod def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray": if len(to_concat) > 1 and all( va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat ): data = np.vstack([va._data for va in to_concat]) else: data = np.empty(len(to_concat), dtype=object) data[:] = [va._data for va in to_concat] return cls(data, copy=False) @property def dtype(self) -> PandasArrayExtensionDtype: return self._dtype @property def nbytes(self) -> int: return self._data.nbytes def isna(self) -> np.ndarray: return np.array([pd.isna(arr).any() for arr in self._data]) def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: raise NotImplementedError() def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]: if isinstance(item, int): return self._data[item] return PandasArrayExtensionArray(self._data[item], copy=False) def take( self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None ) -> "PandasArrayExtensionArray": indices: np.ndarray = np.asarray(indices, dtype=int) if allow_fill: fill_value = ( self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) ) mask = indices == -1 if (indices < -1).any(): raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True") elif len(self) > 0: pass elif not np.all(mask): raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.") else: data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) return PandasArrayExtensionArray(data, copy=False) took = self._data.take(indices, axis=0) if allow_fill and mask.any(): took[mask] = [fill_value] * np.sum(mask) return PandasArrayExtensionArray(took, copy=False) def __len__(self) -> int: return len(self._data) def __eq__(self, other) -> np.ndarray: if not isinstance(other, PandasArrayExtensionArray): raise NotImplementedError(f"Invalid type to compare to: {type(other)}") return (self._data == other._data).all() def pandas_types_mapper(dtype): if 
isinstance(dtype, _ArrayXDExtensionType): return PandasArrayExtensionDtype(dtype.value_type) @dataclass class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. Example: ```py >>> from datasets import Features >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False) def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError( "ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.") def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0] def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0] def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()` Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. 
""" if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type) @staticmethod def _load_names_from_file(names_filepath): with open(names_filepath, encoding="utf-8") as f: return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names @dataclass class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature ([`FeatureType`]): A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) @dataclass class LargeList: """Feature type for large list data composed of child feature data type. It is backed by `pyarrow.LargeListType`, which is like `pyarrow.ListType` but with 64-bit rather than 32-bit offsets. Args: feature ([`FeatureType`]): Child feature data type of each item within the large list. """ feature: Any id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="LargeList", init=False, repr=False) FeatureType = Union[ dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, LargeList, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image, ] def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. """ if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, LargeList, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True def get_nested_type(schema: FeatureType) -> pa.DataType: """ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of generate_from_arrow_type(). 
It performs double-duty as the implementation of Features.type and handles the conversion of datasets.Feature->pa.struct """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, Features): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # Features is subclass of dict, and dict order is deterministic since Python 3.6 elif isinstance(schema, dict): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # however don't sort on struct types since the order matters elif isinstance(schema, (list, tuple)): if len(schema) != 1: raise ValueError("When defining list feature, you should just provide one example of the inner type") value_type = get_nested_type(schema[0]) return pa.list_(value_type) elif isinstance(schema, LargeList): value_type = get_nested_type(schema.feature) return pa.large_list(value_type) elif isinstance(schema, Sequence): value_type = get_nested_type(schema.feature) # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): data_type = pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type}) else: data_type = pa.list_(value_type, schema.length) return data_type # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods) return schema() def encode_nested_example(schema, obj, level=0): """Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None elif isinstance(obj, np.ndarray): return encode_nested_example(schema, obj.tolist()) else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, LargeList): if obj is None: return None else: if len(obj) > 0: sub_schema = schema.feature for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, Sequence): if obj is None: return None # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k in schema.feature: list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj] return list_dict else: # obj is a single dict for k in schema.feature: list_dict[k] = ( [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]] if k in obj else None ) return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f"Got a string but expected a list instead: '{obj}'") else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if ( not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt ): return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, LargeList): if obj is None: return None else: sub_schema = schema.feature if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): # we pass the token to read and decode files from private repositories in streaming mode if obj is not None and schema.decode: return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) return obj _FEATURE_TYPES: Dict[str, FeatureType] = { Value.__name__: Value, ClassLabel.__name__: ClassLabel, Translation.__name__: Translation, TranslationVariableLanguages.__name__: TranslationVariableLanguages, LargeList.__name__: LargeList, Sequence.__name__: Sequence, Array2D.__name__: Array2D, Array3D.__name__: Array3D, Array4D.__name__: Array4D, Array5D.__name__: Array5D, Audio.__name__: Audio, Image.__name__: Image, } @experimental def register_feature( feature_cls: type, feature_type: str, ): """ Register a Feature object using a name and class. This function must be used on a Feature class. """ if feature_type in _FEATURE_TYPES: logger.warning( f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})" ) _FEATURE_TYPES[feature_type] = feature_cls def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} obj = dict(obj) _type = obj.pop("_type") class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None) if class_type is None: raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}") if class_type == LargeList: feature = obj.pop("feature") return LargeList(feature=generate_from_dict(feature), **obj) if class_type == Sequence: feature = obj.pop("feature") return Sequence(feature=generate_from_dict(feature), **obj) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names}) def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema """ if isinstance(pa_type, pa.StructType): return {field.name: generate_from_arrow_type(field.type) for field in pa_type} elif isinstance(pa_type, pa.FixedSizeListType): return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) elif isinstance(pa_type, pa.ListType): feature = generate_from_arrow_type(pa_type.value_type) if isinstance(feature, (dict, tuple, list)): return [feature] return Sequence(feature=feature) elif isinstance(pa_type, pa.LargeListType): feature = generate_from_arrow_type(pa_type.value_type) return LargeList(feature=feature) elif isinstance(pa_type, _ArrayXDExtensionType): array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) elif isinstance(pa_type, pa.DataType): return Value(dtype=_arrow_to_datasets_dtype(pa_type)) else: raise ValueError(f"Cannot convert {pa_type} to a Feature type.") def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a multidimensional NumPy array""" arr = np.array(arr) values = pa.array(arr.flatten(), type=type) for i in range(arr.ndim - 1): n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1) step_offsets = arr.shape[arr.ndim - i - 1] offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) values = pa.ListArray.from_arrays(offsets, values) return values def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: null_mask = np.array([arr is None for arr in l_arr]) null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) l_arr = [arr for arr in l_arr if arr is not None] offsets = np.cumsum( [0] + [len(arr) for arr in l_arr], dtype=object ) # convert to dtype object to allow None insertion offsets = np.insert(offsets, null_indices, None) offsets = pa.array(offsets, type=pa.int32()) values = pa.concat_arrays(l_arr) return pa.ListArray.from_arrays(offsets, values) def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray: """Build a 
PyArrow ListArray from a possibly nested list of NumPy arrays""" if len(l_arr) > 0: return list_of_pa_arrays_to_pyarrow_listarray( [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] ) else: return pa.array([], type=type) def contains_any_np_array(data: Any): """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray. Args: data (Any): Data. Returns: bool """ if isinstance(data, np.ndarray): return True elif isinstance(data, list): return contains_any_np_array(first_non_null_value(data)[1]) else: return False def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray: """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray. Args: data (Union[np.ndarray, List]): Data. type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type. Returns: pa.ListArray """ if isinstance(data, np.ndarray): return numpy_to_pyarrow_listarray(data, type=type) elif isinstance(data, list): return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: """Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array """ if contains_any_np_array(data): return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) else: return pa.array(data, pa_type.storage_dtype) def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType: """Visit a (possibly nested) feature. Args: feature (FeatureType): the feature type to be checked Returns: visited feature (FeatureType) """ if isinstance(feature, dict): out = func({k: _visit(f, func) for k, f in feature.items()}) elif isinstance(feature, (list, tuple)): out = func([_visit(feature[0], func)]) elif isinstance(feature, LargeList): out = func(LargeList(_visit(feature.feature, func))) elif isinstance(feature, Sequence): out = func(Sequence(_visit(feature.feature, func), length=feature.length)) else: out = func(feature) return feature if out is None else out def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool: """Check if a (possibly nested) feature requires decoding. Args: feature (FeatureType): the feature type to be checked ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value of the `decode` attribute of the decodable feature types. Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_decoding(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_decoding(feature[0]) elif isinstance(feature, LargeList): return require_decoding(feature.feature) elif isinstance(feature, Sequence): return require_decoding(feature.feature) else: return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True) def require_storage_cast(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires storage casting. 
Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, LargeList): return require_storage_cast(feature.feature) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "cast_storage") def require_storage_embed(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires embedding data into storage. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, LargeList): return require_storage_cast(feature.feature) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "embed_storage") def keep_features_dicts_synced(func): """ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. """ @wraps(func) def wrapper(*args, **kwargs): if args: self: "Features" = args[0] args = args[1:] else: self: "Features" = kwargs.pop("self") out = func(self, *args, **kwargs) assert hasattr(self, "_column_requires_decoding") self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()} return out wrapper._decorator_name_ = "_keep_dicts_synced" return wrapper class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - [`Value`] feature specifies a single data type value, e.g. `int64` or `string`. - [`ClassLabel`] feature specifies a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - Python `dict` specifies a composite feature containing a mapping of sub-fields to sub-features. It's possible to have nested fields of nested fields in an arbitrary manner. - Python `list`, [`LargeList`] or [`Sequence`] specifies a composite feature containing a sequence of sub-features, all of the same feature type. <Tip> A [`Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a Python `list` or a [`LargeList`] instead of the [`Sequence`]. </Tip> - [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`Translation`] or [`TranslationVariableLanguages`] feature specific to Machine Translation. 
""" def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) @property def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) @property def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) @classmethod def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Also, pa.dictionary is not supported and it uses its underlying type instead. Therefore datasets convert DictionaryArray objects to their actual values. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj) @classmethod def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. 
Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") for list_type in ["large_list", "list", "sequence"]: # # list_type: -> list_type: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["dtype"]: feature[list_type] = feature[list_type]["dtype"] # # list_type: -> list_type: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["struct"]: feature[list_type] = feature[list_type]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "LargeList": _feature = obj.pop("feature") return simplify({"large_list": to_yaml_inner(_feature), **obj}) elif _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") for list_type in ["large_list", "list", "sequence"]: # # list_type: int32 -> list_type: # -> dtype: int32 # if isinstance(feature.get(list_type), str): feature[list_type] = {"dtype": feature[list_type]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 
1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "large_list": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "LargeList"} if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj, level=1) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. 
Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have two features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): sequence_kwargs = vars(source).copy() source = sequence_kwargs.pop("feature") if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, **sequence_kwargs) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], **sequence_kwargs) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise 
ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] elif isinstance(source, LargeList): if not isinstance(target, LargeList): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) return LargeList(recursive_reorder(source.feature, target.feature, stack)) else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k in name2feature and isinstance(v, dict): # Recursively align features. name2feature[k] = _align_features([name2feature[k], v])[0] elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. 
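Example (illustrative only; the feature dicts below are made up for this sketch):

```py
>>> from datasets import Features, Value
>>> f1 = Features({"id": Value("int64"), "text": Value("null")})
>>> f2 = Features({"id": Value("int64"), "text": Value("string")})
>>> _check_if_features_can_be_aligned([f1, f2])  # passes: Value("null") can be aligned
>>> _align_features([f1, f2])[0]["text"]
Value(dtype='string', id=None)
```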
""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if isinstance(v, dict) and isinstance(name2feature[k], dict): # Deep checks for structure. _check_if_features_can_be_aligned([name2feature[k], v]) elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' )
datasets/src/datasets/features/features.py/0
{ "file_path": "datasets/src/datasets/features/features.py", "repo_id": "datasets", "token_count": 41338 }
88
import copy import os from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.types from .utils.logging import get_logger if TYPE_CHECKING: from .features.features import Features, FeatureType logger = get_logger(__name__) def inject_arrow_table_documentation(arrow_table_method): def wrapper(fn): fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "") fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table") if hasattr(arrow_table_method, "__annotations__"): fn.__annotations__ = arrow_table_method.__annotations__ return fn return wrapper def _in_memory_arrow_table_from_file(filename: str) -> pa.Table: in_memory_stream = pa.input_stream(filename) opened_stream = pa.ipc.open_stream(in_memory_stream) pa_table = opened_stream.read_all() return pa_table def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table: stream = pa.BufferReader(buffer) opened_stream = pa.ipc.open_stream(stream) table = opened_stream.read_all() return table def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader: memory_mapped_stream = pa.memory_map(filename) return pa.ipc.open_stream(memory_mapped_stream) def read_schema_from_file(filename: str) -> pa.Schema: """ Infer arrow table schema from file without loading the whole file into memory. Useful especially for very big files. """ with pa.memory_map(filename) as memory_mapped_stream: schema = pa.ipc.open_stream(memory_mapped_stream).schema return schema def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table: opened_stream = _memory_mapped_record_batch_reader_from_file(filename) pa_table = opened_stream.read_all() return pa_table def _deepcopy(x, memo: dict): """deepcopy a regular class instance""" cls = x.__class__ result = cls.__new__(cls) memo[id(x)] = result for k, v in x.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) return result def _interpolation_search(arr: List[int], x: int) -> int: """ Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (`List[int]`): non-empty sorted list of integers x (`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values """ i, j = 0, len(arr) - 1 while i < j and arr[i] <= x < arr[j]: k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i])) if arr[k] <= x < arr[k + 1]: return k elif arr[k] < x: i, j = k + 1, j else: i, j = i, k raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.") class IndexedTableMixin: def __init__(self, table: pa.Table): self._schema: pa.Schema = table.schema self._batches: List[pa.RecordBatch] = [ recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0 ] self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64) def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table: """ Create a pa.Table by gathering the records at the specified indices.
Should be faster than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute the binary searches in parallel, in highly optimized C """ if not len(indices): raise ValueError("Indices must be non-empty") batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1 return pa.Table.from_batches( [ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1) for batch_idx, i in zip(batch_indices, indices) ], schema=self._schema, ) def fast_slice(self, offset=0, length=None) -> pa.Table: """ Slice the Table using interpolation search. The behavior is the same as `pyarrow.Table.slice` but it's significantly faster. Interpolation search is used to find the start and end indexes of the batches we want to keep. The batches to keep are then concatenated to form the sliced Table. """ if offset < 0: raise IndexError("Offset must be non-negative") elif offset >= self._offsets[-1] or (length is not None and length <= 0): return pa.Table.from_batches([], schema=self._schema) i = _interpolation_search(self._offsets, offset) if length is None or length + offset >= self._offsets[-1]: batches = self._batches[i:] batches[0] = batches[0].slice(offset - self._offsets[i]) else: j = _interpolation_search(self._offsets, offset + length - 1) batches = self._batches[i : j + 1] batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j]) batches[0] = batches[0].slice(offset - self._offsets[i]) return pa.Table.from_batches(batches, schema=self._schema) class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata (`bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects.
Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list. Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool if not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies of Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory, note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as the function.
Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) @property def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema @property def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns @property def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns @property def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows @property def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape @property def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes @property def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class TableBlock(Table): """ `TableBlock` is the allowed class inside a `ConcatenationTable`.
Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`. This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTables`. """ pass class InMemoryTable(TableBlock): """ The table is said in-memory when it is loaded into the user's RAM. Pickling it does copy all the data into memory. Its implementation is simple and uses the underlying pyarrow Table methods directly. This is different from the `MemoryMapped` table, for which pickling doesn't copy all the data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ @classmethod def from_file(cls, filename: str): table = _in_memory_arrow_table_from_file(filename) return cls(table) @classmethod def from_buffer(cls, buffer: pa.Buffer): table = _in_memory_arrow_table_from_buffer(buffer) return cls(table) @classmethod def from_pandas(cls, *args, **kwargs): """ Convert pandas.DataFrame to an Arrow Table. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to null. This behavior can be avoided by constructing an explicit schema and passing it to this function. Args: df (`pandas.DataFrame`): schema (`pyarrow.Schema`, *optional*): The expected schema of the Arrow Table. This can be used to indicate the type of columns if we cannot infer it automatically. If passed, the output will have exactly this schema. Columns specified in the schema that are not found in the DataFrame columns or its index will raise an error. Additional columns or index levels in the DataFrame which are not specified in the schema will be ignored. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting `Table`. The default of None will store the index as a column, except for RangeIndex which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. nthreads (`int`, defaults to `None` (may use up to system CPU count threads)): If greater than 1, convert columns to Arrow in parallel using indicated number of threads. columns (`List[str]`, *optional*): List of columns to be converted. If `None`, use all columns. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table`: Examples: ```python >>> import pandas as pd >>> import pyarrow as pa >>> df = pd.DataFrame({ ... 'int': [1, 2], ... 'str': ['a', 'b'] ... }) >>> pa.Table.from_pandas(df) <pyarrow.lib.Table object at 0x7f05d1fb1b40> ``` """ return cls(pa.Table.from_pandas(*args, **kwargs)) @classmethod def from_arrays(cls, *args, **kwargs): """ Construct a Table from Arrow arrays. Args: arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`): Equal-length arrays that should form the table. names (`List[str]`, *optional*): Names for the table columns. If not passed, schema must be passed.
schema (`Schema`, defaults to `None`): Schema for the created table. If not passed, names must be passed. metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_arrays(*args, **kwargs)) @classmethod def from_pydict(cls, *args, **kwargs): """ Construct a Table from Arrow arrays or columns. Args: mapping (`Union[dict, Mapping]`): A mapping of strings to Arrays or Python lists. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pydict(*args, **kwargs)) @classmethod def from_pylist(cls, mapping, *args, **kwargs): """ Construct a Table from list of rows / dictionaries. Args: mapping (`List[dict]`): A mapping of strings to row values. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) @classmethod def from_batches(cls, *args, **kwargs): """ Construct a Table from a sequence or iterator of Arrow `RecordBatches`. Args: batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`): Sequence of `RecordBatch` to be converted, all schemas must be equal. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the first `RecordBatch`. Returns: `datasets.table.Table`: """ return cls(pa.Table.from_batches(*args, **kwargs)) def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length)) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ return InMemoryTable(self.table.filter(*args, **kwargs)) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(table_flatten(self.table, *args, **kwargs)) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. 
Returns: `datasets.table.Table` """ return InMemoryTable(table_cast(self.table, *args, **kwargs)) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.add_column(*args, **kwargs)) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.append_column(*args, **kwargs)) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ return InMemoryTable(self.table.remove_column(*args, **kwargs)) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ return InMemoryTable(self.table.set_column(*args, **kwargs)) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ return InMemoryTable(self.table.rename_columns(*args, **kwargs)) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ return InMemoryTable(self.table.drop(*args, **kwargs)) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ return InMemoryTable(self.table.select(*args, **kwargs)) # The MemoryMappedTable needs replays to properly reload tables from the disk Replay = Tuple[str, tuple, dict] class MemoryMappedTable(TableBlock): """ The table is said memory mapped when it doesn't use the user's RAM but loads the data from the disk instead. Pickling it doesn't copy the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replay" when reloading the table from the disk. 
Its implementation requires to store an history of all the transforms that were applied to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table from the disk. This is different from the `InMemoryTable` table, for which pickling does copy all the data in memory. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None): super().__init__(table) self.path = os.path.abspath(path) self.replays: List[Replay] = replays if replays is not None else [] @classmethod def from_file(cls, filename: str, replays=None): table = _memory_mapped_arrow_table_from_file(filename) table = cls._apply_replays(table, replays) return cls(table, filename, replays) def __getstate__(self): return {"path": self.path, "replays": self.replays} def __setstate__(self, state): path = state["path"] replays = state["replays"] table = _memory_mapped_arrow_table_from_file(path) table = self._apply_replays(table, replays) MemoryMappedTable.__init__(self, table, path=path, replays=replays) @staticmethod def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table: if replays is not None: for name, args, kwargs in replays: if name == "cast": table = table_cast(table, *args, **kwargs) elif name == "flatten": table = table_flatten(table, *args, **kwargs) else: table = getattr(table, name)(*args, **kwargs) return table def _append_replay(self, replay: Replay) -> List[Replay]: replays = copy.deepcopy(self.replays) replays.append(replay) return replays def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ replay = ("slice", (offset, length), {}) replays = self._append_replay(replay) # Use fast slicing here return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the ChunkedArray of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) def cast(self, *args, **kwargs): """ Cast table values to another schema Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata. Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. 
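Example (a minimal sketch; the file name and column name are illustrative):

```py
>>> table = MemoryMappedTable.from_file("data.arrow")
>>> renamed = table.rename_columns(["text"])
>>> renamed.replays  # the transform is recorded so it can be replayed after unpickling
[('rename_columns', (['text'],), {})]
```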
""" replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) # A ConcatenationTable is the concatenation of several tables. # The ``blocks`` attributes stores a list of list of blocks. # The first axis concatenates the tables along the axis 0 (it appends rows), # while the second axis concatenates tables along the axis 1 (it appends columns). TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]]) class ConcatenationTable(Table): """ The table comes from the concatenation of several tables called blocks. It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). The underlying tables are called "blocks" and can be either `InMemoryTable` or `MemoryMappedTable` objects. This allows to combine tables that come from memory or that are memory mapped. When a `ConcatenationTable` is pickled, then each block is pickled: - the `InMemoryTable` objects are pickled by copying all the data in memory. - the MemoryMappedTable objects are pickled without copying the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replays" when reloading the table from the disk. Its implementation requires to store each block separately. The `blocks` attributes stores a list of list of blocks. The first axis concatenates the tables along the axis 0 (it appends rows), while the second axis concatenates tables along the axis 1 (it appends columns). If some columns are missing when concatenating on axis 0, they are filled with null values. This is done using `pyarrow.concat_tables(tables, promote=True)`. You can access the fully combined table by accessing the `ConcatenationTable.table` attribute, and the blocks by accessing the `ConcatenationTable.blocks` attribute. """ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): super().__init__(table) self.blocks = blocks # Check that all the blocks have the right type. # Only InMemoryTable and MemoryMappedTable are allowed. for subtables in blocks: for subtable in subtables: if not isinstance(subtable, TableBlock): raise TypeError( "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" f", but got {_short_str(subtable)}." 
) def __getstate__(self): return {"blocks": self.blocks, "schema": self.table.schema} def __setstate__(self, state): blocks = state["blocks"] schema = state["schema"] table = self._concat_blocks_horizontally_and_vertically(blocks) if schema is not None and table.schema != schema: # We fix the columns by concatenating with an empty table with the right columns empty_table = pa.Table.from_batches([], schema=schema) # We set promote_options="default" to fill missing columns with null values table = pa.concat_tables([table, empty_table], promote_options="default") ConcatenationTable.__init__(self, table, blocks=blocks) @staticmethod def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] if axis == 0: # We set promote_options="default" to fill missing columns with null values return pa.concat_tables(pa_tables, promote_options="default") elif axis == 1: for i, table in enumerate(pa_tables): if i == 0: pa_table = table else: for name, col in zip(table.column_names, table.columns): pa_table = pa_table.append_column(name, col) return pa_table else: raise ValueError("'axis' must be either 0 or 1") @classmethod def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: pa_tables_to_concat_vertically = [] for i, tables in enumerate(blocks): if not tables: continue pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) @classmethod def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: if axis is not None: merged_blocks = [] for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): if is_in_memory: block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] merged_blocks += list(block_group) else: # both merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] if all(len(row_block) == 1 for row_block in merged_blocks): merged_blocks = cls._merge_blocks( [block for row_block in merged_blocks for block in row_block], axis=0 ) return merged_blocks @classmethod def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: if isinstance(blocks, TableBlock): return blocks elif isinstance(blocks[0], TableBlock): return cls._merge_blocks(blocks, axis=0) else: return cls._merge_blocks(blocks) @classmethod def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": blocks = cls._consolidate_blocks(blocks) if isinstance(blocks, TableBlock): table = blocks return cls(table.table, [[table]]) elif isinstance(blocks[0], TableBlock): table = cls._concat_blocks(blocks, axis=0) blocks = [[t] for t in blocks] return cls(table, blocks) else: table = cls._concat_blocks_horizontally_and_vertically(blocks) return cls(table, blocks) @classmethod def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": """Create `ConcatenationTable` from list of tables. Args: tables (list of `Table` or list of `pyarrow.Table`): List of tables. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). 
<Added version="1.6.0"/> """ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]: if isinstance(table, pa.Table): return [[InMemoryTable(table)]] elif isinstance(table, ConcatenationTable): return copy.deepcopy(table.blocks) else: return [[table]] def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]: sliced = [table.slice(0, length) for table in row_block] remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] return sliced, remainder def _split_both_like( result: List[List[TableBlock]], blocks: List[List[TableBlock]] ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]: """ Make sure each row_block contain the same num_rows to be able to concatenate them on axis=1. To do so, we modify both blocks sets to have the same row_blocks boundaries. For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows, we modify both to have 4 row_blocks of size 2, 1, 1 and 2: [ x x x | x x x ] + [ y y | y y | y y ] ----------------------------- = [ x x | x | x | x x ] [ y y | y | y | y y ] """ result, blocks = list(result), list(blocks) new_result, new_blocks = [], [] while result and blocks: # we slice the longest row block to save two row blocks of same length # and we replace the long row block by its remainder if necessary if len(result[0][0]) > len(blocks[0][0]): new_blocks.append(blocks[0]) sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0])) new_result.append(sliced) elif len(result[0][0]) < len(blocks[0][0]): new_result.append(result[0]) sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0])) new_blocks.append(sliced) else: new_result.append(result.pop(0)) new_blocks.append(blocks.pop(0)) if result or blocks: raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") return new_result, new_blocks def _extend_blocks( result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0 ) -> List[List[TableBlock]]: if axis == 0: result.extend(blocks) elif axis == 1: # We make sure each row_block have the same num_rows result, blocks = _split_both_like(result, blocks) for i, row_block in enumerate(blocks): result[i].extend(row_block) return result blocks = to_blocks(tables[0]) for table in tables[1:]: table_blocks = to_blocks(table) blocks = _extend_blocks(blocks, table_blocks, axis=axis) return cls.from_blocks(blocks) @property def _slices(self): offset = 0 for tables in self.blocks: length = len(tables[0]) yield (offset, length) offset += length def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ table = self.table.slice(offset, length=length) length = length if length is not None else self.num_rows - offset blocks = [] for tables in self.blocks: n_rows = len(tables[0]) if length == 0: break elif n_rows <= offset: offset = offset - n_rows elif n_rows <= offset + length: blocks.append([t.slice(offset) for t in tables]) length, offset = length + offset - n_rows, 0 else: blocks.append([t.slice(offset, length) for t in tables]) length, offset = 0, 0 return ConcatenationTable(table, blocks) def filter(self, mask, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. 
""" table = self.table.filter(mask, *args, **kwargs) blocks = [] for (offset, length), tables in zip(self._slices, self.blocks): submask = mask.slice(offset, length) blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = table_flatten(self.table, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.flatten(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = self.table.combine_chunks(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def cast(self, target_schema, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ from .features import Features table = table_cast(self.table, target_schema, *args, **kwargs) target_features = Features.from_arrow_schema(target_schema) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields}) subschema = subfeatures.arrow_schema new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ table = self.table.replace_schema_metadata(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) return ConcatenationTable(table, self.blocks) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. 
Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, i, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, names, *args, **kwargs): """ Create new table with columns renamed to provided names. """ table = self.table.rename_columns(names, *args, **kwargs) names = dict(zip(self.table.column_names, names)) blocks = [] for tables in self.blocks: blocks.append( [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables] ) return ConcatenationTable(table, blocks) def drop(self, columns, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError`: if any of the passed column names do not exist. Returns: `datasets.table.Table`: New table without the columns. """ table = self.table.drop(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def select(self, columns, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ table = self.table.select(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def concat_tables(tables: List[Table], axis: int = 0) -> Table: """ Concatenate tables. Args: tables (list of `Table`): List of tables to be concatenated. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Returns: `datasets.table.Table`: If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`. Otherwise if there's only one table, it is returned as is. """ tables = list(tables) if len(tables) == 1: return tables[0] return ConcatenationTable.from_tables(tables, axis=axis) def list_table_cache_files(table: Table) -> List[str]: """ Get the cache files that are loaded by the table.
Cache files are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table. """ if isinstance(table, ConcatenationTable): cache_files = [] for subtables in table.blocks: for subtable in subtables: cache_files += list_table_cache_files(subtable) return cache_files elif isinstance(table, MemoryMappedTable): return [table.path] else: return [] def _wrap_for_chunked_arrays(func): """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly""" def wrapper(array, *args, **kwargs): if isinstance(array, pa.ChunkedArray): return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) else: return func(array, *args, **kwargs) return wrapper def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool: """Check if all the sub-lists of a `pa.ListArray` have the specified length.""" return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array) def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array: """Add the null bitmap to the offsets of a `pa.ListArray`.""" offsets = array.offsets if array.null_count > 0: offsets = pa.concat_arrays( [ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())), offsets[-1:], ] ) return offsets def _storage_type(type: pa.DataType) -> pa.DataType: """Convert a (possibly nested) `pa.ExtensionType` to its storage type.""" if isinstance(type, pa.ExtensionType): return _storage_type(type.storage_type) elif isinstance(type, pa.StructType): return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type]) elif isinstance(type, pa.ListType): return pa.list_(_storage_type(type.value_type)) elif isinstance(type, pa.FixedSizeListType): return pa.list_(_storage_type(type.value_type), type.list_size) return type def _short_str(value: Any) -> str: out = str(value) if len(out) > 3000: out = out[:1500] + "\n...\n" + out[-1500:] return out @_wrap_for_chunked_arrays def array_cast( array: pa.Array, pa_type: pa.DataType, allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True ) -> Union[pa.Array, pa.FixedSizeListArray, pa.ListArray, pa.StructArray, pa.ExtensionArray]: """Improved version of `pa.Array.cast` It supports casting `pa.StructArray` objects to re-order the fields. It also lets you control certain aspects of the casting, e.g. whether to disable casting primitives (`booleans`, `floats` or `ints`) to strings, or casting decimals to strings. Args: array (`pa.Array`): PyArrow array to cast pa_type (`pa.DataType`): Target PyArrow type allow_primitive_to_str (`bool`, defaults to `True`): Whether to allow casting primitives to strings. Defaults to `True`. allow_decimal_to_str (`bool`, defaults to `True`): Whether to allow casting decimals to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported, e.g.
- if a field is missing - if casting from primitives to strings and `allow_primitive_to_str` is `False` - if casting from decimals to strings and `allow_decimal_to_str` is `False` Returns: `List[pyarrow.Array]`: the casted array """ _c = partial(array_cast, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if isinstance(pa_type, pa.ExtensionType): return pa_type.wrap_array(_c(array, pa_type.storage_type)) elif array.type == pa_type: return array elif pa.types.is_struct(array.type): if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}): if array.type.num_fields == 0: return array arrays = [_c(array.field(field.name), field.type) for field in pa_type] return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null()) elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): if pa.types.is_fixed_size_list(pa_type): if _are_list_values_of_length(array, pa_type.list_size): if array.null_count > 0: # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array array_type = array.type storage_type = _storage_type(array_type) if array_type != storage_type: # Temporarily convert to the storage type to support extension types in the slice operation array = _c(array, storage_type) array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) array = _c(array, array_type) else: array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) array_values = array.values return pa.FixedSizeListArray.from_arrays( _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null() ) else: array_values = array.values[ array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size ] return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size) elif pa.types.is_list(pa_type): # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) elif pa.types.is_large_list(pa_type): # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) elif pa.types.is_fixed_size_list(array.type): if pa.types.is_fixed_size_list(pa_type): if pa_type.list_size == array.type.list_size: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] return pa.FixedSizeListArray.from_arrays( _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null() ) elif pa.types.is_list(pa_type): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null()) elif pa.types.is_large_list(pa_type): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.LargeListArray.from_arrays( array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null() ) else: if pa.types.is_string(pa_type): if not allow_primitive_to_str and pa.types.is_primitive(array.type): raise 
TypeError( f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} " f"since allow_primitive_to_str is set to {allow_primitive_to_str} " ) if not allow_decimal_to_str and pa.types.is_decimal(array.type): raise TypeError( f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} " f"since allow_decimal_to_str is set to {allow_decimal_to_str}" ) if pa.types.is_null(pa_type) and not pa.types.is_null(array.type): raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") return array.cast(pa_type) raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") @_wrap_for_chunked_arrays def cast_array_to_feature( array: pa.Array, feature: "FeatureType", allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True ) -> pa.Array: """Cast an array to the arrow type that corresponds to the requested feature type. For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods they define to enable casting from other arrow types. Args: array (`pa.Array`): The PyArrow array to cast. feature (`datasets.features.FeatureType`): The target feature type. allow_primitive_to_str (`bool`, defaults to `True`): Whether to allow casting primitives to strings. Defaults to `True`. allow_decimal_to_str (`bool`, defaults to `True`): Whether to allow casting decimals to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported, e.g. - if a field is missing - if casting from primitives to strings and `allow_primitive_to_str` is `False` - if casting from decimals to strings and `allow_decimal_to_str` is `False` Returns: array (`pyarrow.Array`): the casted array """ from .features.features import LargeList, Sequence, get_nested_type _c = partial( cast_array_to_feature, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "cast_storage"): return feature.cast_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): sequence_kwargs = vars(feature).copy() feature = sequence_kwargs.pop("feature") feature = {name: Sequence(subfeature, **sequence_kwargs) for name, subfeature in feature.items()} if isinstance(feature, dict) and {field.name for field in array.type} == set(feature): if array.type.num_fields == 0: return array arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): # feature must be either [subfeature] or LargeList(subfeature) or Sequence(subfeature) if isinstance(feature, list): casted_array_values = _c(array.values, feature[0]) if pa.types.is_list(array.type) and casted_array_values.type == array.values.type: # Both array and feature have equal list type and values (within the list) type return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, casted_array_values) elif isinstance(feature, LargeList): casted_array_values = _c(array.values, feature.feature) if
pa.types.is_large_list(array.type) and casted_array_values.type == array.values.type: # Both array and feature have equal large_list type and values (within the list) type return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, casted_array_values) elif isinstance(feature, Sequence): if feature.length > -1: if _are_list_values_of_length(array, feature.length): if array.null_count > 0: # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array array_type = array.type storage_type = _storage_type(array_type) if array_type != storage_type: # Temporarily convert to the storage type to support extension types in the slice operation array = array_cast( array, storage_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array = array_cast( array, array_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) else: array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array_values = array.values casted_array_values = _c(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays( casted_array_values, feature.length, mask=array.is_null() ) else: array_values = array.values[ array.offset * feature.length : (array.offset + len(array)) * feature.length ] return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length) else: casted_array_values = _c(array.values, feature.feature) if pa.types.is_list(array.type) and casted_array_values.type == array.values.type: # Both array and feature have equal list type and values (within the list) type return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, casted_array_values) elif pa.types.is_fixed_size_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null()) elif isinstance(feature, LargeList): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.LargeListArray.from_arrays( array_offsets, _c(array.values, feature.feature), mask=array.is_null() ) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length == array.type.list_size: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] casted_array_values = _c(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays(casted_array_values, feature.length, mask=array.is_null()) else: array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null()) if pa.types.is_null(array.type): return array_cast( array, get_nested_type(feature), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) elif not isinstance(feature, (Sequence, dict, list, tuple)): 
return array_cast( array, feature(), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}") @_wrap_for_chunked_arrays def embed_array_storage(array: pa.Array, feature: "FeatureType"): """Embed data into an array's storage. For custom features like Audio or Image, it takes into account the "embed_storage" methods they define to embed external data (e.g. an image file) into an array. <Added version="2.4.0"/> Args: array (`pa.Array`): The PyArrow array in which to embed data. feature (`datasets.features.FeatureType`): Array features. Raises: `TypeError`: if the target type is not supported, e.g. - if a field is missing Returns: array (`pyarrow.Array`): the array with the embedded data """ from .features import Sequence _e = embed_array_storage if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "embed_storage"): return feature.embed_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict): arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) if isinstance(feature, list): return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0])) if isinstance(feature, Sequence) and feature.length == -1: return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) elif pa.types.is_large_list(array.type): # feature must be LargeList(subfeature) # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be Sequence(subfeature) if isinstance(feature, Sequence) and feature.length > -1: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] embedded_array_values = _e(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null()) if not isinstance(feature, (Sequence, dict, list, tuple)): return array raise TypeError(f"Couldn't embed array of type\n{_short_str(array.type)}\nwith\n{_short_str(feature)}") class CastError(ValueError): """When it's not possible to cast an Arrow table to a specific schema or set of features""" def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None: super().__init__(*args) self.table_column_names = table_column_names self.requested_column_names = requested_column_names def __reduce__(self): # Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names' return partial( CastError, table_column_names=self.table_column_names,
requested_column_names=self.requested_column_names ), () def details(self): new_columns = set(self.table_column_names) - set(self.requested_column_names) missing_columns = set(self.requested_column_names) - set(self.table_column_names) if new_columns and missing_columns: return f"there are {len(new_columns)} new columns ({_short_str(new_columns)}) and {len(missing_columns)} missing columns ({_short_str(missing_columns)})" elif new_columns: return f"there are {len(new_columns)} new columns ({_short_str(new_columns)})" else: return f"there are {len(missing_columns)} missing columns ({_short_str(missing_columns)})" def cast_table_to_features(table: pa.Table, features: "Features"): """Cast a table to the arrow schema that corresponds to the requested features. Args: table (`pyarrow.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: table (`pyarrow.Table`): the casted table """ if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def cast_table_to_schema(table: pa.Table, schema: pa.Schema): """Cast a table to the arrow schema. Unlike `cast_table_to_features`, this method can preserve nullability. Args: table (`pa.Table`): PyArrow table to cast. schema (`pa.Schema`): Target schema to cast to. Returns: `pa.Table`: the casted table """ from .features import Features features = Features.from_arrow_schema(schema) if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=schema) def embed_table_storage(table: pa.Table): """Embed external data into a table's storage. <Added version="2.4.0"/> Args: table (`pyarrow.Table`): PyArrow table in which to embed data. Returns: table (`pyarrow.Table`): the table with embedded data """ from .features.features import Features, require_storage_embed features = Features.from_arrow_schema(table.schema) arrays = [ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name] for name, feature in features.items() ] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def table_cast(table: pa.Table, schema: pa.Schema): """Improved version of `pa.Table.cast`. It supports casting to feature types stored in the schema metadata. Args: table (`pyarrow.Table`): PyArrow table to cast. schema (`pyarrow.Schema`): Target PyArrow schema. Returns: table (`pyarrow.Table`): the casted table """ if table.schema != schema: return cast_table_to_schema(table, schema) elif table.schema.metadata != schema.metadata: return table.replace_schema_metadata(schema.metadata) else: return table def table_flatten(table: pa.Table): """Improved version of `pa.Table.flatten`. It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of the columns with a struct type into one column per struct field, but it updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False.
Args: table (`pa.Table`): PyArrow table to flatten. Returns: `Table`: the flattened table """ from .features import Features features = Features.from_arrow_schema(table.schema) if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()): flat_arrays = [] flat_column_names = [] for field in table.schema: array = table.column(field.name) subfeature = features[field.name] if pa.types.is_struct(field.type) and ( not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature ): flat_arrays.extend(array.flatten()) flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type]) else: flat_arrays.append(array) flat_column_names.append(field.name) flat_table = pa.Table.from_arrays( flat_arrays, names=flat_column_names, ) else: flat_table = table.flatten() # Preserve complex types in the metadata flat_features = features.flatten(max_depth=2) flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata) def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): """Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array. """ from .features import Features, Sequence features = Features.from_arrow_schema(table.schema) def _visit(array, feature): if isinstance(array, pa.ChunkedArray): for chunk in array.chunks: _visit(chunk, feature) else: if isinstance(array, pa.ExtensionArray): array = array.storage function(array, feature) if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"): if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } for name, subfeature in feature.items(): _visit(array.field(name), subfeature) elif pa.types.is_list(array.type): if isinstance(feature, list): _visit(array.values, feature[0]) elif isinstance(feature, Sequence): _visit(array.values, feature.feature) for name, feature in features.items(): _visit(table[name], feature) def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: """Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. """ chunks_buffer = [] chunks_buffer_size = 0 for chunk in table.to_reader(max_chunksize=batch_size): if len(chunk) == 0: continue elif chunks_buffer_size + len(chunk) < batch_size: chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == batch_size: chunks_buffer.append(chunk) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [] chunks_buffer_size = 0 else: cropped_chunk_length = batch_size - chunks_buffer_size chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if not drop_last_batch and chunks_buffer: yield pa.Table.from_batches(chunks_buffer)
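

# Minimal usage sketch (illustrative only, not part of the upstream module): it shows how
# `concat_tables` builds a `ConcatenationTable` from several in-memory tables and how
# `table_iter` batches the result. It assumes `pa` (pyarrow) and `InMemoryTable` from the
# top of this module; the column name and values are made up for the example.
if __name__ == "__main__":
    _t1 = InMemoryTable(pa.table({"col_1": [0, 1, 2]}))
    _t2 = InMemoryTable(pa.table({"col_1": [3, 4]}))
    _concatenated = concat_tables([_t1, _t2], axis=0)
    print(type(_concatenated).__name__)  # ConcatenationTable (more than one input table)
    for _sub_table in table_iter(_concatenated.table, batch_size=2):
        print(_sub_table.num_rows)  # 2, 2, 1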
datasets/src/datasets/table.py/0
{ "file_path": "datasets/src/datasets/table.py", "repo_id": "datasets", "token_count": 41738 }
89
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Some python utils functions and classes.""" import copy import functools import itertools import multiprocessing.pool import os import queue import re import types import warnings from contextlib import contextmanager from dataclasses import fields, is_dataclass from multiprocessing import Manager from pathlib import Path from queue import Empty from shutil import disk_usage from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union from urllib.parse import urlparse import multiprocess import multiprocess.pool import numpy as np from tqdm.auto import tqdm from .. import config from ..parallel import parallel_map from . import logging from . import tqdm as hf_tqdm from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0 Pickler, dump, dumps, pklregister, ) from ._filelock import FileLock try: # pragma: no branch import typing_extensions as _typing_extensions from typing_extensions import Final, Literal except ImportError: _typing_extensions = Literal = Final = None logger = logging.get_logger(__name__) # NOTE: When used on an instance method, the cache is shared across all # instances and IS NOT per-instance. # See # https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance # For @property methods, use @memoized_property below. memoize = functools.lru_cache def size_str(size_in_bytes): """Returns a human readable size string. If size_in_bytes is None, then returns "Unknown size". For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string. """ if not size_in_bytes: return "Unknown size" _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)] size_in_bytes = float(size_in_bytes) for name, size_bytes in _NAME_LIST: value = size_in_bytes / size_bytes if value >= 1.0: return f"{value:.2f} {name}" return f"{int(size_in_bytes)} bytes" def convert_file_size_to_int(size: Union[int, str]) -> int: """ Converts a size expressed as a string with digits and a unit (like `"50MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("PIB"): return int(size[:-3]) * (2**50) if size.upper().endswith("TIB"): return int(size[:-3]) * (2**40) if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("PB"): int_size = int(size[:-2]) * (10**15) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("TB"): int_size = int(size[:-2]) * (10**12) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def glob_pattern_to_regex(pattern): # partially taken from fsspec: # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735 return ( pattern.replace("\\", r"\\") .replace(".", r"\.") .replace("*", ".*") .replace("+", r"\+") .replace("//", "/") .replace("(", r"\(") .replace(")", r"\)") .replace("|", r"\|") .replace("^", r"\^") .replace("$", r"\$") .rstrip("/") .replace("?", ".") ) def string_to_dict(string: str, pattern: str) -> Dict[str, str]: """Un-format a string using a python f-string pattern. From https://stackoverflow.com/a/36838374 Example:: >>> p = 'hello, my name is {name} and I am a {age} year old {what}' >>> s = p.format(name='cody', age=18, what='quarterback') >>> s 'hello, my name is cody and I am a 18 year old quarterback' >>> string_to_dict(s, p) {'age': '18', 'name': 'cody', 'what': 'quarterback'} Args: string (str): input string pattern (str): pattern formatted like a python f-string Returns: Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern Raises: ValueError: if the string doesn't match the pattern """ regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern) result = re.search(regex, string) if result is None: raise ValueError(f"String {string} doesn't match the pattern {pattern}") values = list(result.groups()) keys = re.findall(r"{(.+?)}", pattern) _dict = dict(zip(keys, values)) return _dict def asdict(obj): """Convert an object to its dictionary representation recursively. <Added version="2.4.0"/> """ # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict def _is_dataclass_instance(obj): # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass return is_dataclass(obj) and not isinstance(obj, type) def _asdict_inner(obj): if _is_dataclass_instance(obj): result = {} for f in fields(obj): value = _asdict_inner(getattr(obj, f.name)) if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): result[f.name] = value return result elif isinstance(obj, tuple) and hasattr(obj, "_fields"): # obj is a namedtuple return type(obj)(*[_asdict_inner(v) for v in obj]) elif isinstance(obj, (list, tuple)): # Assume we can create an object of this type by passing in a # generator (which is not true for namedtuples, handled # above). 
return type(obj)(_asdict_inner(v) for v in obj) elif isinstance(obj, dict): return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()} else: return copy.deepcopy(obj) if not isinstance(obj, dict) and not _is_dataclass_instance(obj): raise TypeError(f"{obj} is not a dict or a dataclass") return _asdict_inner(obj) @contextmanager def temporary_assignment(obj, attr, value): """Temporarily assign obj.attr to value.""" original = getattr(obj, attr, None) setattr(obj, attr, value) try: yield finally: setattr(obj, attr, original) @contextmanager def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False): """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow.""" np_state = np.random.get_state() np.random.seed(seed) if set_pytorch and config.TORCH_AVAILABLE: import torch torch_state = torch.random.get_rng_state() torch.random.manual_seed(seed) if torch.cuda.is_available(): torch_cuda_states = torch.cuda.get_rng_state_all() torch.cuda.manual_seed_all(seed) if set_tensorflow and config.TF_AVAILABLE: import tensorflow as tf from tensorflow.python.eager import context as tfpycontext tf_state = tf.random.get_global_generator() temp_gen = tf.random.Generator.from_seed(seed) tf.random.set_global_generator(temp_gen) if not tf.executing_eagerly(): raise ValueError("Setting random seed for TensorFlow is only available in eager mode") tf_context = tfpycontext.context() # eager mode context tf_seed = tf_context._seed tf_rng_initialized = hasattr(tf_context, "_rng") if tf_rng_initialized: tf_rng = tf_context._rng tf_context._set_global_seed(seed) try: yield finally: np.random.set_state(np_state) if set_pytorch and config.TORCH_AVAILABLE: torch.random.set_rng_state(torch_state) if torch.cuda.is_available(): torch.cuda.set_rng_state_all(torch_cuda_states) if set_tensorflow and config.TF_AVAILABLE: tf.random.set_global_generator(tf_state) tf_context._seed = tf_seed if tf_rng_initialized: tf_context._rng = tf_rng else: delattr(tf_context, "_rng") def unique_values(values): """Iterate over iterable and return only unique values in order.""" seen = set() for value in values: if value not in seen: seen.add(value) yield value def no_op_if_value_is_null(func): """If the value is None, return None, else call `func`.""" def wrapper(value): return func(value) if value is not None else None return wrapper def first_non_null_value(iterable): """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index.""" for i, value in enumerate(iterable): if value is not None: return i, value return -1, None def zip_dict(*dicts): """Iterate over items of dictionaries grouped by their keys.""" for key in unique_values(itertools.chain(*dicts)): # set merge all keys # Will raise KeyError if the dicts don't have the same keys yield key, tuple(d[key] for d in dicts) class NonMutableDict(dict): """Dict where keys can only be added but not modified. Will raise an error if the user tries to overwrite an existing key. The error message can be customized during construction. It will be formatted using {key} for the overwritten key.
""" def __init__(self, *args, **kwargs): self._error_msg = kwargs.pop( "error_msg", "Try to overwrite existing key: {key}", ) if kwargs: raise ValueError("NonMutableDict cannot be initialized with kwargs.") super().__init__(*args, **kwargs) def __setitem__(self, key, value): if key in self: raise ValueError(self._error_msg.format(key=key)) return super().__setitem__(key, value) def update(self, other): if any(k in self for k in other): raise ValueError(self._error_msg.format(key=set(self) & set(other))) return super().update(other) class classproperty(property): # pylint: disable=invalid-name """Descriptor to be used as decorator for @classmethods.""" def __get__(self, obj, objtype=None): return self.fget.__get__(None, objtype)() def _single_map_nested(args): """Apply a function recursively to each element of a nested data struct.""" function, data_struct, batched, batch_size, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): if batched: return function([data_struct])[0] else: return function(data_struct) if ( batched and not isinstance(data_struct, dict) and isinstance(data_struct, types) and all(not isinstance(v, (dict, types)) for v in data_struct) ): return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)] # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print(" ", end="", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar: if isinstance(data_struct, dict): return { k: _single_map_nested((function, v, batched, batch_size, types, None, True, None)) for k, v in pbar } else: mapped = [_single_map_nested((function, v, batched, batch_size, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) def map_nested( function: Callable[[Any], Any], data_struct: Any, dict_only: bool = False, map_list: bool = True, map_tuple: bool = False, map_numpy: bool = False, num_proc: Optional[int] = None, parallel_min_length: int = 2, batched: bool = False, batch_size: Optional[int] = 1000, types: Optional[tuple] = None, disable_tqdm: bool = True, desc: Optional[str] = None, ) -> Any: """Apply a function recursively to each element of a nested data struct. Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to `parallel_min_length`. <Changed version="2.5.0"> Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and multiprocessing is used. </Changed> Args: function (`Callable`): Function to be applied to `data_struct`. 
data_struct (`Any`): Data structure to apply `function` to. dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in `data_struct`. map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` values). map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides `dict` values). map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides `dict` values). num_proc (`int`, *optional*): Number of processes. The level in the data struct used for multiprocessing is the first level that has smaller sub-structs, starting from the root. parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel processing. <Added version="2.5.0"/> batched (`bool`, defaults to `False`): Provide batch of items to `function`. <Added version="2.19.0"/> batch_size (`int`, *optional*, defaults to `1000`): Number of items per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`. <Added version="2.19.0"/> types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. desc (`str`, *optional*): Prefix for the tqdm progressbar. Returns: `Any` """ if types is None: types = [] if not dict_only: if map_list: types.append(list) if map_tuple: types.append(tuple) if map_numpy: types.append(np.ndarray) types = tuple(types) # Singleton if not isinstance(data_struct, dict) and not isinstance(data_struct, types): if batched: data_struct = [data_struct] mapped = function(data_struct) if batched: mapped = mapped[0] return mapped iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct if num_proc is None: num_proc = 1 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable): mapped = [ map_nested( function=function, data_struct=obj, num_proc=num_proc, parallel_min_length=parallel_min_length, batched=batched, batch_size=batch_size, types=types, ) for obj in iterable ] elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: if batched: if batch_size is None or batch_size <= 0: batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1) iterable = list(iter_batched(iterable, batch_size)) mapped = [ _single_map_nested((function, obj, batched, batch_size, types, None, True, None)) for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) ] if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".* is experimental and might be subject to breaking changes in the future\\.$", category=UserWarning, ) if batched: if batch_size is None or batch_size <= 0: batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0) iterable = list(iter_batched(iterable, batch_size)) mapped = parallel_map( function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested ) if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] if isinstance(data_struct, dict): return dict(zip(data_struct.keys(), mapped)) else: if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) 
else: return np.array(mapped) class NestedDataStructure: def __init__(self, data=None): self.data = data if data is not None else [] def flatten(self, data=None): data = data if data is not None else self.data if isinstance(data, dict): return self.flatten(list(data.values())) elif isinstance(data, (list, tuple)): return [flattened for item in data for flattened in self.flatten(item)] else: return [data] def has_sufficient_disk_space(needed_bytes, directory="."): try: free_bytes = disk_usage(os.path.abspath(directory)).free except OSError: return True return needed_bytes < free_bytes def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]: """Convert a link to a file on a github repo into a link to the raw github object.""" parsed = urlparse(url_path) sub_directory = None if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com": if "blob" in url_path: if not url_path.endswith(".py"): raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'") url_path = url_path.replace("blob", "raw") # Point to the raw file else: # Parse github url to point to zip github_path = parsed.path[1:] repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master") repo_owner, repo_name = repo_info.split("/") url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip" sub_directory = f"{repo_name}-{branch}" return url_path, sub_directory def lock_importable_file(importable_local_file: str) -> FileLock: # Check the directory with a unique name in our dataset folder # path is: ./datasets/dataset_name/hash_from_code/script.py # we use a hash as subdirectory_name to be able to have multiple versions of a dataset processing file together importable_directory_path = str(Path(importable_local_file).resolve().parent.parent) lock_path = importable_directory_path + ".lock" return FileLock(lock_path) def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]: """Find whether we should import or clone additional files for a given processing script, and list the imports. We allow: - library dependencies, - local dependencies and - external dependencies whose url is specified with a comment starting with "# From:" followed by the raw url to a file, an archive or a github repository. External dependencies will be downloaded (and extracted if needed in the dataset folder). We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. Note that only direct imports in the dataset processing script will be handled. We don't recursively explore the additional imports to download further files.
Example:: import tensorflow import .c4_utils import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset """ lines = [] with open(file_path, encoding="utf-8") as f: lines.extend(f.readlines()) logger.debug(f"Checking {file_path} for additional imports.") imports: List[Tuple[str, str, str, Optional[str]]] = [] is_in_docstring = False for line in lines: docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) if len(docstr_start_match) == 1: # flip True <=> False only if the docstring # starts at a line without finishing on it is_in_docstring = not is_in_docstring if is_in_docstring: # import statements in docstrings should # not be added as required dependencies continue match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) if match is None: match = re.match( r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE, ) if match is None: continue if match.group(1): # The import starts with a '.', we will download the relevant file if any(imp[1] == match.group(2) for imp in imports): # We already have this import continue if match.group(3): # The import has a comment with 'From:', we'll retrieve it from the given url url_path = match.group(3) url_path, sub_directory = _convert_github_url(url_path) imports.append(("external", match.group(2), url_path, sub_directory)) elif match.group(2): # The import should be at the same place as the file imports.append(("internal", match.group(2), match.group(2), None)) else: if match.group(3): # The import has a comment with `From: git+https:...`, which asks the user to pip install from git. url_path = match.group(3) imports.append(("library", match.group(2), url_path, None)) else: imports.append(("library", match.group(2), match.group(2), None)) return imports def copyfunc(func): result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__) result.__kwdefaults__ = func.__kwdefaults__ return result Y = TypeVar("Y") def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int: for i, result in enumerate(func(**kwargs)): queue.put(result) return i def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]: return {f.pid for f in pool._pool} def iflatmap_unordered( pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool], func: Callable[..., Iterable[Y]], *, kwargs_iterable: Iterable[dict], ) -> Iterable[Y]: initial_pool_pid = _get_pool_pid(pool) pool_changed = False manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager with manager_cls() as manager: queue = manager.Queue() async_results = [ pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable ] try: while True: try: yield queue.get(timeout=0.05) except Empty: if all(async_result.ready() for async_result in async_results) and queue.empty(): break if _get_pool_pid(pool) != initial_pool_pid: pool_changed = True # One of the subprocesses has died. We should not wait forever. raise RuntimeError( "One of the subprocesses has abruptly died during map operation. " "To debug the error, disable multiprocessing."
) finally: if not pool_changed: # we get the result in case there's an error to raise [async_result.get(timeout=0.05) for async_result in async_results] T = TypeVar("T") def iter_batched(iterable: Iterable[T], n: int) -> Iterable[List[T]]: if n < 1: raise ValueError(f"Invalid batch size {n}") batch = [] for item in iterable: batch.append(item) if len(batch) == n: yield batch batch = [] if batch: yield batch
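

# Minimal usage sketch (illustrative only, not part of the upstream module): `map_nested`
# applies a function to every leaf of a nested data struct, `iter_batched` yields
# fixed-size batches from any iterable, and `convert_file_size_to_int` parses size strings.
# The values below are made up for the example.
if __name__ == "__main__":
    _nested = {"train": [1, 2, 3], "test": [4, 5]}
    print(map_nested(lambda x: x * 10, _nested))  # {'train': [10, 20, 30], 'test': [40, 50]}
    print(list(iter_batched(range(5), 2)))  # [[0, 1], [2, 3], [4]]
    print(convert_file_size_to_int("10MiB"))  # 10485760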
datasets/src/datasets/utils/py_utils.py/0
{ "file_path": "datasets/src/datasets/utils/py_utils.py", "repo_id": "datasets", "token_count": 11710 }
90
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: Address all TODOs and remove all explanatory comments """TODO: Add a description here.""" import csv import json import os import datasets # TODO: Add BibTeX citation # Find for instance the citation on arxiv or on the dataset repo/website _CITATION = """\ @InProceedings{huggingface:dataset, title = {A great new dataset}, author={huggingface, Inc. }, year={2020} } """ # TODO: Add description of the dataset here # You can copy an official description _DESCRIPTION = """\ This new dataset is designed to solve this great NLP task and is crafted with a lot of care. """ # TODO: Add a link to an official homepage for the dataset here _HOMEPAGE = "" # TODO: Add the licence for the dataset here if you can find it _LICENSE = "" # TODO: Add link to the official dataset URLs here # The HuggingFace Datasets library doesn't host the datasets but only points to the original files. # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method) _URLS = { "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip", "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip", } # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case class NewDataset(datasets.GeneratorBasedBuilder): """TODO: Short description of my dataset.""" VERSION = datasets.Version("1.1.0") # This is an example of a dataset with multiple configurations. # If you don't want/need to define several sub-sets in your dataset, # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes. # If you need to make complex sub-parts in the datasets with configurable options # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig # BUILDER_CONFIG_CLASS = MyBuilderConfig # You will be able to load one or the other configurations in the following list with # data = datasets.load_dataset('my_dataset', 'first_domain') # data = datasets.load_dataset('my_dataset', 'second_domain') BUILDER_CONFIGS = [ datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"), datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"), ] DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it makes sense. def _info(self): # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above features = datasets.Features( { "sentence": datasets.Value("string"), "option1": datasets.Value("string"), "answer": datasets.Value("string") # These are the features of your dataset like images, labels ...
} ) else: # This is an example to show how to have different features for "first_domain" and "second_domain" features = datasets.Features( { "sentence": datasets.Value("string"), "option2": datasets.Value("string"), "second_domain_answer": datasets.Value("string") # These are the features of your dataset like images, labels ... } ) return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # This defines the different columns of the dataset and their types features=features, # Here we define them above because they are different between the two configurations # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and # specify them. They'll be used if as_supervised=True in builder.as_dataset. # supervised_keys=("sentence", "label"), # Homepage of the dataset for documentation homepage=_HOMEPAGE, # License for the dataset if available license=_LICENSE, # Citation for the dataset citation=_CITATION, ) def _split_generators(self, dl_manager): # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files. # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive urls = _URLS[self.config.name] data_dir = dl_manager.download_and_extract(urls) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join(data_dir, "train.jsonl"), "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join(data_dir, "dev.jsonl"), "split": "dev", }, ), datasets.SplitGenerator( name=datasets.Split.TEST, # These kwargs will be passed to _generate_examples gen_kwargs={ "filepath": os.path.join(data_dir, "test.jsonl"), "split": "test" }, ), ] # method parameters are unpacked from `gen_kwargs` as given in `_split_generators` def _generate_examples(self, filepath, split): # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset. # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example. with open(filepath, encoding="utf-8") as f: for key, row in enumerate(f): data = json.loads(row) if self.config.name == "first_domain": # Yields examples as (key, example) tuples yield key, { "sentence": data["sentence"], "option1": data["option1"], "answer": "" if split == "test" else data["answer"], } else: yield key, { "sentence": data["sentence"], "option2": data["option2"], "second_domain_answer": "" if split == "test" else data["second_domain_answer"], }
datasets/templates/new_dataset_script.py/0
{ "file_path": "datasets/templates/new_dataset_script.py", "repo_id": "datasets", "token_count": 3156 }
91
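As a quick orientation aid (not part of the original dump): once the TODOs in the template above are filled in, the script is typically consumed by path through `datasets.load_dataset`. A minimal sketch follows; the local path is hypothetical, and the `trust_remote_code` flag is an assumption that applies to `datasets` releases which gate script-based loading.

import datasets

# Hypothetical path to a filled-in copy of the template above.
data = datasets.load_dataset(
    "path/to/new_dataset_script.py",
    "first_domain",  # one of the BUILDER_CONFIGS declared in the script
    trust_remote_code=True,  # assumption: required by recent releases for script loading
)
print(data["train"][0])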
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
datasets/tests/io/test_sql.py/0
{ "file_path": "datasets/tests/io/test_sql.py", "repo_id": "datasets", "token_count": 1628 }
92
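For context (not part of the original dump): the `SqlDatasetReader`/`SqlDatasetWriter` classes exercised above back the public `Dataset.from_sql` and `Dataset.to_sql` methods. A minimal sketch, assuming sqlalchemy is installed; the `example.db` path and `dataset` table name are illustrative assumptions.

import sqlite3

from datasets import Dataset

dset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with sqlite3.connect("example.db") as con:
    dset.to_sql("dataset", con)  # write the rows into a SQL table
loaded = Dataset.from_sql("dataset", "sqlite:///example.db")  # read them back via a URI
print(loaded.column_names)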
import contextlib import copy import itertools import json import os import pickle import re import sys import tempfile from functools import partial from pathlib import Path from unittest import TestCase from unittest.mock import MagicMock, patch import numpy as np import numpy.testing as npt import pandas as pd import pyarrow as pa import pytest from absl.testing import parameterized from fsspec.core import strip_protocol from packaging import version import datasets.arrow_dataset from datasets import concatenate_datasets, interleave_datasets, load_from_disk from datasets.arrow_dataset import Dataset, transmit_format, update_metadata_with_features from datasets.dataset_dict import DatasetDict from datasets.features import ( Array2D, Array3D, ClassLabel, Features, Image, LargeList, Sequence, Translation, TranslationVariableLanguages, Value, ) from datasets.info import DatasetInfo from datasets.iterable_dataset import IterableDataset from datasets.splits import NamedSplit from datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable from datasets.utils.logging import INFO, get_logger from datasets.utils.py_utils import temp_seed from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_dill_gt_0_3_2, require_jax, require_not_windows, require_numpy1_on_windows, require_pil, require_polars, require_pyspark, require_sqlalchemy, require_tf, require_torch, require_transformers, set_current_working_directory_to_temp_dir, ) class PickableMagicMock(MagicMock): def __reduce__(self): return MagicMock, () class Unpicklable: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __getstate__(self): raise pickle.PicklingError() def picklable_map_function(x): return {"id": int(x["filename"].split("_")[-1])} def picklable_map_function_with_indices(x, i): return {"id": i} def picklable_map_function_with_rank(x, r): return {"rank": r} def picklable_map_function_with_indices_and_rank(x, i, r): return {"id": i, "rank": r} def picklable_filter_function(x): return int(x["filename"].split("_")[-1]) < 10 def picklable_filter_function_with_rank(x, r): return r == 0 def assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset): assert dataset.data.schema.metadata is not None assert b"huggingface" in dataset.data.schema.metadata metadata = json.loads(dataset.data.schema.metadata[b"huggingface"].decode()) assert "info" in metadata features = DatasetInfo.from_dict(metadata["info"]).features assert features is not None assert features == dataset.features assert features == Features.from_arrow_schema(dataset.data.schema) assert list(features) == dataset.data.column_names assert list(features) == list(dataset.features) IN_MEMORY_PARAMETERS = [ {"testcase_name": name, "in_memory": im} for im, name in [(True, "in_memory"), (False, "on_disk")] ] @parameterized.named_parameters(IN_MEMORY_PARAMETERS) class BaseDatasetTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures(self, caplog, set_sqlalchemy_silence_uber_warning): self._caplog = caplog def _create_dummy_dataset( self, in_memory: bool, tmp_dir: str, multiple_columns=False, array_features=False, nested_features=False ) -> Dataset: assert int(multiple_columns) + int(array_features) + int(nested_features) < 2 if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": [False, True, False, True]} dset = Dataset.from_dict(data) elif array_features: data = { "col_1": [[[True, False], [False, True]]] * 4, # 2D "col_2": 
[[[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]] * 4, # 3D array "col_3": [[3, 2, 1, 0]] * 4, # Sequence } features = Features( { "col_1": Array2D(shape=(2, 2), dtype="bool"), "col_2": Array3D(shape=(2, 2, 2), dtype="string"), "col_3": Sequence(feature=Value("int64")), } ) dset = Dataset.from_dict(data, features=features) elif nested_features: data = {"nested": [{"a": i, "x": i * 10, "c": i * 100} for i in range(1, 11)]} features = Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}) dset = Dataset.from_dict(data, features=features) else: dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}) if not in_memory: dset = self._to(in_memory, tmp_dir, dset) return dset def _to(self, in_memory, tmp_dir, *datasets): if in_memory: datasets = [dataset.map(keep_in_memory=True) for dataset in datasets] else: start = 0 while os.path.isfile(os.path.join(tmp_dir, f"dataset{start}.arrow")): start += 1 datasets = [ dataset.map(cache_file_name=os.path.join(tmp_dir, f"dataset{start + i}.arrow")) for i, dataset in enumerate(datasets) ] return datasets if len(datasets) > 1 else datasets[0] def test_dummy_dataset(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: self.assertDictEqual( dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}), ) self.assertEqual(dset[0]["col_1"], 3) self.assertEqual(dset["col_1"][0], 3) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: self.assertDictEqual( dset.features, Features( { "col_1": Array2D(shape=(2, 2), dtype="bool"), "col_2": Array3D(shape=(2, 2, 2), dtype="string"), "col_3": Sequence(feature=Value("int64")), } ), ) self.assertEqual(dset[0]["col_2"], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) self.assertEqual(dset["col_2"][0], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) def test_dataset_getitem(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") self.assertEqual(dset[-1]["filename"], "my_name-train_29") self.assertEqual(dset["filename"][-1], "my_name-train_29") self.assertListEqual(dset[:2]["filename"], ["my_name-train_0", "my_name-train_1"]) self.assertListEqual(dset["filename"][:2], ["my_name-train_0", "my_name-train_1"]) self.assertEqual(dset[:-1]["filename"][-1], "my_name-train_28") self.assertEqual(dset["filename"][:-1][-1], "my_name-train_28") self.assertListEqual(dset[[0, -1]]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[range(0, -2, -1)]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[np.array([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"]) self.assertListEqual(dset[pd.Series([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"]) with dset.select(range(2)) as dset_subset: self.assertListEqual(dset_subset[-1:]["filename"], ["my_name-train_1"]) self.assertListEqual(dset_subset["filename"][-1:], ["my_name-train_1"]) 
def test_dummy_dataset_deepcopy(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset2 = copy.deepcopy(dset) # don't copy the underlying arrow data using memory self.assertEqual(len(dset2), 10) self.assertDictEqual(dset2.features, Features({"filename": Value("string")})) self.assertEqual(dset2[0]["filename"], "my_name-train_0") self.assertEqual(dset2["filename"][0], "my_name-train_0") del dset2 def test_dummy_dataset_pickle(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: tmp_file = os.path.join(tmp_dir, "dset.pt") with self._create_dummy_dataset(in_memory, tmp_dir).select(range(0, 10, 2)) as dset: with open(tmp_file, "wb") as f: pickle.dump(dset, f) with open(tmp_file, "rb") as f: with pickle.load(f) as dset: self.assertEqual(len(dset), 5) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir).select( range(0, 10, 2), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") ) as dset: if not in_memory: dset._data.table = Unpicklable() dset._indices.table = Unpicklable() with open(tmp_file, "wb") as f: pickle.dump(dset, f) with open(tmp_file, "rb") as f: with pickle.load(f) as dset: self.assertEqual(len(dset), 5) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") def test_dummy_dataset_serialize(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with set_current_working_directory_to_temp_dir(): with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = "my_dataset" # rel path dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") expected = dset.to_dict() with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = os.path.join(tmp_dir, "my_dataset") # abs path dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir).select( range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") ) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") with self._create_dummy_dataset(in_memory, tmp_dir, nested_features=True) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual( dset.features, Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": 
Value("int64")}}), ) self.assertDictEqual(dset[0]["nested"], {"a": 1, "c": 100, "x": 10}) self.assertDictEqual(dset["nested"][0], {"a": 1, "c": 100, "x": 10}) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_shards=4) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 4) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_proc=2) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 2) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): dset.save_to_disk(dataset_path, num_shards=7, num_proc=2) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 7) with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: with assert_arrow_memory_doesnt_increase(): max_shard_size = dset._estimate_nbytes() // 2 + 1 dset.save_to_disk(dataset_path, max_shard_size=max_shard_size) with Dataset.load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset.to_dict(), expected) self.assertEqual(len(dset.cache_files), 2) def test_dummy_dataset_load_from_disk(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: dataset_path = os.path.join(tmp_dir, "my_dataset") dset.save_to_disk(dataset_path) with load_from_disk(dataset_path) as dset: self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertEqual(dset[0]["filename"], "my_name-train_0") self.assertEqual(dset["filename"][0], "my_name-train_0") def test_restore_saved_format(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) dataset_path = os.path.join(tmp_dir, "my_dataset") dset.save_to_disk(dataset_path) with load_from_disk(dataset_path) as loaded_dset: self.assertEqual(dset.format, loaded_dset.format) def test_set_format_numpy_multiple_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint dset.set_format(type="numpy", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], np.int64) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset["col_1"], np.ndarray) self.assertListEqual(list(dset["col_1"].shape), [4]) np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0])) self.assertNotEqual(dset._fingerprint, fingerprint) dset.reset_format() with 
dset.formatted_as(type="numpy", columns=["col_1"]): self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], np.int64) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset["col_1"], np.ndarray) self.assertListEqual(list(dset["col_1"].shape), [4]) np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0])) self.assertEqual(dset.format["type"], None) self.assertEqual(dset.format["format_kwargs"], {}) self.assertEqual(dset.format["columns"], dset.column_names) self.assertEqual(dset.format["output_all_columns"], False) dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="numpy", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0]), 2) self.assertIsInstance(dset[0]["col_2"], np.str_) self.assertEqual(dset[0]["col_2"].item(), "a") @require_numpy1_on_windows @require_torch def test_set_format_torch(self, in_memory): import torch with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="torch", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], torch.Tensor) self.assertIsInstance(dset["col_1"], torch.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="torch") self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_1"], torch.Tensor) self.assertIsInstance(dset["col_1"], torch.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].item(), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") self.assertIsInstance(dset[0]["col_3"], torch.Tensor) self.assertIsInstance(dset["col_3"], torch.Tensor) self.assertListEqual(list(dset[0]["col_3"].shape), []) @require_tf def test_set_format_tf(self, in_memory): import tensorflow as tf with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="tensorflow", columns=["col_1"]) self.assertEqual(len(dset[0]), 1) self.assertIsInstance(dset[0]["col_1"], tf.Tensor) self.assertListEqual(list(dset[0]["col_1"].shape), []) self.assertEqual(dset[0]["col_1"].numpy().item(), 3) dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) self.assertEqual(len(dset[0]), 3) self.assertIsInstance(dset[0]["col_2"], str) self.assertEqual(dset[0]["col_2"], "a") dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0]), 2) self.assertEqual(dset[0]["col_2"].numpy().decode("utf-8"), "a") def test_set_format_pandas(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="pandas", columns=["col_1"]) self.assertEqual(len(dset[0].columns), 1) self.assertIsInstance(dset[0], pd.DataFrame) self.assertListEqual(list(dset[0].shape), [1, 1]) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="pandas", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].columns), 2) self.assertEqual(dset[0]["col_2"].item(), "a") 
@require_polars def test_set_format_polars(self, in_memory): import polars as pl with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format(type="polars", columns=["col_1"]) self.assertEqual(len(dset[0].columns), 1) self.assertIsInstance(dset[0], pl.DataFrame) self.assertListEqual(list(dset[0].shape), [1, 1]) self.assertEqual(dset[0]["col_1"].item(), 3) dset.set_format(type="polars", columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].columns), 2) self.assertEqual(dset[0]["col_2"].item(), "a") def test_set_transform(self, in_memory): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_transform(transform=transform, columns=["col_1"]) self.assertEqual(dset.format["type"], "custom") self.assertEqual(len(dset[0].keys()), 1) self.assertEqual(dset[0]["col_1"], "3") self.assertEqual(dset[:2]["col_1"], ["3", "2"]) self.assertEqual(dset["col_1"][:2], ["3", "2"]) prev_format = dset.format dset.set_format(**dset.format) self.assertEqual(prev_format, dset.format) dset.set_transform(transform=transform, columns=["col_1", "col_2"]) self.assertEqual(len(dset[0].keys()), 2) self.assertEqual(dset[0]["col_2"], "A") def test_transmit_format(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: transform = datasets.arrow_dataset.transmit_format(lambda x: x) # make sure identity transform doesn't apply unnecessary format self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format(**dset.format) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) # check lists comparisons dset.set_format(columns=["col_1"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format(columns=["col_1", "col_2"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) dset.set_format("numpy", columns=["col_1", "col_2"]) self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) def test_cast(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: features = dset.features features["col_1"] = Value("float64") features = Features({k: features[k] for k in list(features)[::-1]}) fingerprint = dset._fingerprint # TODO: with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): with dset.cast(features) as casted_dset: self.assertEqual(casted_dset.num_columns, 3) self.assertEqual(casted_dset.features["col_1"], Value("float64")) self.assertIsInstance(casted_dset[0]["col_1"], float) self.assertNotEqual(casted_dset._fingerprint, fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) def test_class_encode_column(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with self.assertRaises(ValueError): dset.class_encode_column(column="does not exist") with dset.class_encode_column("col_1") as casted_dset: self.assertIsInstance(casted_dset.features["col_1"], ClassLabel) self.assertListEqual(casted_dset.features["col_1"].names, ["0", "1", "2", "3"]) self.assertListEqual(casted_dset["col_1"], [3, 2, 1, 0]) 
self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) with dset.class_encode_column("col_2") as casted_dset: self.assertIsInstance(casted_dset.features["col_2"], ClassLabel) self.assertListEqual(casted_dset.features["col_2"].names, ["a", "b", "c", "d"]) self.assertListEqual(casted_dset["col_2"], [0, 1, 2, 3]) self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) with dset.class_encode_column("col_3") as casted_dset: self.assertIsInstance(casted_dset.features["col_3"], ClassLabel) self.assertListEqual(casted_dset.features["col_3"].names, ["False", "True"]) self.assertListEqual(casted_dset["col_3"], [0, 1, 0, 1]) self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) self.assertNotEqual(casted_dset, dset) assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) # Test raises if feature is an array / sequence with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: for column in dset.column_names: with self.assertRaises(ValueError): dset.class_encode_column(column) def test_remove_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.remove_columns(column_names="col_1") as new_dset: self.assertEqual(new_dset.num_columns, 2) self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.remove_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: self.assertEqual(new_dset.num_columns, 0) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset._format_columns = ["col_1", "col_2", "col_3"] with dset.remove_columns(column_names=["col_1"]) as new_dset: self.assertListEqual(new_dset._format_columns, ["col_2", "col_3"]) self.assertEqual(new_dset.num_columns, 2) self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_rename_column(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.rename_column(original_column_name="col_1", new_column_name="new_name") as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_rename_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.rename_columns({"col_1": "new_name"}) as new_dset: self.assertEqual(new_dset.num_columns, 3) 
self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) with dset.rename_columns({"col_1": "new_name", "col_2": "new_name2"}) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["new_name", "new_name2", "col_3"]) self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) # Original column not in dataset with self.assertRaises(ValueError): dset.rename_columns({"not_there": "new_name"}) # Empty new name with self.assertRaises(ValueError): dset.rename_columns({"col_1": ""}) # Duplicates with self.assertRaises(ValueError): dset.rename_columns({"col_1": "new_name", "col_2": "new_name"}) def test_select_columns(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.select_columns(column_names=[]) as new_dset: self.assertEqual(new_dset.num_columns, 0) self.assertListEqual(list(new_dset.column_names), []) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: fingerprint = dset._fingerprint with dset.select_columns(column_names="col_1") as new_dset: self.assertEqual(new_dset.num_columns, 1) self.assertListEqual(list(new_dset.column_names), ["col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.select_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["col_1", "col_2", "col_3"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: with dset.select_columns(column_names=["col_3", "col_2", "col_1"]) as new_dset: self.assertEqual(new_dset.num_columns, 3) self.assertListEqual(list(new_dset.column_names), ["col_3", "col_2", "col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset._format_columns = ["col_1", "col_2", "col_3"] with dset.select_columns(column_names=["col_1"]) as new_dset: self.assertListEqual(new_dset._format_columns, ["col_1"]) self.assertEqual(new_dset.num_columns, 1) self.assertListEqual(list(new_dset.column_names), ["col_1"]) self.assertNotEqual(new_dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(new_dset) def test_concatenate(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: 
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7]) self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") del dset1, dset2, dset3 def test_concatenate_formatted(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1.set_format("numpy") with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: self.assertEqual(dset_concat.format["type"], None) dset2.set_format("numpy") dset3.set_format("numpy") with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: self.assertEqual(dset_concat.format["type"], "numpy") del dset1, dset2, dset3 def test_concatenate_with_indices(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7, 8]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = dset1.select([2, 1, 0]), dset2.select([2, 1, 0]), dset3 with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"], [6, 7, 8, 5, 4, 3, 2, 1, 0]) # in_memory = False: # 3 cache files for the dset_concat._data table # no cache file for the indices because it's in memory # in_memory = True: # no cache files since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") dset1 = dset1.rename_columns({"id": "id1"}) dset2 = dset2.rename_columns({"id": "id2"}) dset3 = dset3.rename_columns({"id": "id3"}) with concatenate_datasets([dset1, dset2, dset3], axis=1) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) self.assertEqual(len(dset_concat), len(dset1)) self.assertListEqual(dset_concat["id1"], [2, 1, 0]) self.assertListEqual(dset_concat["id2"], [5, 4, 3]) self.assertListEqual(dset_concat["id3"], [6, 7, 8]) # in_memory = False: # 3 cache files for the dset_concat._data table # no cache file for the indices because it's None # in_memory = True: # no cache files since dset_concat._data is in memory and dset_concat._indices is None self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) self.assertIsNone(dset_concat._indices) self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") with concatenate_datasets([dset1], axis=1) as dset_concat: self.assertEqual(len(dset_concat), len(dset1)) self.assertListEqual(dset_concat["id1"], [2, 1, 0]) # in_memory = False: # 1 cache file for the dset_concat._data table # no cache file for the indices because it's in memory # in_memory = 
True: # no cache files since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 1) self.assertTrue(dset_concat._indices == dset1._indices) self.assertEqual(dset_concat.info.description, "Dataset1") del dset1, dset2, dset3 def test_concatenate_with_indices_from_disk(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) dset1, dset2, dset3 = ( dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")), dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")), dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")), ) with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0]) # in_memory = False: # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table # There is only 1 for the indices tables (i1.arrow) # Indeed, the others are brought to memory since an offset is applied to them. # in_memory = True: # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") del dset1, dset2, dset3 def test_concatenate_pickle(self, in_memory): data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7], "foo": ["bar", "bar"]} info1 = DatasetInfo(description="Dataset1") info2 = DatasetInfo(description="Dataset2") with tempfile.TemporaryDirectory() as tmp_dir: dset1, dset2, dset3 = ( Dataset.from_dict(data1, info=info1), Dataset.from_dict(data2, info=info2), Dataset.from_dict(data3), ) schema = dset1.data.schema # mix from in-memory and on-disk datasets dset1, dset2 = self._to(in_memory, tmp_dir, dset1, dset2) dset3 = self._to(not in_memory, tmp_dir, dset3) dset1, dset2, dset3 = ( dset1.select( [2, 1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow") if not in_memory else None, ), dset2.select( [2, 1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow") if not in_memory else None, ), dset3.select( [1, 0], keep_in_memory=in_memory, indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow") if not in_memory else None, ), ) dset3 = dset3.rename_column("foo", "new_foo") dset3 = dset3.remove_columns("new_foo") if in_memory: dset3._data.table = Unpicklable(schema=schema) else: dset1._data.table, dset2._data.table = Unpicklable(schema=schema), Unpicklable(schema=schema) dset1, dset2, dset3 = (pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)) with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: if not in_memory: dset_concat._data.table = Unpicklable(schema=schema) with pickle.loads(pickle.dumps(dset_concat)) as dset_concat: self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + 
len(dset3)) self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0]) # in_memory = True: 1 cache file for dset3 # in_memory = False: 2 caches files for dset1 and dset2, and 1 cache file for i1.arrow self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 2 + 1) self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") del dset1, dset2, dset3 def test_flatten(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b.c", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b.c", "foo"]) self.assertDictEqual( dset.features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"en": "Thank you", "fr": "Merci"}] * 10, "foo": [1] * 10}, features=Features({"a": Translation(languages=["en", "fr"]), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.en", "a.fr", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.en", "a.fr", "foo"]) self.assertDictEqual( dset.features, Features({"a.en": Value("string"), "a.fr": Value("string"), "foo": Value("int64")}), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}] * 10, "foo": [1] * 10}, features=Features( { "a": TranslationVariableLanguages(languages=["en", "fr", "de"]), "foo": Value("int64"), } ), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.language", "a.translation", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.language", "a.translation", "foo"]) self.assertDictEqual( dset.features, Features( { "a.language": Sequence(Value("string")), "a.translation": Sequence(Value("string")), "foo": Value("int64"), } ), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) @require_pil def test_flatten_complex_image(self, in_memory): # decoding turned on with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, features=Features({"a": Image(), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a", "foo"]) self.assertDictEqual(dset.features, Features({"a": Image(), "foo": Value("int64")})) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned on + nesting with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": 
[{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Image()}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b", "foo"]) self.assertDictEqual(dset.features, Features({"a.b": Image(), "foo": Value("int64")})) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned off with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, features=Features({"a": Image(decode=False), "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.bytes", "a.path", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.bytes", "a.path", "foo"]) self.assertDictEqual( dset.features, Features({"a.bytes": Value("binary"), "a.path": Value("string"), "foo": Value("int64")}), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) # decoding turned off + nesting with tempfile.TemporaryDirectory() as tmp_dir: with Dataset.from_dict( {"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Image(decode=False)}, "foo": Value("int64")}), ) as dset: with self._to(in_memory, tmp_dir, dset) as dset: fingerprint = dset._fingerprint with dset.flatten() as dset: self.assertListEqual(sorted(dset.column_names), ["a.b.bytes", "a.b.path", "foo"]) self.assertListEqual(sorted(dset.features.keys()), ["a.b.bytes", "a.b.path", "foo"]) self.assertDictEqual( dset.features, Features( { "a.b.bytes": Value("binary"), "a.b.path": Value("string"), "foo": Value("int64"), } ), ) self.assertNotEqual(dset._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset) def test_map(self, in_memory): # standard with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = dset._fingerprint with dset.map( lambda x: {"name": x["filename"][:-2], "id": int(x["filename"].split("_")[-1])} ) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test["id"], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) # no transform with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(lambda x: None) as dset_test: self.assertEqual(len(dset_test), 30) self.assertEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) # with indices with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map( lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True ) as 
dset_test_with_indices: self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_with_indices.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test_with_indices["id"], list(range(30))) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) # interrupted with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: def func(x, i): if i == 4: raise KeyboardInterrupt() return {"name": x["filename"][:-2], "id": i} tmp_file = os.path.join(tmp_dir, "test.arrow") self.assertRaises( KeyboardInterrupt, dset.map, function=func, with_indices=True, cache_file_name=tmp_file, writer_batch_size=2, ) self.assertFalse(os.path.exists(tmp_file)) with dset.map( lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True, cache_file_name=tmp_file, writer_batch_size=2, ) as dset_test_with_indices: self.assertTrue(os.path.exists(tmp_file)) self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_with_indices.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), ) self.assertListEqual(dset_test_with_indices["id"], list(range(30))) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) # formatted with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset.set_format("numpy", columns=["col_1"]) with dset.map(lambda x: {"col_1_plus_one": x["col_1"] + 1}) as dset_test: self.assertEqual(len(dset_test), 4) self.assertEqual(dset_test.format["type"], "numpy") self.assertIsInstance(dset_test["col_1"], np.ndarray) self.assertIsInstance(dset_test["col_1_plus_one"], np.ndarray) self.assertListEqual(sorted(dset_test[0].keys()), ["col_1", "col_1_plus_one"]) self.assertListEqual(sorted(dset_test.column_names), ["col_1", "col_1_plus_one", "col_2", "col_3"]) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) def test_map_multiprocessing(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # standard with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = dset._fingerprint with dset.map(picklable_map_function, num_proc=2) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) if not in_memory: self.assertIn("_of_00002.arrow", dset_test.cache_files[0]["filename"]) self.assertListEqual(dset_test["id"], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # num_proc > num rows with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertDictEqual(dset.features, Features({"filename": Value("string")})) fingerprint = dset._fingerprint with dset.select([0, 1], keep_in_memory=True).map(picklable_map_function, num_proc=10) as dset_test: self.assertEqual(len(dset_test), 2) self.assertDictEqual(dset.features, Features({"filename": 
Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"], list(range(2))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_indices with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["id"], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_rank with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(picklable_map_function_with_rank, num_proc=3, with_rank=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "rank": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # with_indices AND with_rank with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map( picklable_map_function_with_indices_and_rank, num_proc=3, with_indices=True, with_rank=True ) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64"), "rank": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) self.assertListEqual(dset_test["id"], list(range(30))) self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) with tempfile.TemporaryDirectory() as tmp_dir: # new_fingerprint new_fingerprint = "foobar" invalid_new_fingerprint = "foobar/hey" with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint self.assertRaises( ValueError, dset.map, picklable_map_function, num_proc=2, new_fingerprint=invalid_new_fingerprint ) with dset.map(picklable_map_function, num_proc=2, new_fingerprint=new_fingerprint) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) self.assertEqual(dset_test._fingerprint, 
new_fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) file_names = sorted(Path(cache_file["filename"]).name for cache_file in dset_test.cache_files) for i, file_name in enumerate(file_names): self.assertIn(new_fingerprint + f"_{i:05d}", file_name) with tempfile.TemporaryDirectory() as tmp_dir: # lambda (requires multiprocess from pathos) with self._create_dummy_dataset(in_memory, tmp_dir) as dset: fingerprint = dset._fingerprint with dset.map(lambda x: {"id": int(x["filename"].split("_")[-1])}, num_proc=2) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test.features, Features({"filename": Value("string"), "id": Value("int64")}), ) self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) self.assertListEqual(dset_test["id"], list(range(30))) self.assertNotEqual(dset_test._fingerprint, fingerprint) assert_arrow_metadata_are_synced_with_dataset_features(dset_test) def test_map_new_features(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: features = Features({"filename": Value("string"), "label": ClassLabel(names=["positive", "negative"])}) with dset.map( lambda x, i: {"label": i % 2}, with_indices=True, features=features ) as dset_test_with_indices: self.assertEqual(len(dset_test_with_indices), 30) self.assertDictEqual( dset_test_with_indices.features, features, ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) def test_map_batched(self, in_memory): def map_batched(example): return {"filename_new": [x + "_extension" for x in example["filename"]]} with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(map_batched, batched=True) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) # change batch size and drop the last batch with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: batch_size = 4 with dset.map( map_batched, batched=True, batch_size=batch_size, drop_last_batch=True ) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30 // batch_size * batch_size) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.formatted_as("numpy", columns=["filename"]): with dset.map(map_batched, batched=True) as dset_test_batched: self.assertEqual(len(dset_test_batched), 30) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual( dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")}), ) assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) def map_batched_with_indices(example, idx): return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]} with tempfile.TemporaryDirectory() 
    def test_map_batched(self, in_memory):
        def map_batched(example):
            return {"filename_new": [x + "_extension" for x in example["filename"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(map_batched, batched=True) as dset_test_batched:
                    self.assertEqual(len(dset_test_batched), 30)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(
                        dset_test_batched.features,
                        Features({"filename": Value("string"), "filename_new": Value("string")}),
                    )
                    assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)

        # change the batch size and drop the last batch
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                batch_size = 4
                with dset.map(
                    map_batched, batched=True, batch_size=batch_size, drop_last_batch=True
                ) as dset_test_batched:
                    self.assertEqual(len(dset_test_batched), 30 // batch_size * batch_size)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(
                        dset_test_batched.features,
                        Features({"filename": Value("string"), "filename_new": Value("string")}),
                    )
                    assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.formatted_as("numpy", columns=["filename"]):
                    with dset.map(map_batched, batched=True) as dset_test_batched:
                        self.assertEqual(len(dset_test_batched), 30)
                        self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                        self.assertDictEqual(
                            dset_test_batched.features,
                            Features({"filename": Value("string"), "filename_new": Value("string")}),
                        )
                        assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)

        def map_batched_with_indices(example, idx):
            return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(
                    map_batched_with_indices, batched=True, with_indices=True
                ) as dset_test_with_indices_batched:
                    self.assertEqual(len(dset_test_with_indices_batched), 30)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(
                        dset_test_with_indices_batched.features,
                        Features({"filename": Value("string"), "filename_new": Value("string")}),
                    )
                    assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices_batched)

        # check that remove_columns works even if the function modifies its input in place
        def map_batched_modifying_inputs_inplace(example):
            result = {"filename_new": [x + "_extension" for x in example["filename"]]}
            del example["filename"]
            return result

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(
                    map_batched_modifying_inputs_inplace, batched=True, remove_columns="filename"
                ) as dset_test_modifying_inputs_inplace:
                    self.assertEqual(len(dset_test_modifying_inputs_inplace), 30)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(
                        dset_test_modifying_inputs_inplace.features,
                        Features({"filename_new": Value("string")}),
                    )
                    assert_arrow_metadata_are_synced_with_dataset_features(dset_test_modifying_inputs_inplace)

    def test_map_nested(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict({"field": ["a", "b"]}) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(lambda example: {"otherfield": {"capital": example["field"].capitalize()}}) as dset:
                        with dset.map(lambda example: {"otherfield": {"append_x": example["field"] + "x"}}) as dset:
                            self.assertEqual(dset[0], {"field": "a", "otherfield": {"append_x": "ax"}})

    def test_map_return_example_as_dict_value(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict({"en": ["aa", "bb"], "fr": ["cc", "dd"]}) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(lambda example: {"translation": example}) as dset:
                        self.assertEqual(dset[0], {"en": "aa", "fr": "cc", "translation": {"en": "aa", "fr": "cc"}})

    def test_map_fn_kwargs(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict({"id": range(10)}) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    fn_kwargs = {"offset": 3}
                    with dset.map(
                        lambda example, offset: {"id+offset": example["id"] + offset}, fn_kwargs=fn_kwargs
                    ) as mapped_dset:
                        assert mapped_dset["id+offset"] == list(range(3, 13))
                    with dset.map(
                        lambda id, offset: {"id+offset": id + offset}, fn_kwargs=fn_kwargs, input_columns="id"
                    ) as mapped_dset:
                        assert mapped_dset["id+offset"] == list(range(3, 13))
                    with dset.map(
                        lambda id, i, offset: {"id+offset": i + offset},
                        fn_kwargs=fn_kwargs,
                        input_columns="id",
                        with_indices=True,
                    ) as mapped_dset:
                        assert mapped_dset["id+offset"] == list(range(3, 13))
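    # The caching tests below rely on fingerprinting: repeating the same `map`
    # call is expected to reuse the cache file instead of recomputing. A minimal
    # sketch of the behavior being asserted (assuming an on-disk dataset):
    #
    #   a = dset.map(lambda x: {"foo": "bar"})  # computes and writes a cache file
    #   b = dset.map(lambda x: {"foo": "bar"})  # loads the cached result
    #   assert a.cache_files == b.cache_files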
    def test_map_caching(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            self._caplog.clear()
            with self._caplog.at_level(INFO, logger=get_logger().name):
                with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                    with patch(
                        "datasets.arrow_dataset.Dataset._map_single",
                        autospec=Dataset._map_single,
                        side_effect=Dataset._map_single,
                    ) as mock_map_single:
                        with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
                            dset_test1_data_files = list(dset_test1.cache_files)
                            self.assertEqual(mock_map_single.call_count, 1)
                        with dset.map(lambda x: {"foo": "bar"}) as dset_test2:
                            self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
                            self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))
                            self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory)
                            self.assertEqual(mock_map_single.call_count, 2 if in_memory else 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            self._caplog.clear()
            with self._caplog.at_level(INFO, logger=get_logger().name):
                with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                    with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
                        dset_test1_data_files = list(dset_test1.cache_files)
                    with dset.map(lambda x: {"foo": "bar"}, load_from_cache_file=False) as dset_test2:
                        self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
                        self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))
                        self.assertNotIn("Loading cached processed dataset", self._caplog.text)

        with tempfile.TemporaryDirectory() as tmp_dir:
            self._caplog.clear()
            with self._caplog.at_level(INFO, logger=get_logger().name):
                with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                    with patch(
                        "datasets.arrow_dataset.Pool",
                        new_callable=PickableMagicMock,
                        side_effect=datasets.arrow_dataset.Pool,
                    ) as mock_pool:
                        with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1:
                            dset_test1_data_files = list(dset_test1.cache_files)
                            self.assertEqual(mock_pool.call_count, 1)
                        with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test2:
                            self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
                            self.assertTrue(
                                (len(re.findall("Loading cached processed dataset", self._caplog.text)) == 1)
                                ^ in_memory
                            )
                            self.assertEqual(mock_pool.call_count, 2 if in_memory else 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            self._caplog.clear()
            with self._caplog.at_level(INFO, logger=get_logger().name):
                with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                    with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1:
                        dset_test1_data_files = list(dset_test1.cache_files)
                    with dset.map(lambda x: {"foo": "bar"}, num_proc=2, load_from_cache_file=False) as dset_test2:
                        self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
                        self.assertEqual(len(dset_test2.cache_files), (1 - int(in_memory)) * 2)
                        self.assertNotIn("Loading cached processed dataset", self._caplog.text)

        if not in_memory:
            try:
                self._caplog.clear()
                with tempfile.TemporaryDirectory() as tmp_dir:
                    with self._caplog.at_level(INFO, logger=get_logger().name):
                        with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                            datasets.disable_caching()
                            with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
                                with dset.map(lambda x: {"foo": "bar"}) as dset_test2:
                                    self.assertNotEqual(dset_test1.cache_files, dset_test2.cache_files)
                                    self.assertEqual(len(dset_test1.cache_files), 1)
                                    self.assertEqual(len(dset_test2.cache_files), 1)
                                    self.assertNotIn("Loading cached processed dataset", self._caplog.text)
                                    # make sure the arrow files are going to be removed
                                    self.assertIn(
                                        Path(tempfile.gettempdir()),
                                        Path(dset_test1.cache_files[0]["filename"]).parents,
                                    )
                                    self.assertIn(
                                        Path(tempfile.gettempdir()),
                                        Path(dset_test2.cache_files[0]["filename"]).parents,
                                    )
            finally:
                datasets.enable_caching()
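    # The next tests check that a map function may return a pyarrow Table,
    # pandas DataFrame, or polars DataFrame instead of a dict, e.g. (a sketch,
    # not code from the tests themselves):
    #
    #   dset.map(lambda x: pa.table({"id": [0], "text": ["a"]}))
    #
    # In non-batched mode the returned table must hold exactly one row.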
Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Batched def func_return_single_row_pa_table_batched(x): batch_size = len(x[next(iter(x))]) return pa.table({"id": [0] * batch_size, "text": ["a"] * batch_size}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pa_table_batched, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Error when returning a table with more than one row in the non-batched mode def func_return_multi_row_pa_table(x): return pa.table({"id": [0, 1], "text": ["a", "b"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertRaises(ValueError, dset.map, func_return_multi_row_pa_table) # arrow formatted dataset def func_return_table_from_expression(t): import pyarrow.dataset as pds return pds.dataset(t).to_table( columns={"new_column": pds.field("")._call("ascii_capitalize", [pds.field("filename")])} ) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.with_format("arrow").map(func_return_table_from_expression, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"new_column": Value("string")}), ) self.assertEqual(dset_test.with_format(None)[0]["new_column"], dset[0]["filename"].capitalize()) def test_map_return_pd_dataframe(self, in_memory): def func_return_single_row_pd_dataframe(x): return pd.DataFrame({"id": [0], "text": ["a"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pd_dataframe) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Batched def func_return_single_row_pd_dataframe_batched(x): batch_size = len(x[next(iter(x))]) return pd.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pd_dataframe_batched, batched=True) as dset_test: self.assertEqual(len(dset_test), 30) self.assertDictEqual( dset_test.features, Features({"id": Value("int64"), "text": Value("string")}), ) self.assertEqual(dset_test[0]["id"], 0) self.assertEqual(dset_test[0]["text"], "a") # Error when returning a table with more than one row in the non-batched mode def func_return_multi_row_pd_dataframe(x): return pd.DataFrame({"id": [0, 1], "text": ["a", "b"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: self.assertRaises(ValueError, dset.map, func_return_multi_row_pd_dataframe) @require_polars def test_map_return_pl_dataframe(self, in_memory): import polars as pl def func_return_single_row_pl_dataframe(x): return pl.DataFrame({"id": [0], "text": ["a"]}) with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: with dset.map(func_return_single_row_pl_dataframe) as dset_test: 
    @require_numpy1_on_windows
    @require_torch
    def test_map_torch(self, in_memory):
        import torch

        def func(example):
            return {"tensor": torch.tensor([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(func) as dset_test:
                    self.assertEqual(len(dset_test), 30)
                    self.assertDictEqual(
                        dset_test.features,
                        Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
                    )
                    self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])

    @require_tf
    def test_map_tf(self, in_memory):
        import tensorflow as tf

        def func(example):
            return {"tensor": tf.constant([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(func) as dset_test:
                    self.assertEqual(len(dset_test), 30)
                    self.assertDictEqual(
                        dset_test.features,
                        Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
                    )
                    self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])

    @require_jax
    def test_map_jax(self, in_memory):
        import jax.numpy as jnp

        def func(example):
            return {"tensor": jnp.asarray([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(func) as dset_test:
                    self.assertEqual(len(dset_test), 30)
                    self.assertDictEqual(
                        dset_test.features,
                        Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
                    )
                    self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])

    def test_map_numpy(self, in_memory):
        def func(example):
            return {"tensor": np.array([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(func) as dset_test:
                    self.assertEqual(len(dset_test), 30)
                    self.assertDictEqual(
                        dset_test.features,
                        Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))}),
                    )
                    self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])

    @require_numpy1_on_windows
    @require_torch
    def test_map_tensor_batched(self, in_memory):
        import torch

        def func(batch):
            return {"tensor": torch.tensor([[1.0, 2, 3]] * len(batch["filename"]))}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(func, batched=True) as dset_test:
                    self.assertEqual(len(dset_test), 30)
                    self.assertDictEqual(
                        dset_test.features,
                        Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
                    )
                    self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
    def test_map_input_columns(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                with dset.map(lambda col_1: {"label": col_1 % 2}, input_columns="col_1") as mapped_dset:
                    self.assertEqual(mapped_dset[0].keys(), {"col_1", "col_2", "col_3", "label"})
                    self.assertEqual(
                        mapped_dset.features,
                        Features(
                            {
                                "col_1": Value("int64"),
                                "col_2": Value("string"),
                                "col_3": Value("bool"),
                                "label": Value("int64"),
                            }
                        ),
                    )

    def test_map_remove_columns(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True) as dset:
                    self.assertTrue("id" in dset[0])
                    self.assertDictEqual(
                        dset.features,
                        Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
                    )
                    assert_arrow_metadata_are_synced_with_dataset_features(dset)
                    with dset.map(lambda x: x, remove_columns=["id"]) as mapped_dset:
                        self.assertTrue("id" not in mapped_dset[0])
                        self.assertDictEqual(
                            mapped_dset.features, Features({"filename": Value("string"), "name": Value("string")})
                        )
                        assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)
                        with mapped_dset.with_format("numpy", columns=mapped_dset.column_names) as mapped_dset:
                            with mapped_dset.map(
                                lambda x: {"name": 1}, remove_columns=mapped_dset.column_names
                            ) as mapped_dset:
                                self.assertTrue("filename" not in mapped_dset[0])
                                self.assertTrue("name" in mapped_dset[0])
                                self.assertDictEqual(mapped_dset.features, Features({"name": Value(dtype="int64")}))
                                assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)
                    # empty dataset
                    columns_names = dset.column_names
                    with dset.select([]) as empty_dset:
                        self.assertEqual(len(empty_dset), 0)
                        with empty_dset.map(lambda x: {}, remove_columns=columns_names[0]) as mapped_dset:
                            self.assertListEqual(columns_names[1:], mapped_dset.column_names)
                            assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)

    def test_map_stateful_callable(self, in_memory):
        # be sure that the state of the map callable is unaffected
        # before processing the dataset examples
        class ExampleCounter:
            def __init__(self, batched=False):
                self.batched = batched
                # state
                self.cnt = 0

            def __call__(self, example):
                if self.batched:
                    self.cnt += len(example)
                else:
                    self.cnt += 1

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                ex_cnt = ExampleCounter()
                dset.map(ex_cnt)
                self.assertEqual(ex_cnt.cnt, len(dset))

                ex_cnt = ExampleCounter(batched=True)
                dset.map(ex_cnt)
                self.assertEqual(ex_cnt.cnt, len(dset))

    @require_not_windows
    def test_map_crash_subprocess(self, in_memory):
        # be sure that a crash in one of the subprocesses will not
        # hang the dataset.map() call forever
        def do_crash(row):
            import os

            os.kill(os.getpid(), 9)
            return row

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with pytest.raises(RuntimeError) as excinfo:
                    dset.map(do_crash, num_proc=2)
                assert str(excinfo.value) == (
                    "One of the subprocesses has abruptly died during map operation."
                    "To debug the error, disable multiprocessing."
                )
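    # `Dataset.filter` is exercised next. A minimal sketch of the call under
    # test (assuming an integer "col" column):
    #
    #   kept = dset.filter(lambda x: x["col"] > 0)
    #
    # Filtering builds an indices mapping rather than rewriting the data files.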
    def test_filter(self, in_memory):
        # keep only the first five examples
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                fingerprint = dset._fingerprint
                with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five:
                    self.assertEqual(len(dset_filter_first_five), 5)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_filter_first_five.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint)

        # filter filenames with an even id at the end + formatted
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                dset.set_format("numpy")
                fingerprint = dset._fingerprint
                with dset.filter(lambda x: (int(x["filename"][-1]) % 2 == 0)) as dset_filter_even_num:
                    self.assertEqual(len(dset_filter_even_num), 15)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_filter_even_num.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint)
                    self.assertEqual(dset_filter_even_num.format["type"], "numpy")

    def test_filter_with_indices_mapping(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"col": [0, 1, 2]})
            with self._to(in_memory, tmp_dir, dset) as dset:
                with dset.filter(lambda x: x["col"] > 0) as dset:
                    self.assertListEqual(dset["col"], [1, 2])
                    with dset.filter(lambda x: x["col"] < 2) as dset:
                        self.assertListEqual(dset["col"], [1])

    def test_filter_empty(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                self.assertIsNone(dset._indices, None)

                tmp_file = os.path.join(tmp_dir, "test.arrow")
                with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset:
                    self.assertEqual(len(dset), 0)
                    self.assertIsNotNone(dset._indices, None)

                    tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
                    with dset.filter(lambda _: False, cache_file_name=tmp_file_2) as dset2:
                        self.assertEqual(len(dset2), 0)
                        self.assertEqual(dset._indices, dset2._indices)

    def test_filter_batched(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"col": [0, 1, 2]})
            with self._to(in_memory, tmp_dir, dset) as dset:
                with dset.filter(lambda x: [i > 0 for i in x["col"]], batched=True) as dset:
                    self.assertListEqual(dset["col"], [1, 2])
                    with dset.filter(lambda x: [i < 2 for i in x["col"]], batched=True) as dset:
                        self.assertListEqual(dset["col"], [1])

    def test_filter_input_columns(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"col_1": [0, 1, 2], "col_2": ["a", "b", "c"]})
            with self._to(in_memory, tmp_dir, dset) as dset:
                with dset.filter(lambda x: x > 0, input_columns=["col_1"]) as filtered_dset:
                    self.assertListEqual(filtered_dset.column_names, dset.column_names)
                    self.assertListEqual(filtered_dset["col_1"], [1, 2])
                    self.assertListEqual(filtered_dset["col_2"], ["b", "c"])

    def test_filter_fn_kwargs(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict({"id": range(10)}) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    fn_kwargs = {"max_offset": 3}
                    with dset.filter(
                        lambda example, max_offset: example["id"] < max_offset, fn_kwargs=fn_kwargs
                    ) as filtered_dset:
                        assert len(filtered_dset) == 3
                    with dset.filter(
                        lambda id, max_offset: id < max_offset, fn_kwargs=fn_kwargs, input_columns="id"
                    ) as filtered_dset:
                        assert len(filtered_dset) == 3
                    with dset.filter(
                        lambda id, i, max_offset: i < max_offset,
                        fn_kwargs=fn_kwargs,
                        input_columns="id",
                        with_indices=True,
                    ) as filtered_dset:
                        assert len(filtered_dset) == 3
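    # Multiprocessed filtering writes one cache file per worker, so `num_proc=2`
    # on an on-disk dataset is expected to yield two cache files.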
    def test_filter_multiprocessing(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                fingerprint = dset._fingerprint
                with dset.filter(picklable_filter_function, num_proc=2) as dset_filter_first_ten:
                    self.assertEqual(len(dset_filter_first_ten), 10)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_filter_first_ten.features, Features({"filename": Value("string")}))
                    self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2)
                    self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint)

        with tempfile.TemporaryDirectory() as tmp_dir:  # with_rank
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                fingerprint = dset._fingerprint
                with dset.filter(
                    picklable_filter_function_with_rank, num_proc=2, with_rank=True
                ) as dset_filter_first_rank:
                    self.assertEqual(len(dset_filter_first_rank), min(len(dset) // 2, len(dset)))
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_filter_first_rank.features, Features({"filename": Value("string")}))
                    self.assertEqual(len(dset_filter_first_rank.cache_files), 0 if in_memory else 2)
                    self.assertNotEqual(dset_filter_first_rank._fingerprint, fingerprint)

    def test_filter_caching(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            self._caplog.clear()
            with self._caplog.at_level(INFO, logger=get_logger().name):
                with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                    with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five1:
                        dset_test1_data_files = list(dset_filter_first_five1.cache_files)
                    with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five2:
                        self.assertEqual(dset_test1_data_files, dset_filter_first_five2.cache_files)
                        self.assertEqual(len(dset_filter_first_five2.cache_files), 0 if in_memory else 2)
                        self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory)

    def test_keep_features_after_transform_specified(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(invert_labels, features=features) as inverted_dset:
                        self.assertEqual(inverted_dset.features.type, features.type)
                        self.assertDictEqual(inverted_dset.features, features)
                        assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)

    def test_keep_features_after_transform_unspecified(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(invert_labels) as inverted_dset:
                        self.assertEqual(inverted_dset.features.type, features.type)
                        self.assertDictEqual(inverted_dset.features, features)
                        assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)
    def test_keep_features_after_transform_to_file(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    tmp_file = os.path.join(tmp_dir, "test.arrow")
                    dset.map(invert_labels, cache_file_name=tmp_file)
                    with Dataset.from_file(tmp_file) as inverted_dset:
                        self.assertEqual(inverted_dset.features.type, features.type)
                        self.assertDictEqual(inverted_dset.features, features)

    def test_keep_features_after_transform_to_memory(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(invert_labels, keep_in_memory=True) as inverted_dset:
                        self.assertEqual(inverted_dset.features.type, features.type)
                        self.assertDictEqual(inverted_dset.features, features)

    def test_keep_features_after_loading_from_cache(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    tmp_file1 = os.path.join(tmp_dir, "test1.arrow")
                    tmp_file2 = os.path.join(tmp_dir, "test2.arrow")
                    # TODO: Why mapped twice?
                    inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1)
                    inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2)
                    self.assertGreater(len(inverted_dset.cache_files), 0)
                    self.assertEqual(inverted_dset.features.type, features.type)
                    self.assertDictEqual(inverted_dset.features, features)
                    del inverted_dset

    def test_keep_features_with_new_features(self, in_memory):
        features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
            }
        )

        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]], "labels2": x["labels"]}

        expected_features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
                "labels2": Sequence(Value("int64")),
            }
        )

        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(
                {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
            ) as dset:
                with self._to(in_memory, tmp_dir, dset) as dset:
                    with dset.map(invert_labels) as inverted_dset:
                        self.assertEqual(inverted_dset.features.type, expected_features.type)
                        self.assertDictEqual(inverted_dset.features, expected_features)
                        assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)
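    # `Dataset.select` is exercised next. A minimal sketch (placeholder names):
    #
    #   subset = dset.select([0, 2, 4])  # non-contiguous -> an indices mapping is created
    #   subset = dset.select(range(5))   # contiguous -> the arrow table is simply sliced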
    def test_select(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                # select every other example
                indices = list(range(0, len(dset), 2))
                tmp_file = os.path.join(tmp_dir, "test.arrow")
                fingerprint = dset._fingerprint
                with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_even:
                    self.assertIsNotNone(dset_select_even._indices)  # an indices mapping is created
                    self.assertTrue(os.path.exists(tmp_file))
                    self.assertEqual(len(dset_select_even), 15)
                    for row in dset_select_even:
                        self.assertEqual(int(row["filename"][-1]) % 2, 0)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_select_even.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_select_even._fingerprint, fingerprint)

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                indices = list(range(0, len(dset)))
                with dset.select(indices) as dset_select_all:
                    # no indices mapping, since the indices are contiguous
                    # (in this case the arrow table is simply sliced, which is more efficient)
                    self.assertIsNone(dset_select_all._indices)
                    self.assertEqual(len(dset_select_all), len(dset))
                    self.assertListEqual(list(dset_select_all), list(dset))
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_select_all._fingerprint, fingerprint)
                indices = range(0, len(dset))
                with dset.select(indices) as dset_select_all:
                    # same but with a range
                    self.assertIsNone(dset_select_all._indices)
                    self.assertEqual(len(dset_select_all), len(dset))
                    self.assertListEqual(list(dset_select_all), list(dset))
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_select_all._fingerprint, fingerprint)

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                bad_indices = list(range(5))
                bad_indices[-1] = len(dset) + 10  # out of bounds
                tmp_file = os.path.join(tmp_dir, "test.arrow")
                self.assertRaises(
                    Exception,
                    dset.select,
                    indices=bad_indices,
                    indices_cache_file_name=tmp_file,
                    writer_batch_size=2,
                )
                self.assertFalse(os.path.exists(tmp_file))

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                indices = iter(range(len(dset)))  # iterator of contiguous indices
                with dset.select(indices) as dset_select_all:
                    # no indices mapping, since the indices are contiguous
                    self.assertIsNone(dset_select_all._indices)
                    self.assertEqual(len(dset_select_all), len(dset))
                indices = reversed(range(len(dset)))  # iterator of non-contiguous indices
                tmp_file = os.path.join(tmp_dir, "test.arrow")
                with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_all:
                    # new indices mapping, since the indices are not contiguous
                    self.assertIsNotNone(dset_select_all._indices)
                    self.assertEqual(len(dset_select_all), len(dset))

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                bad_indices = list(range(5))
                bad_indices[3] = "foo"  # wrong type
                tmp_file = os.path.join(tmp_dir, "test.arrow")
                self.assertRaises(
                    Exception,
                    dset.select,
                    indices=bad_indices,
                    indices_cache_file_name=tmp_file,
                    writer_batch_size=2,
                )
                self.assertFalse(os.path.exists(tmp_file))
                dset.set_format("numpy")
                with dset.select(
                    range(5),
                    indices_cache_file_name=tmp_file,
                    writer_batch_size=2,
                ) as dset_select_five:
                    self.assertIsNone(dset_select_five._indices)
                    self.assertEqual(len(dset_select_five), 5)
                    self.assertEqual(dset_select_five.format["type"], "numpy")
                    for i, row in enumerate(dset_select_five):
                        self.assertEqual(int(row["filename"][-1]), i)
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_select_five.features, Features({"filename": Value("string")}))

    def test_select_then_map(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.select([0]) as d1:
                    with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1:
                        self.assertEqual(d1[0]["id"], 0)
                with dset.select([1]) as d2:
                    with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2:
                        self.assertEqual(d2[0]["id"], 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                with dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")) as d1:
                    with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1:
                        self.assertEqual(d1[0]["id"], 0)
                with dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")) as d2:
                    with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2:
                        self.assertEqual(d2[0]["id"], 1)

    def test_pickle_after_many_transforms_on_disk(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                self.assertEqual(len(dset.cache_files), 0 if in_memory else 1)
                with dset.rename_column("filename", "file") as dset:
                    self.assertListEqual(dset.column_names, ["file"])
                    with dset.select(range(5)) as dset:
                        self.assertEqual(len(dset), 5)
                        with dset.map(lambda x: {"id": int(x["file"][-1])}) as dset:
                            self.assertListEqual(sorted(dset.column_names), ["file", "id"])
                            with dset.rename_column("id", "number") as dset:
                                self.assertListEqual(sorted(dset.column_names), ["file", "number"])
                                with dset.select([1, 0]) as dset:
                                    self.assertEqual(dset[0]["file"], "my_name-train_1")
                                    self.assertEqual(dset[0]["number"], 1)

                                    self.assertEqual(dset._indices["indices"].to_pylist(), [1, 0])
                                    if not in_memory:
                                        self.assertIn(
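    # Shuffling is seeded and cached; the same seed is expected to reproduce the
    # same permutation, e.g. (a sketch):
    #
    #   a = dset.shuffle(seed=1234)
    #   b = dset.shuffle(seed=1234)
    #   assert a["filename"] == b["filename"]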
("rename_columns", (["file", "number"],), {}), dset._data.replays, ) if not in_memory: dset._data.table = Unpicklable() # check that we don't pickle the entire table pickled = pickle.dumps(dset) with pickle.loads(pickled) as loaded: self.assertEqual(loaded[0]["file"], "my_name-train_1") self.assertEqual(loaded[0]["number"], 1) def test_shuffle(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: with self._create_dummy_dataset(in_memory, tmp_dir) as dset: tmp_file = os.path.join(tmp_dir, "test.arrow") fingerprint = dset._fingerprint with dset.shuffle(seed=1234, keep_in_memory=True) as dset_shuffled: self.assertEqual(len(dset_shuffled), 30) self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled: self.assertEqual(len(dset_shuffled), 30) self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) # Reproducibility tmp_file = os.path.join(tmp_dir, "test_2.arrow") with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled_2: self.assertListEqual(dset_shuffled["filename"], dset_shuffled_2["filename"]) # Compatible with temp_seed with temp_seed(42), dset.shuffle() as d1: with temp_seed(42), dset.shuffle() as d2, dset.shuffle() as d3: self.assertListEqual(d1["filename"], d2["filename"]) self.assertEqual(d1._fingerprint, d2._fingerprint) self.assertNotEqual(d3["filename"], d2["filename"]) self.assertNotEqual(d3._fingerprint, d2._fingerprint) def test_sort(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # Sort on a single key with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir) as dset: # Keep only 10 examples tmp_file = os.path.join(tmp_dir, "test.arrow") with dset.select(range(10), indices_cache_file_name=tmp_file) as dset: tmp_file = os.path.join(tmp_dir, "test_2.arrow") with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: self.assertEqual(len(dset), 10) self.assertEqual(dset[0]["filename"], "my_name-train_8") self.assertEqual(dset[1]["filename"], "my_name-train_9") # Sort tmp_file = os.path.join(tmp_dir, "test_3.arrow") fingerprint = dset._fingerprint with dset.sort("filename", indices_cache_file_name=tmp_file) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(int(row["filename"][-1]), i) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")})) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # Sort reversed tmp_file = os.path.join(tmp_dir, "test_4.arrow") fingerprint = dset._fingerprint with dset.sort("filename", indices_cache_file_name=tmp_file, reverse=True) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i) self.assertDictEqual(dset.features, Features({"filename": Value("string")})) self.assertDictEqual(dset_sorted.features, 
Features({"filename": Value("string")})) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # formatted dset.set_format("numpy") with dset.sort("filename") as dset_sorted_formatted: self.assertEqual(dset_sorted_formatted.format["type"], "numpy") # Sort on multiple keys with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True) as dset: tmp_file = os.path.join(tmp_dir, "test_5.arrow") fingerprint = dset._fingerprint # Throw error when reverse is a list of bools that does not match the length of column_names with pytest.raises(ValueError): dset.sort(["col_1", "col_2", "col_3"], reverse=[False]) with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: # Sort with dset.sort(["col_1", "col_2", "col_3"], reverse=[False, True, False]) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(row["col_1"], i) self.assertDictEqual( dset.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertDictEqual( dset_sorted.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # Sort reversed with dset.sort(["col_1", "col_2", "col_3"], reverse=[True, False, True]) as dset_sorted: for i, row in enumerate(dset_sorted): self.assertEqual(row["col_1"], len(dset_sorted) - 1 - i) self.assertDictEqual( dset.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertDictEqual( dset_sorted.features, Features( { "col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool"), } ), ) self.assertNotEqual(dset_sorted._fingerprint, fingerprint) # formatted dset.set_format("numpy") with dset.sort( ["col_1", "col_2", "col_3"], reverse=[False, True, False] ) as dset_sorted_formatted: self.assertEqual(dset_sorted_formatted.format["type"], "numpy") def test_to_csv(self, in_memory): with tempfile.TemporaryDirectory() as tmp_dir: # File path argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_path.csv") bytes_written = dset.to_csv(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # File buffer argument with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: file_path = os.path.join(tmp_dir, "test_buffer.csv") with open(file_path, "wb+") as buffer: bytes_written = dset.to_csv(path_or_buf=buffer) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # After a select/shuffle transform with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: dset = dset.select(range(0, len(dset), 2)).shuffle() file_path = os.path.join(tmp_dir, "test_path.csv") bytes_written = dset.to_csv(path_or_buf=file_path) self.assertTrue(os.path.isfile(file_path)) self.assertEqual(bytes_written, os.path.getsize(file_path)) csv_dset = pd.read_csv(file_path) self.assertEqual(csv_dset.shape, dset.shape) self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) # With array features with 
    def test_to_dict(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                # Full
                dset_to_dict = dset.to_dict()
                self.assertIsInstance(dset_to_dict, dict)
                self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))

                for col_name in dset.column_names:
                    self.assertLessEqual(len(dset_to_dict[col_name]), len(dset))

                # With index mapping
                with dset.select([1, 0, 3]) as dset:
                    dset_to_dict = dset.to_dict()
                    self.assertIsInstance(dset_to_dict, dict)
                    self.assertEqual(len(dset_to_dict), 3)
                    self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))

                    for col_name in dset.column_names:
                        self.assertIsInstance(dset_to_dict[col_name], list)
                        self.assertEqual(len(dset_to_dict[col_name]), len(dset))

    def test_to_list(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset_to_list = dset.to_list()
                self.assertIsInstance(dset_to_list, list)
                for row in dset_to_list:
                    self.assertIsInstance(row, dict)
                    self.assertListEqual(sorted(row.keys()), sorted(dset.column_names))

                # With index mapping
                with dset.select([1, 0, 3]) as dset:
                    dset_to_list = dset.to_list()
                    self.assertIsInstance(dset_to_list, list)
                    self.assertEqual(len(dset_to_list), 3)
                    for row in dset_to_list:
                        self.assertIsInstance(row, dict)
                        self.assertListEqual(sorted(row.keys()), sorted(dset.column_names))

    def test_to_pandas(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Batched
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                batch_size = dset.num_rows - 1
                to_pandas_generator = dset.to_pandas(batched=True, batch_size=batch_size)

                for batch in to_pandas_generator:
                    self.assertIsInstance(batch, pd.DataFrame)
                    self.assertListEqual(sorted(batch.columns), sorted(dset.column_names))
                    for col_name in dset.column_names:
                        self.assertLessEqual(len(batch[col_name]), batch_size)

                # Full
                dset_to_pandas = dset.to_pandas()
                self.assertIsInstance(dset_to_pandas, pd.DataFrame)
                self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))
                for col_name in dset.column_names:
                    self.assertEqual(len(dset_to_pandas[col_name]), len(dset))

                # With index mapping
                with dset.select([1, 0, 3]) as dset:
                    dset_to_pandas = dset.to_pandas()
                    self.assertIsInstance(dset_to_pandas, pd.DataFrame)
                    self.assertEqual(len(dset_to_pandas), 3)
                    self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))

                    for col_name in dset.column_names:
                        self.assertEqual(len(dset_to_pandas[col_name]), dset.num_rows)

    @require_polars
    def test_to_polars(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Batched
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                batch_size = dset.num_rows - 1
                to_polars_generator = dset.to_polars(batched=True, batch_size=batch_size)

                for batch in to_polars_generator:
                    self.assertIsInstance(batch, sys.modules["polars"].DataFrame)
                    self.assertListEqual(sorted(batch.columns), sorted(dset.column_names))
                    for col_name in dset.column_names:
                        self.assertLessEqual(len(batch[col_name]), batch_size)
                    del batch

                # Full
                dset_to_polars = dset.to_polars()
                self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame)
                self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names))
                for col_name in dset.column_names:
                    self.assertEqual(len(dset_to_polars[col_name]), len(dset))

                # With index mapping
                with dset.select([1, 0, 3]) as dset:
                    dset_to_polars = dset.to_polars()
                    self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame)
                    self.assertEqual(len(dset_to_polars), 3)
                    self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names))

                    for col_name in dset.column_names:
                        self.assertEqual(len(dset_to_polars[col_name]), dset.num_rows)
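    # Parquet is compressed, so unlike `to_csv` the tests below do not compare a
    # written-bytes count against the on-disk file size.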
    def test_to_parquet(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # File path argument
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.parquet")
                dset.to_parquet(path_or_buf=file_path)

                self.assertTrue(os.path.isfile(file_path))
                # self.assertEqual(bytes_written, os.path.getsize(file_path))  # because of compression, the number of bytes doesn't match
                parquet_dset = pd.read_parquet(file_path)

                self.assertEqual(parquet_dset.shape, dset.shape)
                self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))

            # File buffer argument
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                file_path = os.path.join(tmp_dir, "test_buffer.parquet")
                with open(file_path, "wb+") as buffer:
                    dset.to_parquet(path_or_buf=buffer)

                self.assertTrue(os.path.isfile(file_path))
                # self.assertEqual(bytes_written, os.path.getsize(file_path))  # because of compression, the number of bytes doesn't match
                parquet_dset = pd.read_parquet(file_path)

                self.assertEqual(parquet_dset.shape, dset.shape)
                self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))

            # After a select/shuffle transform
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset = dset.select(range(0, len(dset), 2)).shuffle()
                file_path = os.path.join(tmp_dir, "test_path.parquet")
                dset.to_parquet(path_or_buf=file_path)

                self.assertTrue(os.path.isfile(file_path))
                # self.assertEqual(bytes_written, os.path.getsize(file_path))  # because of compression, the number of bytes doesn't match
                parquet_dset = pd.read_parquet(file_path)

                self.assertEqual(parquet_dset.shape, dset.shape)
                self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))

            # With array features
            with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.parquet")
                dset.to_parquet(path_or_buf=file_path)

                self.assertTrue(os.path.isfile(file_path))
                # self.assertEqual(bytes_written, os.path.getsize(file_path))  # because of compression, the number of bytes doesn't match
                parquet_dset = pd.read_parquet(file_path)

                self.assertEqual(parquet_dset.shape, dset.shape)
                self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))

    @require_sqlalchemy
    def test_to_sql(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Destination specified as a database URI string
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path)

                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)

                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

            # Destination specified as a sqlite3 connection
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                import sqlite3

                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                with contextlib.closing(sqlite3.connect(file_path)) as con:
                    _ = dset.to_sql("data", con, if_exists="replace")

                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)

                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

            # Test writing to a database in chunks
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, batch_size=1, if_exists="replace")

                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)

                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

            # After a select/shuffle transform
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset = dset.select(range(0, len(dset), 2)).shuffle()
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")

                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)

                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

            # With array features
            with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
                file_path = os.path.join(tmp_dir, "test_path.sqlite")
                _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")

                self.assertTrue(os.path.isfile(file_path))
                sql_dset = pd.read_sql("data", "sqlite:///" + file_path)

                self.assertEqual(sql_dset.shape, dset.shape)
                self.assertListEqual(list(sql_dset.columns), list(dset.column_names))

    def test_train_test_split(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                fingerprint = dset._fingerprint
                dset_dict = dset.train_test_split(test_size=10, shuffle=False)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]

                self.assertEqual(len(dset_train), 20)
                self.assertEqual(len(dset_test), 10)
                self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
                self.assertEqual(dset_train[-1]["filename"], "my_name-train_19")
                self.assertEqual(dset_test[0]["filename"], "my_name-train_20")
                self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
                self.assertNotEqual(dset_train._fingerprint, fingerprint)
                self.assertNotEqual(dset_test._fingerprint, fingerprint)
                self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)

                dset_dict = dset.train_test_split(test_size=0.5, shuffle=False)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]

                self.assertEqual(len(dset_train), 15)
                self.assertEqual(len(dset_test), 15)
                self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
                self.assertEqual(dset_train[-1]["filename"], "my_name-train_14")
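    # `Dataset.shard` splits a dataset into `num_shards` pieces. A minimal
    # sketch (placeholder names):
    #
    #   shard = dset.shard(num_shards=3, index=0)                   # every 3rd row
    #   shard = dset.shard(num_shards=3, index=0, contiguous=True)  # first block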
                self.assertEqual(dset_test[0]["filename"], "my_name-train_15")
                self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))

                dset_dict = dset.train_test_split(train_size=10, shuffle=False)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]

                self.assertEqual(len(dset_train), 10)
                self.assertEqual(len(dset_test), 20)
                self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
                self.assertEqual(dset_train[-1]["filename"], "my_name-train_9")
                self.assertEqual(dset_test[0]["filename"], "my_name-train_10")
                self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))

                dset.set_format("numpy")
                dset_dict = dset.train_test_split(train_size=10, seed=42)
                self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
                dset_train = dset_dict["train"]
                dset_test = dset_dict["test"]

                self.assertEqual(len(dset_train), 10)
                self.assertEqual(len(dset_test), 20)
                self.assertEqual(dset_train.format["type"], "numpy")
                self.assertEqual(dset_test.format["type"], "numpy")
                self.assertNotEqual(dset_train[0]["filename"].item(), "my_name-train_0")
                self.assertNotEqual(dset_train[-1]["filename"].item(), "my_name-train_9")
                self.assertNotEqual(dset_test[0]["filename"].item(), "my_name-train_10")
                self.assertNotEqual(dset_test[-1]["filename"].item(), "my_name-train_29")
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
                self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
                del dset_test, dset_train, dset_dict  # DatasetDict

    def test_shard(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset:
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:
                self.assertEqual(len(dset), 10)
                # Shard
                tmp_file_1 = os.path.join(tmp_dir, "test_1.arrow")
                fingerprint = dset._fingerprint
                with dset.shard(num_shards=8, index=1, indices_cache_file_name=tmp_file_1) as dset_sharded:
                    self.assertEqual(2, len(dset_sharded))
                    self.assertEqual(["my_name-train_1", "my_name-train_9"], dset_sharded["filename"])
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_sharded.features, Features({"filename": Value("string")}))
                    self.assertNotEqual(dset_sharded._fingerprint, fingerprint)
                # Shard contiguous
                tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
                with dset.shard(
                    num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2
                ) as dset_sharded_contiguous:
                    self.assertEqual([f"my_name-train_{i}" for i in (0, 1, 2, 3)], dset_sharded_contiguous["filename"])
                    self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                    self.assertDictEqual(dset_sharded_contiguous.features, Features({"filename": Value("string")}))
                    # Test lengths of sharded contiguous
                    self.assertEqual(
                        [4, 3, 3],
                        [
                            len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i)))
                            for i in range(3)
                        ],
                    )
                # formatted
                dset.set_format("numpy")
                with dset.shard(num_shards=3, index=0) as dset_sharded_formatted:
                    self.assertEqual(dset_sharded_formatted.format["type"], "numpy")
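    # `flatten_indices` materializes the indices mapping back into a plain
    # table, after which `_indices` is expected to be None again.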
    def test_flatten_indices(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                self.assertIsNone(dset._indices)

                tmp_file = os.path.join(tmp_dir, "test.arrow")
                with dset.select(range(0, 10, 2), indices_cache_file_name=tmp_file) as dset:
                    self.assertEqual(len(dset), 5)
                    self.assertIsNotNone(dset._indices)

                    tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
                    fingerprint = dset._fingerprint
                    dset.set_format("numpy")
                    with dset.flatten_indices(cache_file_name=tmp_file_2) as dset:
                        self.assertEqual(len(dset), 5)
                        self.assertEqual(len(dset.data), len(dset))
                        self.assertIsNone(dset._indices)
                        self.assertNotEqual(dset._fingerprint, fingerprint)
                        self.assertEqual(dset.format["type"], "numpy")
                        # Test unique works
                        dset.unique(dset.column_names[0])
                        assert_arrow_metadata_are_synced_with_dataset_features(dset)

        # Empty indices mapping
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                self.assertIsNone(dset._indices, None)

                tmp_file = os.path.join(tmp_dir, "test.arrow")
                with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset:
                    self.assertEqual(len(dset), 0)
                    self.assertIsNotNone(dset._indices, None)

                    tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
                    fingerprint = dset._fingerprint
                    dset.set_format("numpy")
                    with dset.flatten_indices(cache_file_name=tmp_file_2) as dset:
                        self.assertEqual(len(dset), 0)
                        self.assertEqual(len(dset.data), len(dset))
                        self.assertIsNone(dset._indices, None)
                        self.assertNotEqual(dset._fingerprint, fingerprint)
                        self.assertEqual(dset.format["type"], "numpy")
                        # Test unique works
                        dset.unique(dset.column_names[0])
                        assert_arrow_metadata_are_synced_with_dataset_features(dset)

    @require_tf
    @require_torch
    def test_format_vectors(self, in_memory):
        import numpy as np
        import tensorflow as tf
        import torch

        with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(
            in_memory, tmp_dir
        ) as dset, dset.map(lambda ex, i: {"vec": np.ones(3) * i}, with_indices=True) as dset:
            columns = dset.column_names

            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (str, list))
                self.assertIsInstance(dset[:2][col], list)
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
            )

            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))
            self.assertTupleEqual(tuple(dset[:2]["vec"].shape), (2, 3))
            self.assertTupleEqual(tuple(dset["vec"][:2].shape), (2, 3))

            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[0]["filename"], np.str_)
            self.assertIsInstance(dset[:2]["filename"], np.ndarray)
            self.assertIsInstance(dset["filename"], np.ndarray)
            self.assertIsInstance(dset[0]["vec"], np.ndarray)
            self.assertIsInstance(dset[:2]["vec"], np.ndarray)
            self.assertIsInstance(dset["vec"], np.ndarray)
            self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3))
            self.assertTupleEqual(dset["vec"][:2].shape, (2, 3))

            dset.set_format("torch", columns=["vec"])
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            # torch.Tensor is only used for numerical columns
            self.assertIsInstance(dset[0]["vec"], torch.Tensor)
            self.assertIsInstance(dset[:2]["vec"], torch.Tensor)
            self.assertIsInstance(dset["vec"][:2], torch.Tensor)
            self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3))
            self.assertTupleEqual(dset["vec"][:2].shape, (2, 3))
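    # Ragged (variable-length) vectors behave differently per backend, as the
    # assertions below check: tensorflow yields tf.RaggedTensor, numpy collapses
    # to a flat array of per-row arrays, and torch falls back to lists of tensors.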
            self.assertIsNotNone(dset[:2])
            # torch.Tensor is only for numerical columns
            self.assertIsInstance(dset[0]["vec"], torch.Tensor)
            self.assertIsInstance(dset[:2]["vec"], torch.Tensor)
            self.assertIsInstance(dset["vec"][:2], torch.Tensor)
            self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3))
            self.assertTupleEqual(dset["vec"][:2].shape, (2, 3))

    @require_tf
    @require_torch
    def test_format_ragged_vectors(self, in_memory):
        import numpy as np
        import tensorflow as tf
        import torch

        with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(
            in_memory, tmp_dir
        ) as dset, dset.map(lambda ex, i: {"vec": np.ones(3 + i) * i}, with_indices=True) as dset:
            columns = dset.column_names

            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (str, list))
                self.assertIsInstance(dset[:2][col], list)
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
            )

            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], tf.Tensor)
                self.assertIsInstance(dset[:2][col], tf.RaggedTensor if col == "vec" else tf.Tensor)
                self.assertIsInstance(dset[col], tf.RaggedTensor if col == "vec" else tf.Tensor)
            # dim is None for ragged vectors in tensorflow
            self.assertListEqual(dset[:2]["vec"].shape.as_list(), [2, None])
            self.assertListEqual(dset["vec"][:2].shape.as_list(), [2, None])

            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[0]["filename"], np.str_)
            self.assertIsInstance(dset[:2]["filename"], np.ndarray)
            self.assertIsInstance(dset["filename"], np.ndarray)
            self.assertIsInstance(dset[0]["vec"], np.ndarray)
            self.assertIsInstance(dset[:2]["vec"], np.ndarray)
            self.assertIsInstance(dset["vec"], np.ndarray)
            # array is flat for ragged vectors in numpy
            self.assertTupleEqual(dset[:2]["vec"].shape, (2,))
            self.assertTupleEqual(dset["vec"][:2].shape, (2,))

            dset.set_format("torch")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[0]["filename"], str)
            self.assertIsInstance(dset[:2]["filename"], list)
            self.assertIsInstance(dset["filename"], list)
            self.assertIsInstance(dset[0]["vec"], torch.Tensor)
            self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor)
            self.assertIsInstance(dset["vec"][0], torch.Tensor)
            # pytorch doesn't support ragged tensors, so we should have lists
            self.assertIsInstance(dset[:2]["vec"], list)
            self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor)
            self.assertIsInstance(dset["vec"][:2], list)
            self.assertIsInstance(dset["vec"][0], torch.Tensor)

    @require_tf
    @require_torch
    def test_format_nested(self, in_memory):
        import numpy as np
        import tensorflow as tf
        import torch

        with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(
            in_memory, tmp_dir
        ) as dset, dset.map(lambda ex: {"nested": [{"foo": np.ones(3)}] * len(ex["filename"])}, batched=True) as dset:
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "nested": {"foo": Sequence(Value("float64"))}})
            )

            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], (tf.Tensor, tf.RaggedTensor))
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))
            self.assertIsInstance(dset["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))

            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], np.ndarray)
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], np.ndarray)
            self.assertIsInstance(dset["nested"][0]["foo"], np.ndarray)

            dset.set_format("torch", columns="nested")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], torch.Tensor)
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], torch.Tensor)
            self.assertIsInstance(dset["nested"][0]["foo"], torch.Tensor)

    def test_format_pandas(self, in_memory):
        import pandas as pd

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset.set_format("pandas")
                self.assertIsInstance(dset[0], pd.DataFrame)
                self.assertIsInstance(dset[:2], pd.DataFrame)
                self.assertIsInstance(dset["col_1"], pd.Series)

    @require_polars
    def test_format_polars(self, in_memory):
        import polars as pl

        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                dset.set_format("polars")
                self.assertIsInstance(dset[0], pl.DataFrame)
                self.assertIsInstance(dset[:2], pl.DataFrame)
                self.assertIsInstance(dset["col_1"], pl.Series)

    def test_transmit_format_single(self, in_memory):
        @transmit_format
        def my_single_transform(self, return_factory, *args, **kwargs):
            return return_factory()

        with tempfile.TemporaryDirectory() as tmp_dir:
            return_factory = partial(
                self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
            )
            with return_factory() as dset:
                dset.set_format("numpy", columns=["col_1"])
                prev_format = dset.format
                with my_single_transform(dset, return_factory) as transformed_dset:
                    self.assertDictEqual(transformed_dset.format, prev_format)

    def test_transmit_format_dict(self, in_memory):
        @transmit_format
        def my_split_transform(self, return_factory, *args, **kwargs):
            return DatasetDict({"train": return_factory()})

        with tempfile.TemporaryDirectory() as tmp_dir:
            return_factory = partial(
                self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
            )
            with return_factory() as dset:
                dset.set_format("numpy", columns=["col_1"])
                prev_format = dset.format
                transformed_dset = my_split_transform(dset, return_factory)["train"]
                self.assertDictEqual(transformed_dset.format, prev_format)

        del transformed_dset  # DatasetDict

    def test_with_format(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                with dset.with_format("numpy", columns=["col_1"]) as dset2:
                    dset.set_format("numpy", columns=["col_1"])
                    self.assertDictEqual(dset.format, dset2.format)
                    self.assertEqual(dset._fingerprint, dset2._fingerprint)
                    # dset.reset_format()
                    # self.assertNotEqual(dset.format, dset2.format)
                    # self.assertNotEqual(dset._fingerprint, dset2._fingerprint)

    def test_with_transform(self, in_memory):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
                transform = lambda x: {"foo": x["col_1"]}  # noqa: E731
                with dset.with_transform(transform, columns=["col_1"]) as dset2:
                    dset.set_transform(transform, columns=["col_1"])
                    self.assertDictEqual(dset.format, dset2.format)
                    self.assertEqual(dset._fingerprint, dset2._fingerprint)
                    dset.reset_format()
                    self.assertNotEqual(dset.format, dset2.format)
                    self.assertNotEqual(dset._fingerprint, dset2._fingerprint)

    @require_tf
    def test_tf_dataset_conversion(self, in_memory):
        tmp_dir = tempfile.TemporaryDirectory()
        for num_workers in [0, 1, 2]:
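            # Note (added comment): num_workers=0 runs the conversion in-process, while
            # num_workers > 0 exercises the multiprocess data-loading path of to_tf_dataset.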
            if num_workers > 0 and sys.platform == "win32" and not in_memory:
                continue  # This test hangs on the Py3.10 test worker, but it runs fine locally on my Windows machine
            with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:
                tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2, num_workers=num_workers)
                batch = next(iter(tf_dataset))
                self.assertEqual(batch.shape.as_list(), [2, 4])
                self.assertEqual(batch.dtype.name, "int64")
            with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=2, num_workers=num_workers)
                batch = next(iter(tf_dataset))
                self.assertEqual(batch.shape.as_list(), [2])
                self.assertEqual(batch.dtype.name, "int64")
            with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                # Check that it works with all default options (except batch_size because the dummy dataset only has 4)
                tf_dataset = dset.to_tf_dataset(batch_size=2, num_workers=num_workers)
                batch = next(iter(tf_dataset))
                self.assertEqual(batch["col_1"].shape.as_list(), [2])
                self.assertEqual(batch["col_2"].shape.as_list(), [2])
                self.assertEqual(batch["col_1"].dtype.name, "int64")
                self.assertEqual(batch["col_2"].dtype.name, "string")  # Assert that we're converting strings properly
            with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                # Check that when we use a transform that creates a new column from existing column values
                # but don't load the old columns that the new column depends on in the final dataset,
                # that they're still kept around long enough to be used in the transform
                transform_dset = dset.with_transform(
                    lambda x: {"new_col": [val * 2 for val in x["col_1"]], "col_1": x["col_1"]}
                )
                tf_dataset = transform_dset.to_tf_dataset(columns="new_col", batch_size=2, num_workers=num_workers)
                batch = next(iter(tf_dataset))
                self.assertEqual(batch.shape.as_list(), [2])
                self.assertEqual(batch.dtype.name, "int64")
                del transform_dset
        del tf_dataset  # For correct cleanup

    @require_tf
    def test_tf_index_reshuffling(self, in_memory):
        # This test checks that when we do two epochs over a tf.data.Dataset from to_tf_dataset
        # we get a different shuffle order each time.
        # It also checks that when we aren't shuffling, the dataset order is fully preserved
        # even when loading is split across multiple workers.
        data = {"col_1": list(range(20))}
        for num_workers in [0, 1, 2, 3]:
            with Dataset.from_dict(data) as dset:
                tf_dataset = dset.to_tf_dataset(batch_size=10, shuffle=True, num_workers=num_workers)
                indices = []
                for batch in tf_dataset:
                    indices.append(batch["col_1"])
                indices = np.concatenate([arr.numpy() for arr in indices])
                second_indices = []
                for batch in tf_dataset:
                    second_indices.append(batch["col_1"])
                second_indices = np.concatenate([arr.numpy() for arr in second_indices])
                self.assertFalse(np.array_equal(indices, second_indices))
                self.assertEqual(len(indices), len(np.unique(indices)))
                self.assertEqual(len(second_indices), len(np.unique(second_indices)))

                tf_dataset = dset.to_tf_dataset(batch_size=1, shuffle=False, num_workers=num_workers)
                for i, batch in enumerate(tf_dataset):
                    # Assert that the unshuffled order is fully preserved even when multiprocessing
                    self.assertEqual(i, batch["col_1"].numpy())

    @require_tf
    def test_tf_label_renaming(self, in_memory):
        # Protect TF-specific imports in here
        import tensorflow as tf

        from datasets.utils.tf_utils import minimal_tf_collate_fn_with_renaming

        tmp_dir = tempfile.TemporaryDirectory()
        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
            with dset.rename_columns({"col_1": "features", "col_2": "label"}) as new_dset:
                tf_dataset = new_dset.to_tf_dataset(collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4)
                batch = next(iter(tf_dataset))
                self.assertTrue("labels" in batch and "features" in batch)

                tf_dataset = new_dset.to_tf_dataset(
                    columns=["features", "labels"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4
                )
                batch = next(iter(tf_dataset))
                self.assertTrue("labels" in batch and "features" in batch)

                tf_dataset = new_dset.to_tf_dataset(
                    columns=["features", "label"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4
                )
                batch = next(iter(tf_dataset))
                self.assertTrue("labels" in batch and "features" in batch)  # Assert renaming was handled correctly

                tf_dataset = new_dset.to_tf_dataset(
                    columns=["features"],
                    label_cols=["labels"],
                    collate_fn=minimal_tf_collate_fn_with_renaming,
                    batch_size=4,
                )
                batch = next(iter(tf_dataset))
                self.assertEqual(len(batch), 2)
                # Assert that we don't have any empty entries here
                self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor))

                tf_dataset = new_dset.to_tf_dataset(
                    columns=["features"],
                    label_cols=["label"],
                    collate_fn=minimal_tf_collate_fn_with_renaming,
                    batch_size=4,
                )
                batch = next(iter(tf_dataset))
                self.assertEqual(len(batch), 2)
                # Assert that we don't have any empty entries here
                self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor))

                tf_dataset = new_dset.to_tf_dataset(
                    columns=["features"],
                    collate_fn=minimal_tf_collate_fn_with_renaming,
                    batch_size=4,
                )
                batch = next(iter(tf_dataset))
                # Assert that labels didn't creep in when we don't ask for them
                # just because the collate_fn added them
                self.assertTrue(isinstance(batch, tf.Tensor))

        del tf_dataset  # For correct cleanup

    @require_tf
    def test_tf_dataset_options(self, in_memory):
        tmp_dir = tempfile.TemporaryDirectory()
        # Test that batch_size option works as expected
        with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:
            tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2)
            batch = next(iter(tf_dataset))
            self.assertEqual(batch.shape.as_list(), [2, 4])
            self.assertEqual(batch.dtype.name, "int64")
        # Test that batch_size=None (optional) works as expected
        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
            tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=None)
            single_example = next(iter(tf_dataset))
            self.assertEqual(single_example.shape.as_list(), [])
            self.assertEqual(single_example.dtype.name, "int64")
            # Assert that we can batch it with `tf.data.Dataset.batch` method
            batched_dataset = tf_dataset.batch(batch_size=2)
            batch = next(iter(batched_dataset))
            self.assertEqual(batch.shape.as_list(), [2])
            self.assertEqual(batch.dtype.name, "int64")
        # Test that batching a batch_size=None dataset produces the same results as using batch_size arg
        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
            batch_size = 2
            tf_dataset_no_batch = dset.to_tf_dataset(columns="col_3")
            tf_dataset_batch = dset.to_tf_dataset(columns="col_3", batch_size=batch_size)
            self.assertEqual(tf_dataset_no_batch.element_spec, tf_dataset_batch.unbatch().element_spec)
            self.assertEqual(tf_dataset_no_batch.cardinality(), tf_dataset_batch.cardinality() * batch_size)
            for batch_1, batch_2 in zip(tf_dataset_no_batch.batch(batch_size=batch_size), tf_dataset_batch):
                self.assertEqual(batch_1.shape, batch_2.shape)
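                # Note (added comment): matching shapes alone would not prove equivalence,
                # so dtypes and values are compared next.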
                self.assertEqual(batch_1.dtype, batch_2.dtype)
                self.assertListEqual(batch_1.numpy().tolist(), batch_2.numpy().tolist())
        # Test that requesting label_cols works as expected
        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
            tf_dataset = dset.to_tf_dataset(columns="col_1", label_cols=["col_2", "col_3"], batch_size=4)
            batch = next(iter(tf_dataset))
            self.assertEqual(len(batch), 2)
            self.assertEqual(set(batch[1].keys()), {"col_2", "col_3"})
            self.assertEqual(batch[0].dtype.name, "int64")
            # Assert data comes out as expected and isn't shuffled
            self.assertEqual(batch[0].numpy().tolist(), [3, 2, 1, 0])
            self.assertEqual(batch[1]["col_2"].numpy().tolist(), [b"a", b"b", b"c", b"d"])
            self.assertEqual(batch[1]["col_3"].numpy().tolist(), [0, 1, 0, 1])
        # Check that incomplete batches are dropped if requested
        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
            tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=3)
            tf_dataset_with_drop = dset.to_tf_dataset(columns="col_1", batch_size=3, drop_remainder=True)
            self.assertEqual(len(tf_dataset), 2)  # One batch of 3 and one batch of 1
            self.assertEqual(len(tf_dataset_with_drop), 1)  # Incomplete batch of 1 is dropped
        # Test that `NotImplementedError` is raised when `batch_size` is None and `num_workers` > 0
        if sys.version_info >= (3, 8):
            with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                with self.assertRaisesRegex(
                    NotImplementedError, "`batch_size` must be specified when using multiple workers"
                ):
                    dset.to_tf_dataset(columns="col_1", batch_size=None, num_workers=2)
        del tf_dataset  # For correct cleanup
        del tf_dataset_with_drop


class MiscellaneousDatasetTest(TestCase):
    def test_from_pandas(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        df = pd.DataFrame.from_dict(data)
        with Dataset.from_pandas(df) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))

        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        with Dataset.from_pandas(df, features=features) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))

        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        with Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features)) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))

        features = Features({"col_1": Sequence(Value("string")), "col_2": Value("string")})
        self.assertRaises(TypeError, Dataset.from_pandas, df, features=features)

    @require_polars
    def test_from_polars(self):
        import polars as pl

        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        df = pl.from_dict(data)
        with Dataset.from_polars(df) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
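            # Note (added comment): Polars stores strings as Arrow large_string,
            # hence Value("large_string") rather than Value("string") in the assertions below.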
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))

        features = Features({"col_1": Value("int64"), "col_2": Value("large_string")})
        with Dataset.from_polars(df, features=features) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))

        features = Features({"col_1": Value("int64"), "col_2": Value("large_string")})
        with Dataset.from_polars(df, features=features, info=DatasetInfo(features=features)) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
            self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))

        features = Features({"col_1": Sequence(Value("string")), "col_2": Value("large_string")})
        self.assertRaises(TypeError, Dataset.from_polars, df, features=features)

    def test_from_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": pa.array([True, False, True, False])}
        with Dataset.from_dict(data) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
            self.assertDictEqual(
                dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
            )

        features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
        with Dataset.from_dict(data, features=features) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
            self.assertDictEqual(
                dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
            )

        features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
        with Dataset.from_dict(data, features=features, info=DatasetInfo(features=features)) as dset:
            self.assertListEqual(dset["col_1"], data["col_1"])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
            self.assertDictEqual(
                dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
            )

        features = Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")})
        with Dataset.from_dict(data, features=features) as dset:
            # the integers are converted to strings
            self.assertListEqual(dset["col_1"], [str(x) for x in data["col_1"]])
            self.assertListEqual(dset["col_2"], data["col_2"])
            self.assertListEqual(dset["col_3"], [int(x) for x in data["col_3"].to_pylist()])
            self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
            self.assertDictEqual(
                dset.features, Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")})
            )

        features = Features({"col_1": Value("int64"), "col_2": Value("int64"), "col_3": Value("bool")})
        self.assertRaises(ValueError, Dataset.from_dict, data, features=features)

    def test_concatenate_mixed_memory_and_disk(self):
        data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            with Dataset.from_dict(data1, info=info1).map(
                cache_file_name=os.path.join(tmp_dir, "d1.arrow")
            ) as dset1, Dataset.from_dict(data2, info=info2).map(
                cache_file_name=os.path.join(tmp_dir, "d2.arrow")
            ) as dset2, Dataset.from_dict(data3) as dset3:
                with concatenate_datasets([dset1, dset2, dset3]) as concatenated_dset:
                    self.assertEqual(len(concatenated_dset), len(dset1) + len(dset2) + len(dset3))
                    self.assertListEqual(concatenated_dset["id"], dset1["id"] + dset2["id"] + dset3["id"])

    @require_transformers
    @pytest.mark.integration
    def test_set_format_encode(self):
        from transformers import BertTokenizer

        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        def encode(batch):
            return tokenizer(batch["text"], padding="longest", return_tensors="np")

        with Dataset.from_dict({"text": ["hello there", "foo"]}) as dset:
            dset.set_transform(transform=encode)
            self.assertEqual(str(dset[:2]), str(encode({"text": ["hello there", "foo"]})))

    @require_tf
    def test_tf_string_encoding(self):
        data = {"col_1": ["á", "é", "í", "ó", "ú"], "col_2": ["à", "è", "ì", "ò", "ù"]}
        with Dataset.from_dict(data) as dset:
            tf_dset_wo_batch = dset.to_tf_dataset(columns=["col_1", "col_2"])
            for tf_row, row in zip(tf_dset_wo_batch, dset):
                self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"])
                self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"])

            tf_dset_w_batch = dset.to_tf_dataset(columns=["col_1", "col_2"], batch_size=2)
            for tf_row, row in zip(tf_dset_w_batch.unbatch(), dset):
                self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"])
                self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"])

            self.assertEqual(tf_dset_w_batch.unbatch().element_spec, tf_dset_wo_batch.element_spec)
            self.assertEqual(tf_dset_w_batch.element_spec, tf_dset_wo_batch.batch(2).element_spec)


def test_cast_with_sliced_list():
    old_features = Features({"foo": Sequence(Value("int64"))})
    new_features = Features({"foo": Sequence(Value("int32"))})
    dataset = Dataset.from_dict({"foo": [[i] * (i % 3) for i in range(20)]}, features=old_features)
    casted_dataset = dataset.cast(new_features, batch_size=2)  # small batch size to slice the ListArray
    assert dataset["foo"] == casted_dataset["foo"]
    assert casted_dataset.features == new_features


@pytest.mark.parametrize("include_nulls", [False, True])
def test_class_encode_column_with_none(include_nulls):
    dataset = Dataset.from_dict({"col_1": ["a", "b", "c", None, "d", None]})
    dataset = dataset.class_encode_column("col_1", include_nulls=include_nulls)
    class_names = ["a", "b", "c", "d"]
    if include_nulls:
        class_names += ["None"]
    assert isinstance(dataset.features["col_1"], ClassLabel)
    assert set(dataset.features["col_1"].names) == set(class_names)
    assert (None in dataset.unique("col_1")) == (not include_nulls)


@pytest.mark.parametrize("null_placement", ["first", "last"])
def test_sort_with_none(null_placement):
    dataset = Dataset.from_dict({"col_1": ["item_2", "item_3", "item_1", None, "item_4", None]})
    dataset = dataset.sort("col_1", null_placement=null_placement)
    if null_placement == "first":
        assert dataset["col_1"] == [None, None, "item_1", "item_2", "item_3", "item_4"]
    else:
        assert dataset["col_1"] == ["item_1", "item_2", "item_3", "item_4", None, None]


def test_update_metadata_with_features(dataset_dict):
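    # Note (added comment): this test checks that the "huggingface" metadata key embedded in the
    # Arrow schema stays in sync with the Features object after update_metadata_with_features.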
    table1 = pa.Table.from_pydict(dataset_dict)
    features1 = Features.from_arrow_schema(table1.schema)
    features2 = features1.copy()
    features2["col_2"] = ClassLabel(num_classes=len(table1))
    assert features1 != features2

    table2 = update_metadata_with_features(table1, features2)
    metadata = json.loads(table2.schema.metadata[b"huggingface"].decode())
    assert features2 == Features.from_dict(metadata["info"]["features"])

    with Dataset(table1) as dset1, Dataset(table2) as dset2:
        assert dset1.features == features1
        assert dset2.features == features2


@pytest.mark.parametrize("dataset_type", ["in_memory", "memory_mapped", "mixed"])
@pytest.mark.parametrize("axis, expected_shape", [(0, (4, 3)), (1, (2, 6))])
def test_concatenate_datasets(dataset_type, axis, expected_shape, dataset_dict, arrow_path):
    table = {
        "in_memory": InMemoryTable.from_pydict(dataset_dict),
        "memory_mapped": MemoryMappedTable.from_file(arrow_path),
    }
    tables = [
        table[dataset_type if dataset_type != "mixed" else "memory_mapped"].slice(0, 2),  # shape = (2, 3)
        table[dataset_type if dataset_type != "mixed" else "in_memory"].slice(2, 4),  # shape = (2, 3)
    ]
    if axis == 1:  # don't duplicate columns
        tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names])
    datasets = [Dataset(table) for table in tables]
    dataset = concatenate_datasets(datasets, axis=axis)
    assert dataset.shape == expected_shape
    assert_arrow_metadata_are_synced_with_dataset_features(dataset)


def test_concatenate_datasets_new_columns():
    dataset1 = Dataset.from_dict({"col_1": ["a", "b", "c"]})
    dataset2 = Dataset.from_dict({"col_1": ["d", "e", "f"], "col_2": [True, False, True]})
    dataset = concatenate_datasets([dataset1, dataset2])
    assert dataset.data.shape == (6, 2)
    assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool")})
    assert dataset[:] == {"col_1": ["a", "b", "c", "d", "e", "f"], "col_2": [None, None, None, True, False, True]}
    dataset3 = Dataset.from_dict({"col_3": ["a_1"]})
    dataset = concatenate_datasets([dataset, dataset3])
    assert dataset.data.shape == (7, 3)
    assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool"), "col_3": Value("string")})
    assert dataset[:] == {
        "col_1": ["a", "b", "c", "d", "e", "f", None],
        "col_2": [None, None, None, True, False, True, None],
        "col_3": [None, None, None, None, None, None, "a_1"],
    }


@pytest.mark.parametrize("axis", [0, 1])
def test_concatenate_datasets_complex_features(axis):
    n = 5
    dataset1 = Dataset.from_dict(
        {"col_1": [0] * n, "col_2": list(range(n))},
        features=Features({"col_1": Value("int32"), "col_2": ClassLabel(num_classes=n)}),
    )
    if axis == 1:
        dataset2 = dataset1.rename_columns({col: col + "_" for col in dataset1.column_names})
        expected_features = Features({**dataset1.features, **dataset2.features})
    else:
        dataset2 = dataset1
        expected_features = dataset1.features
    assert concatenate_datasets([dataset1, dataset2], axis=axis).features == expected_features


@pytest.mark.parametrize("other_dataset_type", ["in_memory", "memory_mapped", "concatenation"])
@pytest.mark.parametrize("axis, expected_shape", [(0, (8, 3)), (1, (4, 6))])
def test_concatenate_datasets_with_concatenation_tables(
    axis, expected_shape, other_dataset_type, dataset_dict, arrow_path
):
    def _create_concatenation_table(axis):
        if axis == 0:  # shape: (4, 3) = (4, 1) + (4, 2)
            concatenation_table = ConcatenationTable.from_blocks(
                [
                    [
                        InMemoryTable.from_pydict({"col_1": dataset_dict["col_1"]}),
                        MemoryMappedTable.from_file(arrow_path).remove_column(0),
                    ]
                ]
            )
        elif axis == 1:  # shape: (4, 3) = (1, 3) + (3, 3)
            concatenation_table = ConcatenationTable.from_blocks(
                [
                    [InMemoryTable.from_pydict(dataset_dict).slice(0, 1)],
                    [MemoryMappedTable.from_file(arrow_path).slice(1, 4)],
                ]
            )
        return concatenation_table

    concatenation_table = _create_concatenation_table(axis)
    assert concatenation_table.shape == (4, 3)

    if other_dataset_type == "in_memory":
        other_table = InMemoryTable.from_pydict(dataset_dict)
    elif other_dataset_type == "memory_mapped":
        other_table = MemoryMappedTable.from_file(arrow_path)
    elif other_dataset_type == "concatenation":
        other_table = _create_concatenation_table(axis)
    assert other_table.shape == (4, 3)

    tables = [concatenation_table, other_table]

    if axis == 1:  # don't duplicate columns
        tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names])

    for tables in [tables, reversed(tables)]:
        datasets = [Dataset(table) for table in tables]
        dataset = concatenate_datasets(datasets, axis=axis)
        assert dataset.shape == expected_shape


def test_concatenate_datasets_duplicate_columns(dataset):
    with pytest.raises(ValueError) as excinfo:
        concatenate_datasets([dataset, dataset], axis=1)
    assert "duplicated" in str(excinfo.value)


def test_interleave_datasets():
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
    d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
    dataset = interleave_datasets([d1, d2, d3])
    expected_length = 3 * min(len(d1), len(d2), len(d3))
    expected_values = [x["a"] for x in itertools.chain(*zip(d1, d2, d3))]
    assert isinstance(dataset, Dataset)
    assert len(dataset) == expected_length
    assert dataset["a"] == expected_values
    assert dataset._fingerprint == interleave_datasets([d1, d2, d3])._fingerprint


def test_interleave_datasets_probabilities():
    seed = 42
    probabilities = [0.3, 0.5, 0.2]
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
    d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
    dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
    expected_length = 7  # hardcoded
    expected_values = [10, 11, 20, 12, 0, 21, 13]  # hardcoded
    assert isinstance(dataset, Dataset)
    assert len(dataset) == expected_length
    assert dataset["a"] == expected_values
    assert (
        dataset._fingerprint == interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)._fingerprint
    )


def test_interleave_datasets_oversampling_strategy():
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
    d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
    dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
    expected_length = 3 * max(len(d1), len(d2), len(d3))
    expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20]  # hardcoded
    assert isinstance(dataset, Dataset)
    assert len(dataset) == expected_length
    assert dataset["a"] == expected_values
    assert dataset._fingerprint == interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")._fingerprint


def test_interleave_datasets_probabilities_oversampling_strategy():
    seed = 42
    probabilities = [0.3, 0.5, 0.2]
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
    d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
    dataset = interleave_datasets(
        [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed
    )
    expected_length = 16  # hardcoded
    expected_values = [10, 11, 20, 12, 0, 21, 13, 10, 1, 11, 12, 22, 13, 20, 10, 2]  # hardcoded
    assert isinstance(dataset, Dataset)
    assert len(dataset) == expected_length
    assert dataset["a"] == expected_values
    assert (
        dataset._fingerprint
        == interleave_datasets(
            [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed
        )._fingerprint
    )


@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_dataset_iter_batch(batch_size, drop_last_batch):
    n = 25
    dset = Dataset.from_dict({"i": list(range(n))})
    all_col_values = list(range(n))
    batches = []
    for i, batch in enumerate(dset.iter(batch_size, drop_last_batch=drop_last_batch)):
        assert batch == {"i": all_col_values[i * batch_size : (i + 1) * batch_size]}
        batches.append(batch)
    if drop_last_batch:
        assert all(len(batch["i"]) == batch_size for batch in batches)
    else:
        assert all(len(batch["i"]) == batch_size for batch in batches[:-1])
        assert len(batches[-1]["i"]) <= batch_size


@pytest.mark.parametrize(
    "column, expected_dtype",
    [(["a", "b", "c", "d"], "string"), ([1, 2, 3, 4], "int64"), ([1.0, 2.0, 3.0, 4.0], "float64")],
)
@pytest.mark.parametrize("in_memory", [False, True])
@pytest.mark.parametrize(
    "transform",
    [
        None,
        ("shuffle", (42,), {}),
        ("with_format", ("pandas",), {}),
        ("class_encode_column", ("col_2",), {}),
        ("select", (range(3),), {}),
    ],
)
def test_dataset_add_column(column, expected_dtype, in_memory, transform, dataset_dict, arrow_path):
    column_name = "col_4"
    original_dataset = (
        Dataset(InMemoryTable.from_pydict(dataset_dict))
        if in_memory
        else Dataset(MemoryMappedTable.from_file(arrow_path))
    )
    if transform is not None:
        transform_name, args, kwargs = transform
        original_dataset: Dataset = getattr(original_dataset, transform_name)(*args, **kwargs)
    column = column[:3] if transform is not None and transform_name == "select" else column
    dataset = original_dataset.add_column(column_name, column)
    assert dataset.data.shape == (3, 4) if transform is not None and transform_name == "select" else (4, 4)
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # Sort expected features as in the original dataset
    expected_features = {feature: expected_features[feature] for feature in original_dataset.features}
    # Add new column feature
    expected_features[column_name] = expected_dtype
    assert dataset.data.column_names == list(expected_features.keys())
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
    assert len(dataset.data.blocks) == 1 if in_memory else 2  # multiple InMemoryTables are consolidated as one
    assert dataset.format["type"] == original_dataset.format["type"]
    assert dataset._fingerprint != original_dataset._fingerprint
    dataset.reset_format()
    original_dataset.reset_format()
    assert all(dataset[col] == original_dataset[col] for col in original_dataset.column_names)
    assert set(dataset["col_4"]) == set(column)
    if dataset._indices is not None:
        dataset_indices = dataset._indices["indices"].to_pylist()
        expected_dataset_indices = original_dataset._indices["indices"].to_pylist()
        assert dataset_indices == expected_dataset_indices
    assert_arrow_metadata_are_synced_with_dataset_features(dataset)


@pytest.mark.parametrize(
    "transform",
    [None, ("shuffle", (42,), {}), ("with_format", ("pandas",), {}), ("class_encode_column", ("col_2",), {})],
)
@pytest.mark.parametrize("in_memory", [False, True])
@pytest.mark.parametrize(
    "item",
    [
        {"col_1": "2", "col_2": 2, "col_3": 2.0},
        {"col_1": "2", "col_2": "2", "col_3": "2"},
        {"col_1": 2, "col_2": 2, "col_3": 2},
        {"col_1": 2.0, "col_2": 2.0, "col_3": 2.0},
    ],
)
def test_dataset_add_item(item, in_memory, dataset_dict, arrow_path, transform):
    dataset_to_test = (
        Dataset(InMemoryTable.from_pydict(dataset_dict))
        if in_memory
        else Dataset(MemoryMappedTable.from_file(arrow_path))
    )
    if transform is not None:
        transform_name, args, kwargs = transform
        dataset_to_test: Dataset = getattr(dataset_to_test, transform_name)(*args, **kwargs)
    dataset = dataset_to_test.add_item(item)
    assert dataset.data.shape == (5, 3)
    expected_features = dataset_to_test.features
    assert sorted(dataset.data.column_names) == sorted(expected_features.keys())
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature] == expected_dtype
    assert len(dataset.data.blocks) == 1 if in_memory else 2  # multiple InMemoryTables are consolidated as one
    assert dataset.format["type"] == dataset_to_test.format["type"]
    assert dataset._fingerprint != dataset_to_test._fingerprint
    dataset.reset_format()
    dataset_to_test.reset_format()
    assert dataset[:-1] == dataset_to_test[:]
    assert {k: int(v) for k, v in dataset[-1].items()} == {k: int(v) for k, v in item.items()}
    if dataset._indices is not None:
        dataset_indices = dataset._indices["indices"].to_pylist()
        dataset_to_test_indices = dataset_to_test._indices["indices"].to_pylist()
        assert dataset_indices == dataset_to_test_indices + [len(dataset_to_test._data)]


def test_dataset_add_item_new_columns():
    dataset = Dataset.from_dict({"col_1": [0, 1, 2]}, features=Features({"col_1": Value("uint8")}))
    dataset = dataset.add_item({"col_1": 3, "col_2": "a"})
    assert dataset.data.shape == (4, 2)
    assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string")})
    assert dataset[:] == {"col_1": [0, 1, 2, 3], "col_2": [None, None, None, "a"]}
    dataset = dataset.add_item({"col_3": True})
    assert dataset.data.shape == (5, 3)
    assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string"), "col_3": Value("bool")})
    assert dataset[:] == {
        "col_1": [0, 1, 2, 3, None],
        "col_2": [None, None, None, "a", None],
        "col_3": [None, None, None, None, True],
    }


def test_dataset_add_item_introduce_feature_type():
    dataset = Dataset.from_dict({"col_1": [None, None, None]})
    dataset = dataset.add_item({"col_1": "a"})
    assert dataset.data.shape == (4, 1)
    assert dataset.features == Features({"col_1": Value("string")})
    assert dataset[:] == {"col_1": [None, None, None, "a"]}


def test_dataset_filter_batched_indices():
    ds = Dataset.from_dict({"num": [0, 1, 2, 3]})
    ds = ds.filter(lambda num: num % 2 == 0, input_columns="num", batch_size=2)
    assert all(item["num"] % 2 == 0 for item in ds)


@pytest.mark.parametrize("in_memory", [False, True])
def test_dataset_from_file(in_memory, dataset, arrow_file):
    filename = arrow_file
    with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
        dataset_from_file = Dataset.from_file(filename, in_memory=in_memory)
    assert dataset_from_file.features.type == dataset.features.type
    assert dataset_from_file.features == dataset.features
    assert dataset_from_file.cache_files == ([{"filename": filename}] if not in_memory else [])


def _check_csv_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
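    # Note (added comment): keep_in_memory=True loads the table into RAM (Arrow memory increases),
    # while keep_in_memory=False memory-maps the on-disk cache file instead.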
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_csv_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_csv_features(features, csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = Dataset.from_csv(csv_path, features=features, cache_dir=cache_dir)
    _check_csv_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_csv_split(split, csv_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, split=split)
    _check_csv_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):
    if issubclass(path_type, str):
        path = csv_path
    elif issubclass(path_type, list):
        path = [csv_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_csv(path, cache_dir=cache_dir)
    _check_csv_dataset(dataset, expected_features)


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = Dataset.from_json(jsonl_path, features=features, cache_dir=cache_dir)
    _check_json_dataset(dataset, expected_features)


def test_dataset_from_json_with_class_label_feature(jsonl_str_path, tmp_path):
    features = Features(
        {
            "col_1": ClassLabel(names=["s0", "s1", "s2", "s3"]),
            "col_2": Value("int64"),
            "col_3": Value("float64"),
        }
    )
    cache_dir = tmp_path / "cache"
    dataset = Dataset.from_json(jsonl_str_path, features=features, cache_dir=cache_dir)
    assert dataset.features["col_1"].dtype == "int64"


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, split=split)
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_json(path, cache_dir=cache_dir)
    _check_json_dataset(dataset, expected_features)


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = Dataset.from_parquet(parquet_path, features=features, cache_dir=cache_dir)
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, split=split)
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_parquet(path, cache_dir=cache_dir)
    _check_parquet_dataset(dataset, expected_features)


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = Dataset.from_text(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = Dataset.from_text(text_path, features=features, cache_dir=cache_dir)
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = Dataset.from_text(text_path, cache_dir=cache_dir, split=split)
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = Dataset.from_text(path, cache_dir=cache_dir)
    _check_text_dataset(dataset, expected_features)


@pytest.fixture
def data_generator():
    def _gen():
        data = [
            {"col_1": "0", "col_2": 0, "col_3": 0.0},
            {"col_1": "1", "col_2": 1, "col_3": 1.0},
            {"col_1": "2", "col_2": 2, "col_3": 2.0},
            {"col_1": "3", "col_2": 3, "col_3": 3.0},
        ]
        for item in data:
            yield item

    return _gen


def _check_generator_dataset(dataset, expected_features, split):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.split == split
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_generator_keep_in_memory(keep_in_memory, data_generator, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_generator_dataset(dataset, expected_features, NamedSplit("train"))


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_generator_features(features, data_generator, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = Dataset.from_generator(data_generator, features=features, cache_dir=cache_dir)
    _check_generator_dataset(dataset, expected_features, NamedSplit("train"))


@pytest.mark.parametrize(
    "split",
    [None, NamedSplit("train"), "train", NamedSplit("foo"), "foo"],
)
def test_dataset_from_generator_split(split, data_generator, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_split = "train"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_split = split if split else default_expected_split
    if split:
        dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, split=split)
    else:
        dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir)
    _check_generator_dataset(dataset, expected_features, expected_split)


@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark():
    import pyspark

    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    data = [
        ("0", 0, 0.0),
        ("1", 1, 1.0),
        ("2", 2, 2.0),
        ("3", 3, 3.0),
    ]
    df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
    dataset = Dataset.from_spark(df)
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]


@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_features():
    import PIL.Image
    import pyspark

    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
    df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
    features = Features({"idx": Value("int64"), "image": Image()})
    dataset = Dataset.from_spark(
        df,
        features=features,
    )
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 1
    assert dataset.num_columns == 2
    assert dataset.column_names == ["idx", "image"]
    assert isinstance(dataset[0]["image"], PIL.Image.Image)
    assert dataset.features == features
    assert_arrow_metadata_are_synced_with_dataset_features(dataset)


@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_different_cache():
    import pyspark

    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.createDataFrame([("0", 0)], "col_1: string, col_2: int")
    dataset = Dataset.from_spark(df)
    assert isinstance(dataset, Dataset)
    different_df = spark.createDataFrame([("1", 1)], "col_1: string, col_2: int")
    different_dataset = Dataset.from_spark(different_df)
    assert isinstance(different_dataset, Dataset)
    assert dataset[0]["col_1"] == "0"
    # Check to make sure that the second dataset wasn't read from the cache.
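    # Note (added comment): if caching were keyed incorrectly, both datasets would return the first DataFrame's row.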
assert different_dataset[0]["col_1"] == "1" def _check_sql_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("con_type", ["string", "engine"]) def test_dataset_from_sql_con_type(con_type, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning, caplog): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} if con_type == "string": con = "sqlite:///" + sqlite_path elif con_type == "engine": import sqlalchemy con = sqlalchemy.create_engine("sqlite:///" + sqlite_path) with caplog.at_level(INFO, logger=get_logger().name): dataset = Dataset.from_sql( "dataset", con, cache_dir=cache_dir, ) if con_type == "string": assert "couldn't be hashed properly" not in caplog.text elif con_type == "engine": assert "couldn't be hashed properly" in caplog.text dataset = Dataset.from_sql( "dataset", con, cache_dir=cache_dir, ) _check_sql_dataset(dataset, expected_features) @require_sqlalchemy @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir) _check_sql_dataset(dataset, expected_features) @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = Dataset.from_sql( "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ) _check_sql_dataset(dataset, expected_features) def test_dataset_to_json(dataset, tmp_path): file_path = tmp_path / "test_path.jsonl" bytes_written = dataset.to_json(path_or_buf=file_path) assert file_path.is_file() assert bytes_written == file_path.stat().st_size df = pd.read_json(file_path, orient="records", lines=True) assert df.shape == dataset.shape assert list(df.columns) == list(dataset.column_names) @pytest.mark.parametrize("in_memory", [False, True]) @pytest.mark.parametrize( "method_and_params", [ ("rename_column", (), {"original_column_name": "labels", "new_column_name": "label"}), ("remove_columns", (), {"column_names": "labels"}), ( "cast", (), { "features": Features( { "tokens": Sequence(Value("string")), "labels": Sequence(Value("int16")), "answers": Sequence( { "text": Value("string"), "answer_start": Value("int32"), } ), "id": Value("int32"), } ) }, ), ("flatten", (), {}), ], ) def 
test_pickle_dataset_after_transforming_the_table(in_memory, method_and_params, arrow_file):
    method, args, kwargs = method_and_params
    with Dataset.from_file(arrow_file, in_memory=in_memory) as dataset, Dataset.from_file(
        arrow_file, in_memory=in_memory
    ) as reference_dataset:
        out = getattr(dataset, method)(*args, **kwargs)
        dataset = out if out is not None else dataset
        pickled_dataset = pickle.dumps(dataset)
        reloaded_dataset = pickle.loads(pickled_dataset)
        assert dataset._data != reference_dataset._data
        assert dataset._data.table == reloaded_dataset._data.table


def test_dummy_dataset_serialize_fs(dataset, mockfs):
    dataset_path = "mock://my_dataset"
    dataset.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
    assert mockfs.isdir(dataset_path)
    assert mockfs.glob(dataset_path + "/*")
    reloaded = Dataset.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
    assert len(reloaded) == len(dataset)
    assert reloaded.features == dataset.features
    assert reloaded.to_dict() == dataset.to_dict()


@pytest.mark.parametrize(
    "uri_or_path",
    [
        "relative/path",
        "/absolute/path",
        "s3://bucket/relative/path",
        "hdfs://relative/path",
        "hdfs:///absolute/path",
    ],
)
def test_build_local_temp_path(uri_or_path):
    extracted_path = strip_protocol(uri_or_path)
    local_temp_path = Dataset._build_local_temp_path(extracted_path).as_posix()
    extracted_path_without_anchor = Path(extracted_path).relative_to(Path(extracted_path).anchor).as_posix()
    # Check that the local temp path is relative to the system temp dir
    path_relative_to_tmp_dir = Path(local_temp_path).relative_to(Path(tempfile.gettempdir())).as_posix()
    assert (
        "hdfs://" not in path_relative_to_tmp_dir
        and "s3://" not in path_relative_to_tmp_dir
        and not local_temp_path.startswith(extracted_path_without_anchor)
        and local_temp_path.endswith(extracted_path_without_anchor)
    ), f"Local temp path: {local_temp_path}"


class StratifiedTest(TestCase):
    def test_errors_train_test_split_stratify(self):
        ys = [
            np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]),
            np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
            np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
            np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]),
            np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]),
        ]
        for i in range(len(ys)):
            features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(ys[i])))})
            data = {"text": np.ones(len(ys[i])), "label": ys[i]}
            d1 = Dataset.from_dict(data, features=features)
            # For checking that stratify_by_column exists as a key in self.features.keys()
            if i == 0:
                self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="labl")
            # For checking the minimum class count error
            elif i == 1:
                self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label")
            # For checking that the label column is of ClassLabel type
            elif i == 2:
                d1 = Dataset.from_dict(data)
                self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label")
            # For checking that test_size is greater than or equal to the number of classes
            elif i == 3:
                self.assertRaises(ValueError, d1.train_test_split, 0.30, stratify_by_column="label")
            # For checking that train_size is greater than or equal to the number of classes
            elif i == 4:
                self.assertRaises(ValueError, d1.train_test_split, 0.60, stratify_by_column="label")

    def test_train_test_split_stratify(self):
        ys = [
            np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]),
            np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
            np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
            np.array([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]),
            np.array([0] * 800 + [1] * 50),
        ]
        for y in ys:
            features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(y)))})
            data = {"text": np.ones(len(y)), "label": y}
            d1 = Dataset.from_dict(data, features=features)
            d1 = d1.train_test_split(test_size=0.33, stratify_by_column="label")
            y = np.asanyarray(y)  # To make it indexable for y[train]
            test_size = np.ceil(0.33 * len(y))
            train_size = len(y) - test_size
            npt.assert_array_equal(np.unique(d1["train"]["label"]), np.unique(d1["test"]["label"]))

            # checking class proportions
            p_train = np.bincount(np.unique(d1["train"]["label"], return_inverse=True)[1]) / float(
                len(d1["train"]["label"])
            )
            p_test = np.bincount(np.unique(d1["test"]["label"], return_inverse=True)[1]) / float(
                len(d1["test"]["label"])
            )
            npt.assert_array_almost_equal(p_train, p_test, 1)

            assert len(d1["train"]["text"]) + len(d1["test"]["text"]) == y.size
            assert len(d1["train"]["text"]) == train_size
            assert len(d1["test"]["text"]) == test_size


def test_dataset_estimate_nbytes():
    ds = Dataset.from_dict({"a": ["0" * 100] * 100})
    assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than full dataset size"

    ds = Dataset.from_dict({"a": ["0" * 100] * 100}).select([0])
    assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk"

    ds = Dataset.from_dict({"a": ["0" * 100] * 100})
    ds = concatenate_datasets([ds] * 100)
    assert 0.9 * ds._estimate_nbytes() < 100 * 100 * 100, "must be smaller than full dataset size"
    assert 1.1 * ds._estimate_nbytes() > 100 * 100 * 100, "must be bigger than full dataset size"

    ds = Dataset.from_dict({"a": ["0" * 100] * 100})
    ds = concatenate_datasets([ds] * 100).select([0])
    assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk"


def test_dataset_to_iterable_dataset(dataset: Dataset):
    iterable_dataset = dataset.to_iterable_dataset()
    assert isinstance(iterable_dataset, IterableDataset)
    assert list(iterable_dataset) == list(dataset)
    assert iterable_dataset.features == dataset.features

    iterable_dataset = dataset.to_iterable_dataset(num_shards=3)
    assert isinstance(iterable_dataset, IterableDataset)
    assert list(iterable_dataset) == list(dataset)
    assert iterable_dataset.features == dataset.features
    assert iterable_dataset.n_shards == 3

    with pytest.raises(ValueError):
        dataset.to_iterable_dataset(num_shards=len(dataset) + 1)
    with pytest.raises(NotImplementedError):
        dataset.with_format("torch").to_iterable_dataset()


@require_pil
def test_dataset_format_with_unformatted_image():
    import PIL

    ds = Dataset.from_dict(
        {"a": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 10, "b": [[0, 1]] * 10},
        Features({"a": Image(), "b": Sequence(Value("int64"))}),
    )
    ds.set_format("np", columns=["b"], output_all_columns=True)
    assert isinstance(ds[0]["a"], PIL.Image.Image)
    assert isinstance(ds[0]["b"], np.ndarray)


@pytest.mark.parametrize("batch_size", [1, 4])
@require_torch
def test_dataset_with_torch_dataloader(dataset, batch_size):
    from torch.utils.data import DataLoader

    from datasets import config

    dataloader = DataLoader(dataset, batch_size=batch_size)
    with patch.object(dataset, "_getitem", wraps=dataset._getitem) as mock_getitem:
        out = list(dataloader)
        getitem_call_count = mock_getitem.call_count
    assert len(out) == len(dataset) // batch_size + int(len(dataset) % batch_size > 0)
    # calling dataset[list_of_indices] is much more efficient than [dataset[idx] for idx in list_of_indices]
    if config.TORCH_VERSION >= version.parse("1.13.0"):
        assert getitem_call_count == len(dataset) // batch_size + int(len(dataset) % batch_size > 0)
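

# Illustrative sketch, not part of the test suite: how the stratified split
# exercised by StratifiedTest is used in practice. `stratify_by_column` must
# name a ClassLabel column, and class proportions are then preserved across
# the "train" and "test" splits. The helper name is hypothetical and is not
# prefixed with `test_`, so pytest does not collect it.
def _example_stratified_split():
    features = Features({"text": Value("int64"), "label": ClassLabel(num_classes=2)})
    ds = Dataset.from_dict({"text": list(range(10)), "label": [0] * 6 + [1] * 4}, features=features)
    splits = ds.train_test_split(test_size=0.5, stratify_by_column="label")
    # Every class present in the full dataset shows up in both splits.
    assert set(splits["train"]["label"]) == set(splits["test"]["label"]) == {0, 1}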
@pytest.mark.parametrize("return_lazy_dict", [True, False, "mix"]) def test_map_cases(return_lazy_dict): def f(x): """May return a mix of LazyDict and regular Dict""" if x["a"] < 2: x["a"] = -1 return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [-1, -1, 2, 3]} def f(x): """May return a mix of LazyDict and regular Dict, but sometimes with None values""" if x["a"] < 2: x["a"] = None return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [None, None, 2, 3]} def f(x): """Return a LazyDict, but we remove a lazy column and add a new one""" if x["a"] < 2: x["b"] = -1 return x else: x["b"] = x["a"] return x ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f, remove_columns=["a"]) outputs = ds[:] assert outputs == {"b": [-1, -1, 2, 3]} # The formatted dataset version removes the lazy column from a different dictionary, hence it should be preserved in the output ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.with_format("numpy") ds = ds.map(f, remove_columns=["a"]) ds = ds.with_format(None) outputs = ds[:] assert outputs == {"a": [0, 1, 2, 3], "b": [-1, -1, 2, 3]} def f(x): """May return a mix of LazyDict and regular Dict, but we replace a lazy column""" if x["a"] < 2: x["a"] = -1 return dict(x) if return_lazy_dict is False else x else: x["a"] = x["a"] return x if return_lazy_dict is True else {"a": x["a"]} ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) ds = ds.map(f, remove_columns=["a"]) outputs = ds[:] assert outputs == ({"a": [-1, -1, 2, 3]} if return_lazy_dict is False else {}) def f(x): """May return a mix of LazyDict and regular Dict, but we modify a nested lazy column in-place""" if x["a"]["b"] < 2: x["a"]["c"] = -1 return dict(x) if return_lazy_dict is False else x else: x["a"]["c"] = x["a"]["b"] return x if return_lazy_dict is True else {} ds = Dataset.from_dict({"a": [{"b": 0}, {"b": 1}, {"b": 2}, {"b": 3}]}) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [{"b": 0, "c": -1}, {"b": 1, "c": -1}, {"b": 2, "c": 2}, {"b": 3, "c": 3}]} def f(x): """May return a mix of LazyDict and regular Dict, but using an extension type""" if x["a"][0][0] < 2: x["a"] = [[-1]] return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} features = Features({"a": Array2D(shape=(1, 1), dtype="int32")}) ds = Dataset.from_dict({"a": [[[i]] for i in [0, 1, 2, 3]]}, features=features) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [[[i]] for i in [-1, -1, 2, 3]]} def f(x): """May return a mix of LazyDict and regular Dict, but using a nested extension type""" if x["a"]["nested"][0][0] < 2: x["a"] = {"nested": [[-1]]} return dict(x) if return_lazy_dict is False else x else: return x if return_lazy_dict is True else {} features = Features({"a": {"nested": Array2D(shape=(1, 1), dtype="int64")}}) ds = Dataset.from_dict({"a": [{"nested": [[i]]} for i in [0, 1, 2, 3]]}, features=features) ds = ds.map(f) outputs = ds[:] assert outputs == {"a": [{"nested": [[i]]} for i in [-1, -1, 2, 3]]} def test_dataset_getitem_raises(): ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) with pytest.raises(TypeError): ds[False] with pytest.raises(TypeError): ds._getitem(True) def test_categorical_dataset(tmpdir): n_legs = pa.array([2, 4, 5, 100]) animals = pa.array(["Flamingo", "Horse", 
"Brittle stars", "Centipede"]).cast( pa.dictionary(pa.int32(), pa.string()) ) names = ["n_legs", "animals"] table = pa.Table.from_arrays([n_legs, animals], names=names) table_path = str(tmpdir / "data.parquet") pa.parquet.write_table(table, table_path) dataset = Dataset.from_parquet(table_path) entry = dataset[0] # Categorical types get transparently converted to string assert entry["animals"] == "Flamingo" def test_dataset_batch(): # Create a simple Dataset data = {"id": list(range(10)), "text": [f"Text {i}" for i in range(10)]} ds = Dataset.from_dict(data) # Test with batch_size=3, drop_last_batch=False batched_ds = ds.batch(batch_size=3, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 4 # 3 full batches and 1 partial batch for i, batch in enumerate(batches[:3]): # Check full batches assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"] # Check last partial batch assert len(batches[3]["id"]) == 1 assert len(batches[3]["text"]) == 1 assert batches[3]["id"] == [9] assert batches[3]["text"] == ["Text 9"] # Test with batch_size=3, drop_last_batch=True batched_ds = ds.batch(batch_size=3, drop_last_batch=True) batches = list(batched_ds) assert len(batches) == 3 # Only full batches for i, batch in enumerate(batches): assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"] # Test with batch_size=4 (doesn't evenly divide dataset size) batched_ds = ds.batch(batch_size=4, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 3 # 2 full batches and 1 partial batch for i, batch in enumerate(batches[:2]): # Check full batches assert len(batch["id"]) == 4 assert len(batch["text"]) == 4 assert batch["id"] == [4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3] assert batch["text"] == [f"Text {4 * i}", f"Text {4 * i + 1}", f"Text {4 * i + 2}", f"Text {4 * i + 3}"] # Check last partial batch assert len(batches[2]["id"]) == 2 assert len(batches[2]["text"]) == 2 assert batches[2]["id"] == [8, 9] assert batches[2]["text"] == ["Text 8", "Text 9"] def test_dataset_from_dict_with_large_list(): data = {"col_1": [[1, 2], [3, 4]]} features = Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) assert isinstance(ds, Dataset) assert pa.types.is_large_list(ds.data.schema.field("col_1").type) def test_dataset_save_to_disk_with_large_list(tmp_path): data = {"col_1": [[1, 2], [3, 4]]} features = Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() def test_dataset_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path): data = {"col_1": [[1, 2], [3, 4]]} features = Features({"col_1": LargeList(Value("int64"))}) ds = Dataset.from_dict(data, features=features) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() loaded_ds = load_from_disk(dataset_path) assert len(loaded_ds) == len(ds) assert loaded_ds.features == ds.features assert loaded_ds.to_dict() == ds.to_dict() @require_polars def test_from_polars_with_large_list(): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = 
Dataset.from_polars(df) assert isinstance(ds, Dataset) @require_polars def test_from_polars_save_to_disk_with_large_list(tmp_path): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = Dataset.from_polars(df) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() @require_polars def test_from_polars_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path): import polars as pl df = pl.from_dict({"col_1": [[1, 2], [3, 4]]}) ds = Dataset.from_polars(df) dataset_path = tmp_path / "dataset_dir" ds.save_to_disk(dataset_path) assert (dataset_path / "data-00000-of-00001.arrow").exists() loaded_ds = load_from_disk(dataset_path) assert len(loaded_ds) == len(ds) assert loaded_ds.features == ds.features assert loaded_ds.to_dict() == ds.to_dict() @require_polars def test_polars_round_trip(): ds = Dataset.from_dict({"x": [[1, 2], [3, 4, 5]], "y": ["a", "b"]}) assert isinstance(Dataset.from_polars(ds.to_polars()), Dataset)
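

# Illustrative sketch, not one of the tests above: the save/load round trip
# that the large-list and polars tests in this module rely on. `dataset_dir`
# stands for any writable path; the helper name is hypothetical and is not
# collected by pytest.
def _example_save_and_load_round_trip(dataset_dir):
    ds = Dataset.from_dict({"col_1": [[1, 2], [3, 4]]})
    ds.save_to_disk(dataset_dir)
    reloaded = load_from_disk(dataset_dir)
    assert reloaded.features == ds.features
    assert reloaded.to_dict() == ds.to_dict()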
datasets/tests/test_arrow_dataset.py/0
{ "file_path": "datasets/tests/test_arrow_dataset.py", "repo_id": "datasets", "token_count": 118239 }
93
import datetime from pathlib import Path from unittest import TestCase import numpy as np import pandas as pd import pyarrow as pa import pytest from datasets import Audio, Features, Image, IterableDataset from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table from datasets.formatting.formatting import ( LazyBatch, LazyRow, NumpyArrowExtractor, PandasArrowExtractor, PythonArrowExtractor, ) from datasets.table import InMemoryTable from .utils import ( require_jax, require_librosa, require_numpy1_on_windows, require_pil, require_polars, require_sndfile, require_tf, require_torch, ) class AnyArray: def __init__(self, data) -> None: self.data = data def __array__(self) -> np.ndarray: return np.asarray(self.data) def _gen_any_arrays(): for _ in range(10): yield {"array": AnyArray(list(range(10)))} @pytest.fixture def any_arrays_dataset(): return IterableDataset.from_generator(_gen_any_arrays) _COL_A = [0, 1, 2] _COL_B = ["foo", "bar", "foobar"] _COL_C = [[[1.0, 0.0, 0.0]] * 2, [[0.0, 1.0, 0.0]] * 2, [[0.0, 0.0, 1.0]] * 2] _COL_D = [datetime.datetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)] * 3 _INDICES = [1, 0] IMAGE_PATH_1 = Path(__file__).parent / "features" / "data" / "test_image_rgb.jpg" IMAGE_PATH_2 = Path(__file__).parent / "features" / "data" / "test_image_rgba.png" AUDIO_PATH_1 = Path(__file__).parent / "features" / "data" / "test_audio_44100.wav" class ArrowExtractorTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) def test_python_extractor(self): pa_table = self._create_dummy_table() extractor = PythonArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0], "d": _COL_D[0]}) col = extractor.extract_column(pa_table) self.assertEqual(col, _COL_A) batch = extractor.extract_batch(pa_table) self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) def test_numpy_extractor(self): pa_table = self._create_dummy_table().drop(["c", "d"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0]}) col = extractor.extract_column(pa_table) np.testing.assert_equal(col, np.array(_COL_A)) batch = extractor.extract_batch(pa_table) np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)}) def test_numpy_extractor_nested(self): pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0].dtype, np.float64) self.assertEqual(row["c"].dtype, object) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, np.float64) self.assertEqual(col[0].dtype, object) self.assertEqual(col.dtype, object) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, np.float64) self.assertEqual(batch["c"][0].dtype, object) self.assertEqual(batch["c"].dtype, object) def test_numpy_extractor_temporal(self): pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = NumpyArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(np.issubdtype(row["d"].dtype, np.datetime64)) col = extractor.extract_column(pa_table) self.assertTrue(np.issubdtype(col[0].dtype, np.datetime64)) self.assertTrue(np.issubdtype(col.dtype, np.datetime64)) batch = extractor.extract_batch(pa_table) self.assertTrue(np.issubdtype(batch["d"][0].dtype, np.datetime64)) 
self.assertTrue(np.issubdtype(batch["d"].dtype, np.datetime64)) def test_pandas_extractor(self): pa_table = self._create_dummy_table() extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertIsInstance(row, pd.DataFrame) pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) col = extractor.extract_column(pa_table) pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) batch = extractor.extract_batch(pa_table) self.assertIsInstance(batch, pd.DataFrame) pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) def test_pandas_extractor_nested(self): pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0][0].dtype, np.float64) self.assertEqual(row["c"].dtype, object) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, np.float64) self.assertEqual(col[0].dtype, object) self.assertEqual(col.dtype, object) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, np.float64) self.assertEqual(batch["c"][0].dtype, object) self.assertEqual(batch["c"].dtype, object) def test_pandas_extractor_temporal(self): pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = PandasArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype)) col = extractor.extract_column(pa_table) self.assertTrue(isinstance(col[0], datetime.datetime)) self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype)) batch = extractor.extract_batch(pa_table) self.assertTrue(isinstance(batch["d"][0], datetime.datetime)) self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype)) @require_polars def test_polars_extractor(self): import polars as pl from datasets.formatting.polars_formatter import PolarsArrowExtractor pa_table = self._create_dummy_table() extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertIsInstance(row, pl.DataFrame) assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all() assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all() col = extractor.extract_column(pa_table) assert pl.Series.eq(col, pl.Series("a", _COL_A)).all() batch = extractor.extract_batch(pa_table) self.assertIsInstance(batch, pl.DataFrame) assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all() assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all() @require_polars def test_polars_nested(self): import polars as pl from datasets.formatting.polars_formatter import PolarsArrowExtractor pa_table = self._create_dummy_table().drop(["a", "b", "d"]) extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertEqual(row["c"][0][0].dtype, pl.Float64) self.assertEqual(row["c"].dtype, pl.List(pl.List(pl.Float64))) col = extractor.extract_column(pa_table) self.assertEqual(col[0][0].dtype, pl.Float64) self.assertEqual(col[0].dtype, pl.List(pl.Float64)) self.assertEqual(col.dtype, pl.List(pl.List(pl.Float64))) batch = extractor.extract_batch(pa_table) self.assertEqual(batch["c"][0][0].dtype, pl.Float64) self.assertEqual(batch["c"][0].dtype, pl.List(pl.Float64)) self.assertEqual(batch["c"].dtype, pl.List(pl.List(pl.Float64))) @require_polars def test_polars_temporal(self): from datasets.formatting.polars_formatter import 
PolarsArrowExtractor pa_table = self._create_dummy_table().drop(["a", "b", "c"]) extractor = PolarsArrowExtractor() row = extractor.extract_row(pa_table) self.assertTrue(row["d"].dtype.is_temporal()) col = extractor.extract_column(pa_table) self.assertTrue(isinstance(col[0], datetime.datetime)) self.assertTrue(col.dtype.is_temporal()) batch = extractor.extract_batch(pa_table) self.assertTrue(isinstance(batch["d"][0], datetime.datetime)) self.assertTrue(batch["d"].dtype.is_temporal()) class LazyDictTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def _create_dummy_formatter(self): return PythonFormatter(lazy=True) def test_lazy_dict_copy(self): pa_table = self._create_dummy_table() formatter = self._create_dummy_formatter() lazy_batch = formatter.format_batch(pa_table) lazy_batch_copy = lazy_batch.copy() self.assertEqual(type(lazy_batch), type(lazy_batch_copy)) self.assertEqual(lazy_batch.items(), lazy_batch_copy.items()) lazy_batch["d"] = [1, 2, 3] self.assertNotEqual(lazy_batch.items(), lazy_batch_copy.items()) class FormatterTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def test_python_formatter(self): pa_table = self._create_dummy_table() formatter = PythonFormatter() row = formatter.format_row(pa_table) self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]}) col = formatter.format_column(pa_table) self.assertEqual(col, _COL_A) batch = formatter.format_batch(pa_table) self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C}) def test_python_formatter_lazy(self): pa_table = self._create_dummy_table() formatter = PythonFormatter(lazy=True) row = formatter.format_row(pa_table) self.assertIsInstance(row, LazyRow) self.assertEqual(row["a"], _COL_A[0]) self.assertEqual(row["b"], _COL_B[0]) self.assertEqual(row["c"], _COL_C[0]) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, LazyBatch) self.assertEqual(batch["a"], _COL_A) self.assertEqual(batch["b"], _COL_B) self.assertEqual(batch["c"], _COL_C) def test_numpy_formatter(self): pa_table = self._create_dummy_table() formatter = NumpyFormatter() row = formatter.format_row(pa_table) np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])}) col = formatter.format_column(pa_table) np.testing.assert_equal(col, np.array(_COL_A)) batch = formatter.format_batch(pa_table) np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)}) assert batch["c"].shape == np.array(_COL_C).shape def test_numpy_formatter_np_array_kwargs(self): pa_table = self._create_dummy_table().drop(["b"]) formatter = NumpyFormatter(dtype=np.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, np.dtype(np.float16)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, np.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, np.dtype(np.float16)) self.assertEqual(batch["c"].dtype, np.dtype(np.float16)) @require_pil def test_numpy_formatter_image(self): # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = NumpyFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, np.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, np.uint8) self.assertEqual(col.shape, (2, 480, 640, 
3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, np.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = NumpyFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, np.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, np.ndarray) self.assertEqual(col.dtype, object) self.assertEqual(col[0].dtype, np.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], np.ndarray) self.assertEqual(batch["image"].dtype, object) self.assertEqual(batch["image"][0].dtype, np.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_librosa @require_sndfile def test_numpy_formatter_audio(self): pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = NumpyFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"]["array"].dtype, np.dtype(np.float32)) col = formatter.format_column(pa_table) self.assertEqual(col[0]["array"].dtype, np.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0]["array"].dtype, np.dtype(np.float32)) def test_pandas_formatter(self): pa_table = self._create_dummy_table() formatter = PandasFormatter() row = formatter.format_row(pa_table) self.assertIsInstance(row, pd.DataFrame) pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) col = formatter.format_column(pa_table) pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, pd.DataFrame) pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) @require_polars def test_polars_formatter(self): import polars as pl from datasets.formatting import PolarsFormatter pa_table = self._create_dummy_table() formatter = PolarsFormatter() row = formatter.format_row(pa_table) self.assertIsInstance(row, pl.DataFrame) assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all() assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all() col = formatter.format_column(pa_table) assert pl.Series.eq(col, pl.Series("a", _COL_A)).all() batch = formatter.format_batch(pa_table) self.assertIsInstance(batch, pl.DataFrame) assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all() assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all() @require_numpy1_on_windows @require_torch def test_torch_formatter(self): import torch from datasets.formatting import TorchFormatter pa_table = self._create_dummy_table() formatter = TorchFormatter() row = formatter.format_row(pa_table) torch.testing.assert_close(row["a"], torch.tensor(_COL_A, dtype=torch.int64)[0]) assert row["b"] == _COL_B[0] torch.testing.assert_close(row["c"], torch.tensor(_COL_C, dtype=torch.float32)[0]) col = formatter.format_column(pa_table) torch.testing.assert_close(col, torch.tensor(_COL_A, dtype=torch.int64)) batch = formatter.format_batch(pa_table) torch.testing.assert_close(batch["a"], torch.tensor(_COL_A, dtype=torch.int64)) assert batch["b"] == _COL_B 
torch.testing.assert_close(batch["c"], torch.tensor(_COL_C, dtype=torch.float32)) assert batch["c"].shape == np.array(_COL_C).shape @require_numpy1_on_windows @require_torch def test_torch_formatter_torch_tensor_kwargs(self): import torch from datasets.formatting import TorchFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = TorchFormatter(dtype=torch.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, torch.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, torch.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, torch.float16) self.assertEqual(batch["c"].dtype, torch.float16) @require_numpy1_on_windows @require_torch @require_pil def test_torch_formatter_image(self): import torch from datasets.formatting import TorchFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = TorchFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, torch.uint8) # torch uses CHW format contrary to numpy which uses HWC self.assertEqual(row["image"].shape, (3, 480, 640)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, torch.uint8) self.assertEqual(col.shape, (2, 3, 480, 640)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, torch.uint8) self.assertEqual(batch["image"].shape, (2, 3, 480, 640)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = TorchFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, torch.uint8) self.assertEqual(row["image"].shape, (3, 480, 640)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, torch.uint8) self.assertEqual(col[0].shape, (3, 480, 640)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, torch.uint8) self.assertEqual(batch["image"][0].shape, (3, 480, 640)) @require_torch @require_librosa @require_sndfile def test_torch_formatter_audio(self): import torch from datasets.formatting import TorchFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = TorchFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"]["array"].dtype, torch.float32) col = formatter.format_column(pa_table) self.assertEqual(col[0]["array"].dtype, torch.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0]["array"].dtype, torch.float32) @require_tf def test_tf_formatter(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = self._create_dummy_table() formatter = TFFormatter() row = formatter.format_row(pa_table) tf.debugging.assert_equal(row["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)[0]) tf.debugging.assert_equal(row["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)[0]) tf.debugging.assert_equal(row["c"], tf.convert_to_tensor(_COL_C, dtype=tf.float32)[0]) col = formatter.format_column(pa_table) tf.debugging.assert_equal(col, tf.ragged.constant(_COL_A, dtype=tf.int64)) batch = formatter.format_batch(pa_table) tf.debugging.assert_equal(batch["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)) tf.debugging.assert_equal(batch["b"], 
tf.convert_to_tensor(_COL_B, dtype=tf.string)) self.assertIsInstance(batch["c"], tf.Tensor) self.assertEqual(batch["c"].dtype, tf.float32) tf.debugging.assert_equal( batch["c"].shape.as_list(), tf.convert_to_tensor(_COL_C, dtype=tf.float32).shape.as_list() ) tf.debugging.assert_equal(tf.convert_to_tensor(batch["c"]), tf.convert_to_tensor(_COL_C, dtype=tf.float32)) @require_tf def test_tf_formatter_tf_tensor_kwargs(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = TFFormatter(dtype=tf.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, tf.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, tf.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, tf.float16) self.assertEqual(batch["c"].dtype, tf.float16) @require_tf @require_pil def test_tf_formatter_image(self): import tensorflow as tf from datasets.formatting import TFFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = TFFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, tf.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, tf.uint8) self.assertEqual(col.shape, (2, 480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"][0].dtype, tf.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = TFFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, tf.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, tf.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, tf.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_tf @require_sndfile def test_tf_formatter_audio(self): import tensorflow as tf from datasets.formatting import TFFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = TFFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"]["array"].dtype, tf.float32) col = formatter.format_column(pa_table) self.assertEqual(col[0]["array"].dtype, tf.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0]["array"].dtype, tf.float32) @require_jax def test_jax_formatter(self): import jax import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table() formatter = JaxFormatter() row = formatter.format_row(pa_table) jnp.allclose(row["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)[0]) assert row["b"] == _COL_B[0] jnp.allclose(row["c"], jnp.array(_COL_C, dtype=jnp.float32)[0]) col = formatter.format_column(pa_table) jnp.allclose(col, jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) batch = formatter.format_batch(pa_table) jnp.allclose(batch["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) 
assert batch["b"] == _COL_B jnp.allclose(batch["c"], jnp.array(_COL_C, dtype=jnp.float32)) assert batch["c"].shape == np.array(_COL_C).shape @require_jax def test_jax_formatter_jnp_array_kwargs(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table().drop(["b"]) formatter = JaxFormatter(dtype=jnp.float16) row = formatter.format_row(pa_table) self.assertEqual(row["c"].dtype, jnp.float16) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, jnp.float16) batch = formatter.format_batch(pa_table) self.assertEqual(batch["a"].dtype, jnp.float16) self.assertEqual(batch["c"].dtype, jnp.float16) @require_jax @require_pil def test_jax_formatter_image(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter # same dimensions pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) formatter = JaxFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, jnp.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertEqual(col.dtype, jnp.uint8) self.assertEqual(col.shape, (2, 480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertEqual(batch["image"].dtype, jnp.uint8) self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) # different dimensions pa_table = pa.table( {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} ) formatter = JaxFormatter(features=Features({"image": Image()})) row = formatter.format_row(pa_table) self.assertEqual(row["image"].dtype, jnp.uint8) self.assertEqual(row["image"].shape, (480, 640, 3)) col = formatter.format_column(pa_table) self.assertIsInstance(col, list) self.assertEqual(col[0].dtype, jnp.uint8) self.assertEqual(col[0].shape, (480, 640, 3)) batch = formatter.format_batch(pa_table) self.assertIsInstance(batch["image"], list) self.assertEqual(batch["image"][0].dtype, jnp.uint8) self.assertEqual(batch["image"][0].shape, (480, 640, 3)) @require_jax @require_librosa @require_sndfile def test_jax_formatter_audio(self): import jax.numpy as jnp from datasets.formatting import JaxFormatter pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) formatter = JaxFormatter(features=Features({"audio": Audio()})) row = formatter.format_row(pa_table) self.assertEqual(row["audio"]["array"].dtype, jnp.float32) col = formatter.format_column(pa_table) self.assertEqual(col[0]["array"].dtype, jnp.float32) batch = formatter.format_batch(pa_table) self.assertEqual(batch["audio"][0]["array"].dtype, jnp.float32) @require_jax def test_jax_formatter_device(self): import jax from datasets.formatting import JaxFormatter pa_table = self._create_dummy_table() device = jax.devices()[0] formatter = JaxFormatter(device=str(device)) row = formatter.format_row(pa_table) assert row["a"].devices().pop() == device assert row["c"].devices().pop() == device col = formatter.format_column(pa_table) assert col.devices().pop() == device batch = formatter.format_batch(pa_table) assert batch["a"].devices().pop() == device assert batch["c"].devices().pop() == device class QueryTest(TestCase): def _create_dummy_table(self): return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) def _create_dummy_arrow_indices(self): return pa.Table.from_arrays([pa.array(_INDICES, type=pa.uint64())], names=["indices"]) def assertTableEqual(self, first: pa.Table, second: pa.Table): self.assertEqual(first.schema, 
second.schema)
        for first_array, second_array in zip(first, second):
            self.assertEqual(first_array, second_array)
        self.assertEqual(first, second)

    def test_query_table_int(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        n = pa_table.num_rows
        # classical usage
        subtable = query_table(table, 0)
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
        subtable = query_table(table, 1)
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
        subtable = query_table(table, -1)
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
        # raise an IndexError
        with self.assertRaises(IndexError):
            query_table(table, n)
        with self.assertRaises(IndexError):
            query_table(table, -(n + 1))
        # with indices
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, 0, indices=indices)
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
        )
        with self.assertRaises(IndexError):
            assert len(indices) < n
            query_table(table, len(indices), indices=indices)

    def test_query_table_slice(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        n = pa_table.num_rows
        # classical usage
        subtable = query_table(table, slice(0, 1))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
        subtable = query_table(table, slice(1, 2))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
        subtable = query_table(table, slice(-2, -1))
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": _COL_A[-2:-1], "b": _COL_B[-2:-1], "c": _COL_C[-2:-1]})
        )
        # usage with None
        subtable = query_table(table, slice(-1, None))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
        subtable = query_table(table, slice(None, n + 1))
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": _COL_A[: n + 1], "b": _COL_B[: n + 1], "c": _COL_C[: n + 1]})
        )
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
        subtable = query_table(table, slice(-(n + 1), None))
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": _COL_A[-(n + 1) :], "b": _COL_B[-(n + 1) :], "c": _COL_C[-(n + 1) :]})
        )
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
        # usage with step
        subtable = query_table(table, slice(None, None, 2))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[::2], "b": _COL_B[::2], "c": _COL_C[::2]}))
        # empty output but no errors
        subtable = query_table(table, slice(-1, 0))  # usage with both negative and positive idx
        assert len(_COL_A[-1:0]) == 0
        self.assertTableEqual(subtable, pa_table.slice(0, 0))
        subtable = query_table(table, slice(2, 1))
        assert len(_COL_A[2:1]) == 0
        self.assertTableEqual(subtable, pa_table.slice(0, 0))
        subtable = query_table(table, slice(n, n))
        assert len(_COL_A[n:n]) == 0
        self.assertTableEqual(subtable, pa_table.slice(0, 0))
        subtable = query_table(table, slice(n, n + 1))
        assert len(_COL_A[n : n + 1]) == 0
        self.assertTableEqual(subtable, pa_table.slice(0, 0))
        # it's not possible to get an error with a slice
        # with indices
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, slice(0, 1), indices=indices)
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
        )
        subtable = query_table(table, slice(n - 1, n), indices=indices)
        assert len(indices.column(0).to_pylist()[n - 1 : n]) == 0
        self.assertTableEqual(subtable, pa_table.slice(0, 0))

    def test_query_table_range(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        n = pa_table.num_rows
        np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
        # classical usage
        subtable = query_table(table, range(0, 1))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[range(0, 1)], "b": np_B[range(0, 1)], "c": np_C[range(0, 1)].tolist()}),
        )
        subtable = query_table(table, range(1, 2))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[range(1, 2)], "b": np_B[range(1, 2)], "c": np_C[range(1, 2)].tolist()}),
        )
        subtable = query_table(table, range(-2, -1))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict(
                {"a": np_A[range(-2, -1)], "b": np_B[range(-2, -1)], "c": np_C[range(-2, -1)].tolist()}
            ),
        )
        # usage with both negative and positive idx
        subtable = query_table(table, range(-1, 0))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[range(-1, 0)], "b": np_B[range(-1, 0)], "c": np_C[range(-1, 0)].tolist()}),
        )
        subtable = query_table(table, range(-1, n))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[range(-1, n)], "b": np_B[range(-1, n)], "c": np_C[range(-1, n)].tolist()}),
        )
        # usage with step
        subtable = query_table(table, range(0, n, 2))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict(
                {"a": np_A[range(0, n, 2)], "b": np_B[range(0, n, 2)], "c": np_C[range(0, n, 2)].tolist()}
            ),
        )
        subtable = query_table(table, range(0, n + 1, 2 * n))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict(
                {
                    "a": np_A[range(0, n + 1, 2 * n)],
                    "b": np_B[range(0, n + 1, 2 * n)],
                    "c": np_C[range(0, n + 1, 2 * n)].tolist(),
                }
            ),
        )
        # empty output but no errors
        subtable = query_table(table, range(2, 1))
        assert len(np_A[range(2, 1)]) == 0
        self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
        subtable = query_table(table, range(n, n))
        assert len(np_A[range(n, n)]) == 0
        self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
        # raise an IndexError
        with self.assertRaises(IndexError):
            with self.assertRaises(IndexError):
                np_A[range(0, n + 1)]
            query_table(table, range(0, n + 1))
        with self.assertRaises(IndexError):
            with self.assertRaises(IndexError):
                np_A[range(-(n + 1), -1)]
            query_table(table, range(-(n + 1), -1))
        with self.assertRaises(IndexError):
            with self.assertRaises(IndexError):
                np_A[range(n, n + 1)]
            query_table(table, range(n, n + 1))
        # with indices
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, range(0, 1), indices=indices)
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
        )
        with self.assertRaises(IndexError):
            assert len(indices) < n
            query_table(table, range(len(indices), len(indices) + 1), indices=indices)

    def test_query_table_str(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        subtable = query_table(table, "a")
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A}))
        with self.assertRaises(KeyError):
            query_table(table, "z")
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, "a", indices=indices)
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": [_COL_A[i] for i in _INDICES]}))

    def test_query_table_iterable(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        n = pa_table.num_rows
        np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
        # classical usage
        subtable = query_table(table, [0])
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": np_A[[0]], "b": np_B[[0]], "c": np_C[[0]].tolist()})
        )
        subtable = query_table(table, [1])
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": np_A[[1]], "b": np_B[[1]], "c": np_C[[1]].tolist()})
        )
        subtable = query_table(table, [-1])
        self.assertTableEqual(
            subtable, pa.Table.from_pydict({"a": np_A[[-1]], "b": np_B[[-1]], "c": np_C[[-1]].tolist()})
        )
        subtable = query_table(table, [0, -1, 1])
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
        )
        # numpy iterable
        subtable = query_table(table, np.array([0, -1, 1]))
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
        )
        # empty output but no errors
        subtable = query_table(table, [])
        assert len(np_A[[]]) == 0
        self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
        # raise an IndexError
        with self.assertRaises(IndexError):
            with self.assertRaises(IndexError):
                np_A[[n]]
            query_table(table, [n])
        with self.assertRaises(IndexError):
            with self.assertRaises(IndexError):
                np_A[[-(n + 1)]]
            query_table(table, [-(n + 1)])
        # with indices
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, [0], indices=indices)
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
        )
        with self.assertRaises(IndexError):
            assert len(indices) < n
            query_table(table, [len(indices)], indices=indices)

    def test_query_table_indexable_type(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        n = pa_table.num_rows
        # classical usage
        subtable = query_table(table, np.int64(0))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
        subtable = query_table(table, np.int64(1))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
        subtable = query_table(table, np.int64(-1))
        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
        # raise an IndexError
        with self.assertRaises(IndexError):
            query_table(table, np.int64(n))
        with self.assertRaises(IndexError):
            query_table(table, np.int64(-(n + 1)))
        # with indices
        indices = InMemoryTable(self._create_dummy_arrow_indices())
        subtable = query_table(table, np.int64(0), indices=indices)
        self.assertTableEqual(
            subtable,
            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
        )
        with self.assertRaises(IndexError):
            assert len(indices) < n
            query_table(table, np.int64(len(indices)), indices=indices)

    def test_query_table_invalid_key_type(self):
        pa_table = self._create_dummy_table()
        table = InMemoryTable(pa_table)
        with self.assertRaises(TypeError):
            query_table(table, 0.0)
        with self.assertRaises(TypeError):
            query_table(table, [0, "a"])
        with self.assertRaises(TypeError):
            query_table(table, int)
        with self.assertRaises(TypeError):

            def iter_to_inf(start=0):
                while True:
                    yield start
                    start += 1

            query_table(table, iter_to_inf())
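

# Illustrative sketch, not collected by pytest: `query_table` mirrors Python
# indexing semantics over an Arrow-backed table and always returns a pa.Table,
# whether the key is an int, a slice, an iterable of ints, or a column name.
# It only uses names already imported at the top of this module.
def _example_query_table():
    table = InMemoryTable(pa.Table.from_pydict({"a": [0, 1, 2]}))
    assert query_table(table, 0).to_pydict() == {"a": [0]}
    assert query_table(table, slice(1, None)).to_pydict() == {"a": [1, 2]}
    assert query_table(table, [0, 2]).to_pydict() == {"a": [0, 2]}
    assert query_table(table, "a").to_pydict() == {"a": [0, 1, 2]}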
@pytest.fixture(scope="session") def arrow_table(): return pa.Table.from_pydict({"col_int": [0, 1, 2], "col_float": [0.0, 1.0, 2.0]}) @require_tf @pytest.mark.parametrize( "cast_schema", [ None, [("col_int", pa.int64()), ("col_float", pa.float64())], [("col_int", pa.int32()), ("col_float", pa.float64())], [("col_int", pa.int64()), ("col_float", pa.float32())], ], ) def test_tf_formatter_sets_default_dtypes(cast_schema, arrow_table): import tensorflow as tf from datasets.formatting import TFFormatter if cast_schema: arrow_table = arrow_table.cast(pa.schema(cast_schema)) arrow_table_dict = arrow_table.to_pydict() list_int = arrow_table_dict["col_int"] list_float = arrow_table_dict["col_float"] formatter = TFFormatter() row = formatter.format_row(arrow_table) tf.debugging.assert_equal(row["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)[0]) tf.debugging.assert_equal(row["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)[0]) col = formatter.format_column(arrow_table) tf.debugging.assert_equal(col, tf.ragged.constant(list_int, dtype=tf.int64)) batch = formatter.format_batch(arrow_table) tf.debugging.assert_equal(batch["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)) tf.debugging.assert_equal(batch["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)) @require_numpy1_on_windows @require_torch @pytest.mark.parametrize( "cast_schema", [ None, [("col_int", pa.int64()), ("col_float", pa.float64())], [("col_int", pa.int32()), ("col_float", pa.float64())], [("col_int", pa.int64()), ("col_float", pa.float32())], ], ) def test_torch_formatter_sets_default_dtypes(cast_schema, arrow_table): import torch from datasets.formatting import TorchFormatter if cast_schema: arrow_table = arrow_table.cast(pa.schema(cast_schema)) arrow_table_dict = arrow_table.to_pydict() list_int = arrow_table_dict["col_int"] list_float = arrow_table_dict["col_float"] formatter = TorchFormatter() row = formatter.format_row(arrow_table) torch.testing.assert_close(row["col_int"], torch.tensor(list_int, dtype=torch.int64)[0]) torch.testing.assert_close(row["col_float"], torch.tensor(list_float, dtype=torch.float32)[0]) col = formatter.format_column(arrow_table) torch.testing.assert_close(col, torch.tensor(list_int, dtype=torch.int64)) batch = formatter.format_batch(arrow_table) torch.testing.assert_close(batch["col_int"], torch.tensor(list_int, dtype=torch.int64)) torch.testing.assert_close(batch["col_float"], torch.tensor(list_float, dtype=torch.float32)) def test_iterable_dataset_of_arrays_format_to_arrow(any_arrays_dataset: IterableDataset): formatted = any_arrays_dataset.with_format("arrow") assert all(isinstance(example, pa.Table) for example in formatted) def test_iterable_dataset_of_arrays_format_to_numpy(any_arrays_dataset: IterableDataset): formatted = any_arrays_dataset.with_format("np") assert all(isinstance(example["array"], np.ndarray) for example in formatted) @require_torch def test_iterable_dataset_of_arrays_format_to_torch(any_arrays_dataset: IterableDataset): import torch formatted = any_arrays_dataset.with_format("torch") assert all(isinstance(example["array"], torch.Tensor) for example in formatted) @require_tf def test_iterable_dataset_of_arrays_format_to_tf(any_arrays_dataset: IterableDataset): import tensorflow as tf formatted = any_arrays_dataset.with_format("tf") assert all(isinstance(example["array"], tf.Tensor) for example in formatted) @require_jax def test_iterable_dataset_of_arrays_format_to_jax(any_arrays_dataset: IterableDataset): import jax.numpy as jnp formatted 
= any_arrays_dataset.with_format("jax") assert all(isinstance(example["array"], jnp.ndarray) for example in formatted)
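

# Illustrative sketch, not collected by pytest: `with_format` on an
# IterableDataset is lazy -- each example is converted to the requested
# container type as it is yielded, which is what the tests above assert
# backend by backend. Reuses the module-level `_gen_any_arrays` generator.
def _example_iterable_numpy_format():
    ds = IterableDataset.from_generator(_gen_any_arrays)
    first = next(iter(ds.with_format("np")))
    assert isinstance(first["array"], np.ndarray)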
datasets/tests/test_formatting.py/0
{ "file_path": "datasets/tests/test_formatting.py", "repo_id": "datasets", "token_count": 21450 }
94
import copy import pickle from decimal import Decimal from functools import partial from typing import List, Union from unittest.mock import MagicMock import numpy as np import pyarrow as pa import pytest from datasets.features import Array2D, ClassLabel, Features, Image, LargeList, Sequence, Value from datasets.features.features import Array2DExtensionType, get_nested_type from datasets.table import ( ConcatenationTable, InMemoryTable, MemoryMappedTable, Table, TableBlock, _in_memory_arrow_table_from_buffer, _in_memory_arrow_table_from_file, _interpolation_search, _memory_mapped_arrow_table_from_file, array_cast, cast_array_to_feature, concat_tables, embed_array_storage, embed_table_storage, inject_arrow_table_documentation, table_cast, table_iter, ) from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow @pytest.fixture(scope="session") def in_memory_pa_table(arrow_file) -> pa.Table: return pa.ipc.open_stream(arrow_file).read_all() def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]: assert len(table) > 2 blocks = [ [table.slice(0, 2)], [table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])], ] return blocks @pytest.fixture(scope="session") def in_memory_blocks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) return _to_testing_blocks(table) @pytest.fixture(scope="session") def memory_mapped_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) return _to_testing_blocks(table) @pytest.fixture(scope="session") def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks): return in_memory_blocks[:1] + memory_mapped_blocks[1:] def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): copied_table = copy.deepcopy(table) assert isinstance(copied_table, MemoryMappedTable) assert copied_table.table == table.table def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_doesnt_increase(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable): with assert_arrow_memory_increases(): pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert isinstance(unpickled_table, MemoryMappedTable) assert unpickled_table.table == table.table def assert_index_attributes_equal(table: Table, other: Table): assert table._batches == other._batches np.testing.assert_array_equal(table._offsets, other._offsets) assert table._schema == other._schema def add_suffix_to_column_names(table, suffix): return table.rename_columns([f"{name}{suffix}" for name in table.column_names]) def test_inject_arrow_table_documentation(in_memory_pa_table): method = pa.Table.slice def function_to_wrap(*args): return method(*args) args = (0, 1) wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap) assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args) assert "pyarrow.Table" not in wrapped_method.__doc__ assert "Table" in wrapped_method.__doc__ def 
test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_increases(): pa_table = _in_memory_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_in_memory_arrow_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue()) assert in_memory_pa_table == pa_table def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): pa_table = _memory_mapped_arrow_table_from_file(arrow_file) assert in_memory_pa_table == pa_table def test_table_init(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.table == in_memory_pa_table def test_table_validate(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.validate() == in_memory_pa_table.validate() def test_table_equals(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.equals(in_memory_pa_table) def test_table_to_batches(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_batches() == in_memory_pa_table.to_batches() def test_table_to_pydict(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_pydict() == in_memory_pa_table.to_pydict() def test_table_to_string(in_memory_pa_table): table = Table(in_memory_pa_table) assert table.to_string() == in_memory_pa_table.to_string() def test_table_field(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.field("tokens") == in_memory_pa_table.field("tokens") def test_table_column(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names table = Table(in_memory_pa_table) assert table.column("tokens") == in_memory_pa_table.column("tokens") def test_table_itercolumns(in_memory_pa_table): table = Table(in_memory_pa_table) assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns())) assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns()) def test_table_getitem(in_memory_pa_table): table = Table(in_memory_pa_table) assert table[0] == in_memory_pa_table[0] def test_table_len(in_memory_pa_table): table = Table(in_memory_pa_table) assert len(table) == len(in_memory_pa_table) def test_table_str(in_memory_pa_table): table = Table(in_memory_pa_table) assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table") assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table") @pytest.mark.parametrize( "attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"] ) def test_table_attributes(in_memory_pa_table, attribute): table = Table(in_memory_pa_table) assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute) def test_in_memory_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_increases(): table = InMemoryTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_buffer(in_memory_pa_table): with assert_arrow_memory_increases(): buf_writer = pa.BufferOutputStream() writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) writer.write_table(in_memory_pa_table) writer.close() buf_writer.close() table = 
InMemoryTable.from_buffer(buf_writer.getvalue()) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_pandas(in_memory_pa_table): df = in_memory_pa_table.to_pandas() with assert_arrow_memory_increases(): # with no schema it might infer another order of the fields in the schema table = InMemoryTable.from_pandas(df) assert isinstance(table, InMemoryTable) # by specifying schema we get the same order of features, and so the exact same table table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_arrays(in_memory_pa_table): arrays = list(in_memory_pa_table.columns) names = list(in_memory_pa_table.column_names) table = InMemoryTable.from_arrays(arrays, names=names) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_from_pydict(in_memory_pa_table): pydict = in_memory_pa_table.to_pydict() with assert_arrow_memory_increases(): table = InMemoryTable.from_pydict(pydict) assert isinstance(table, InMemoryTable) assert table.table == pa.Table.from_pydict(pydict) def test_in_memory_table_from_pylist(in_memory_pa_table): pylist = InMemoryTable(in_memory_pa_table).to_pylist() table = InMemoryTable.from_pylist(pylist) assert isinstance(table, InMemoryTable) assert pylist == table.to_pylist() def test_in_memory_table_from_batches(in_memory_pa_table): batches = list(in_memory_pa_table.to_batches()) table = InMemoryTable.from_batches(batches) assert table.table == in_memory_pa_table assert isinstance(table, InMemoryTable) def test_in_memory_table_deepcopy(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_in_memory_table_pickle(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert_index_attributes_equal(table, unpickled_table) @slow def test_in_memory_table_pickle_big_table(): big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)}) length = len(big_table_4GB) big_table_4GB = pickle.dumps(big_table_4GB) big_table_4GB = pickle.loads(big_table_4GB) assert len(big_table_4GB) == length def test_in_memory_table_slice(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, InMemoryTable) def test_in_memory_table_filter(in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = InMemoryTable(in_memory_pa_table).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, InMemoryTable) def test_in_memory_table_flatten(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, InMemoryTable) def test_in_memory_table_combine_chunks(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, InMemoryTable) def 
test_in_memory_table_cast(in_memory_pa_table): assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = InMemoryTable(in_memory_pa_table).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, InMemoryTable) def test_in_memory_table_cast_reorder_struct(): table = InMemoryTable( pa.Table.from_pydict( { "top": [ { "foo": "a", "bar": "b", } ] } ) ) schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})}) assert table.cast(schema).schema == schema def test_in_memory_table_cast_with_hf_features(): table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]})) features = Features({"labels": ClassLabel(names=["neg", "pos"])}) schema = features.arrow_schema assert table.cast(schema).schema == schema assert Features.from_arrow_schema(table.cast(schema).schema) == features def test_in_memory_table_replace_schema_metadata(in_memory_pa_table): metadata = {"huggingface": "{}"} table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, InMemoryTable) def test_in_memory_table_add_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_append_column(in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_remove_column(in_memory_pa_table): table = InMemoryTable(in_memory_pa_table).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, InMemoryTable) def test_in_memory_table_set_column(in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) assert isinstance(table, InMemoryTable) def test_in_memory_table_rename_columns(in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = InMemoryTable(in_memory_pa_table).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, InMemoryTable) def test_in_memory_table_drop(in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = InMemoryTable(in_memory_pa_table).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, InMemoryTable) def test_memory_mapped_table_init(arrow_file, in_memory_pa_table): table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) 
assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert table.table == in_memory_pa_table assert isinstance(table, MemoryMappedTable) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert len(table) == 1 for method, args, kwargs in replays: in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs) assert table.table == in_memory_pa_table assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_deepcopy(arrow_file): table = MemoryMappedTable.from_file(arrow_file) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.path == copied_table.path assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) def test_memory_mapped_table_pickle(arrow_file): table = MemoryMappedTable.from_file(arrow_file) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.path == table.path assert_index_attributes_equal(table, unpickled_table) def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file): with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file) assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_pickle_applies_replay(arrow_file): replays = [("slice", (0, 1), {}), ("flatten", (), {})] with assert_arrow_memory_doesnt_increase(): table = MemoryMappedTable.from_file(arrow_file, replays=replays) assert isinstance(table, MemoryMappedTable) assert table.replays == replays assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, MemoryMappedTable) assert table.replays == [("slice", (1, 2), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table): mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = MemoryMappedTable.from_file(arrow_file).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, MemoryMappedTable) assert table.replays == [("filter", (mask,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # filter DOES increase memory # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, 
MemoryMappedTable) assert table.replays == [("flatten", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, MemoryMappedTable) assert table.replays == [("combine_chunks", (), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table): assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = MemoryMappedTable.from_file(arrow_file).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, MemoryMappedTable) assert table.replays == [("cast", (schema,), {})] assert_deepcopy_without_bringing_data_in_memory(table) # cast DOES increase memory when converting integers precision for example # assert_pickle_without_bringing_data_in_memory(table) assert_pickle_does_bring_data_in_memory(table) def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table): metadata = {"huggingface": "{}"} table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, MemoryMappedTable) assert table.replays == [("replace_schema_metadata", (metadata,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column) assert table.table == in_memory_pa_table.add_column(i, field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("add_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table): field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column) assert table.table == in_memory_pa_table.append_column(field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("append_column", (field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table): table = MemoryMappedTable.from_file(arrow_file).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, MemoryMappedTable) assert table.replays == [("remove_column", (0,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table): i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) table = 
MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column) assert table.table == in_memory_pa_table.set_column(i, field_, column) assert isinstance(table, MemoryMappedTable) assert table.replays == [("set_column", (i, field_, column), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table): assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = MemoryMappedTable.from_file(arrow_file).rename_columns(names) assert table.table == in_memory_pa_table.rename_columns(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("rename_columns", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table): names = [in_memory_pa_table.column_names[0]] table = MemoryMappedTable.from_file(arrow_file).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, MemoryMappedTable) assert table.replays == [("drop", (names,), {})] assert_deepcopy_without_bringing_data_in_memory(table) assert_pickle_without_bringing_data_in_memory(table) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_init( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = ( in_memory_blocks if blocks_type == "in_memory" else memory_mapped_blocks if blocks_type == "memory_mapped" else mixed_in_memory_and_memory_mapped_blocks ) table = ConcatenationTable(in_memory_pa_table, blocks) assert table.table == in_memory_pa_table assert table.blocks == blocks def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks): assert len(in_memory_pa_table) > 2 in_memory_table = InMemoryTable(in_memory_pa_table) t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2) table = ConcatenationTable.from_blocks(in_memory_table) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([t1, t2]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks([[t1], [t2]]) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] table = ConcatenationTable.from_blocks(in_memory_blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table assert table.blocks == [[in_memory_table]] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_from_blocks_doesnt_increase_memory( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_blocks(blocks) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table if blocks_type == "in_memory": assert table.blocks == [[InMemoryTable(in_memory_pa_table)]] else: assert table.blocks == blocks 
@pytest.mark.parametrize("axis", [0, 1]) def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file): in_memory_table = InMemoryTable(in_memory_pa_table) concatenation_table = ConcatenationTable.from_blocks(in_memory_table) memory_mapped_table = MemoryMappedTable.from_file(arrow_file) tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table] if axis == 0: expected_table = pa.concat_tables([in_memory_pa_table] * len(tables)) else: # avoids error due to duplicate column names tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)] expected_table = in_memory_pa_table for table in tables[1:]: for name, col in zip(table.column_names, table.columns): expected_table = expected_table.append_column(name, col) with assert_arrow_memory_doesnt_increase(): table = ConcatenationTable.from_tables(tables, axis=axis) assert isinstance(table, ConcatenationTable) assert table.table == expected_table # because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable assert len(table.blocks) == 1 if axis == 1 else 2 assert len(table.blocks[0]) == 1 if axis == 0 else 2 assert axis == 1 or len(table.blocks[1]) == 1 assert isinstance(table.blocks[0][0], InMemoryTable) assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable) def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file): table = MemoryMappedTable.from_file(arrow_file) t1 = table.slice(0, 2) t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names]) concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ConcatenationTable.from_blocks([[t2], [t2]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] concatenated = ConcatenationTable.from_tables( [ ConcatenationTable.from_blocks([[t2], [t2]]), ConcatenationTable.from_blocks([[t1], [t1], [t1]]), ], axis=1, ) assert len(concatenated) == 6 assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_deepcopy( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) copied_table = copy.deepcopy(table) assert table.table == copied_table.table assert table.blocks == copied_table.blocks assert_index_attributes_equal(table, copied_table) # deepcopy must return the exact same arrow objects since they are immutable assert table.table is copied_table.table assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_pickle( blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks) pickled_table = pickle.dumps(table) unpickled_table = pickle.loads(pickled_table) assert unpickled_table.table == table.table assert unpickled_table.blocks == table.blocks assert_index_attributes_equal(table, 
unpickled_table) def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table): input_features = Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema t0 = in_memory_pa_table.replace_schema_metadata(intput_schema.metadata) t1 = MemoryMappedTable.from_file(arrow_file) tables = [t0, t1] concatenated_table = concat_tables(tables, axis=0) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_slice( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).slice(1, 2) assert table.table == in_memory_pa_table.slice(1, 2) assert isinstance(table, ConcatenationTable) def test_concatenation_table_slice_mixed_schemas_vertically(arrow_file): t1 = MemoryMappedTable.from_file(arrow_file) t2 = InMemoryTable.from_pydict({"additional_column": ["foo"]}) expected = pa.table( { **{column: values + [None] for column, values in t1.to_pydict().items()}, "additional_column": [None] * len(t1) + ["foo"], } ) blocks = [[t1], [t2]] table = ConcatenationTable.from_blocks(blocks) assert table.to_pydict() == expected.to_pydict() assert isinstance(table, ConcatenationTable) reloaded = pickle.loads(pickle.dumps(table)) assert reloaded.to_pydict() == expected.to_pydict() assert isinstance(reloaded, ConcatenationTable) reloaded = pickle.loads(pickle.dumps(table.slice(1, 2))) assert reloaded.to_pydict() == expected.slice(1, 2).to_pydict() assert isinstance(reloaded, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_filter( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) table = ConcatenationTable.from_blocks(blocks).filter(mask) assert table.table == in_memory_pa_table.filter(mask) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_flatten( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).flatten() assert table.table == in_memory_pa_table.flatten() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_combine_chunks( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": 
mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).combine_chunks() assert table.table == in_memory_pa_table.combine_chunks() assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_cast( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types assert pa.int64() in in_memory_pa_table.schema.types schema = pa.schema( { k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) schema = pa.schema( { k: v if v != pa.int64() else pa.int32() for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) } ) table = ConcatenationTable.from_blocks(blocks).cast(schema) assert table.table == in_memory_pa_table.cast(schema) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concat_tables_cast_with_features_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] input_features = Features.from_arrow_schema(in_memory_pa_table.schema) input_features["id"] = Value("int64", id="my_id") intput_schema = input_features.arrow_schema concatenated_table = ConcatenationTable.from_blocks(blocks).cast(intput_schema) output_schema = concatenated_table.schema output_features = Features.from_arrow_schema(output_schema) assert output_schema == intput_schema assert output_schema.metadata == intput_schema.metadata assert output_features == input_features assert output_features["id"].id == "my_id" @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_replace_schema_metadata( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] metadata = {"huggingface": "{}"} table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata) assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_add_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).add_column(i, field_, column) 
# assert table.table == in_memory_pa_table.add_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_append_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).append_column(field_, column) # assert table.table == in_memory_pa_table.append_column(field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.append_column(field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_remove_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] table = ConcatenationTable.from_blocks(blocks).remove_column(0) assert table.table == in_memory_pa_table.remove_column(0) assert isinstance(table, ConcatenationTable) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_set_column( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] i = len(in_memory_pa_table.column_names) field_ = "new_field" column = pa.array(list(range(len(in_memory_pa_table)))) with pytest.raises(NotImplementedError): ConcatenationTable.from_blocks(blocks).set_column(i, field_, column) # assert table.table == in_memory_pa_table.set_column(i, field_, column) # unpickled_table = pickle.loads(pickle.dumps(table)) # assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_rename_columns( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] assert "tokens" in in_memory_pa_table.column_names names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] table = ConcatenationTable.from_blocks(blocks).rename_columns(names) assert isinstance(table, ConcatenationTable) assert table.table == in_memory_pa_table.rename_columns(names) @pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) def test_concatenation_table_drop( blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks ): blocks = { "in_memory": in_memory_blocks, "memory_mapped": memory_mapped_blocks, "mixed": mixed_in_memory_and_memory_mapped_blocks, }[blocks_type] names = [in_memory_pa_table.column_names[0]] table = 
ConcatenationTable.from_blocks(blocks).drop(names) assert table.table == in_memory_pa_table.drop(names) assert isinstance(table, ConcatenationTable) def test_concat_tables(arrow_file, in_memory_pa_table): t0 = in_memory_pa_table t1 = InMemoryTable(t0) t2 = MemoryMappedTable.from_file(arrow_file) t3 = ConcatenationTable.from_blocks(t1) tables = [t0, t1, t2, t3] concatenated_table = concat_tables(tables, axis=0) assert concatenated_table.table == pa.concat_tables([t0] * 4) assert concatenated_table.table.shape == (40, 4) assert isinstance(concatenated_table, ConcatenationTable) assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable) assert isinstance(concatenated_table.blocks[2][0], InMemoryTable) # add suffix to avoid error due to duplicate column names concatenated_table = concat_tables( [add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1 ) assert concatenated_table.table.shape == (10, 16) assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable) assert isinstance(concatenated_table.blocks[0][2], InMemoryTable) def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]: for i in range(len(arr) - 1): if arr[i] <= x < arr[i + 1]: return i return IndexError class _ListWithGetitemCounter(list): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.unique_getitem_calls = set() def __getitem__(self, i): out = super().__getitem__(i) self.unique_getitem_calls.add(i) return out @property def getitem_unique_count(self): return len(self.unique_getitem_calls) @pytest.mark.parametrize( "arr, x", [(np.arange(0, 14, 3), x) for x in range(-1, 22)] + [(list(np.arange(-5, 5)), x) for x in range(-6, 6)] + [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]] + [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 1_0001]], ) def test_interpolation_search(arr, x): ground_truth = _interpolation_search_ground_truth(arr, x) if isinstance(ground_truth, int): arr = _ListWithGetitemCounter(arr) output = _interpolation_search(arr, x) assert ground_truth == output # 4 maximum unique getitem calls is expected for the cases of this test # but it can be bigger for large and messy arrays. 
assert arr.getitem_unique_count <= 4 else: with pytest.raises(ground_truth): _interpolation_search(arr, x) def test_indexed_table_mixin(): n_rows_per_chunk = 10 n_chunks = 4 pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk}) pa_table = pa.concat_tables([pa_table] * n_chunks) table = Table(pa_table) assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks)) assert table.fast_slice(5) == pa_table.slice(5) assert table.fast_slice(2, 13) == pa_table.slice(2, 13) def test_cast_integer_array_to_features(): arr = pa.array([[0, 1]]) assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_( pa.string() ) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False) def test_cast_float_array_to_features(): arr = pa.array([[0.0, 1.0]]) assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_( pa.string() ) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False) def test_cast_boolean_array_to_features(): arr = pa.array([[False, True]]) assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_( pa.string() ) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False) def test_cast_decimal_array_to_features(): arr = pa.array([[Decimal(0), Decimal(1)]]) assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string()) assert cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False).type == pa.list_( pa.string() ) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False) def test_cast_array_to_features_nested(): arr = pa.array([[{"foo": [0]}]]) assert cast_array_to_feature(arr, [{"foo": Sequence(Value("string"))}]).type == pa.list_( pa.struct({"foo": pa.list_(pa.string())}) ) def test_cast_array_to_features_to_nested_with_no_fields(): arr = pa.array([{}]) assert cast_array_to_feature(arr, {}).type == pa.struct({}) assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist() def test_cast_array_to_features_nested_with_nulls(): # same type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}) assert casted_array.to_pylist() == arr.to_pylist() # different type arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})) casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]}) assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))}) assert casted_array.to_pylist() == [{"foo": [None, [0]]}] def test_cast_array_to_features_to_null_type(): # same type arr = pa.array([[None, None]]) assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null()) # different type arr = pa.array([[None, 1]]) with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(Value("null"))) def test_cast_array_to_features_array_xd(): # 
same storage type arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2)) casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32") # different storage type casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32")) assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32") def test_cast_array_to_features_sequence_classlabel(): arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test empty arrays arr = pa.array([[], []], pa.list_(pa.int64())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) arr = pa.array([[], []], pa.list_(pa.string())) assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64()) # Test invalid class labels arr = pa.array([[2]], pa.list_(pa.int64())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))) arr = pa.array([["baz"]], pa.list_(pa.string())) with pytest.raises(ValueError): assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))) @pytest.mark.parametrize( "arr", [ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32(), 3)), ], ) @pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)]) @pytest.mark.parametrize("target_value_feature", [Value("int64")]) def test_cast_fixed_size_list_array_to_features_sequence(arr, slice, target_value_feature): arr = arr if slice is None else arr[slice] # Fixed size list casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size)) assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=arr.type.list_size)) assert casted_array.to_pylist() == arr.to_pylist() with pytest.raises(TypeError): cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size + 1)) # Variable size list casted_array = cast_array_to_feature(arr, Sequence(target_value_feature)) assert casted_array.type == get_nested_type(Sequence(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() casted_array = cast_array_to_feature(arr, [target_value_feature]) assert casted_array.type == get_nested_type([target_value_feature]) assert casted_array.to_pylist() == arr.to_pylist() @pytest.mark.parametrize( "arr", [ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32())), ], ) @pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)]) @pytest.mark.parametrize("target_value_feature", [Value("int64")]) def test_cast_list_array_to_features_sequence(arr, slice, target_value_feature): arr = arr if slice is None else arr[slice] # Variable size list casted_array = cast_array_to_feature(arr, Sequence(target_value_feature)) assert casted_array.type == get_nested_type(Sequence(target_value_feature)) assert casted_array.to_pylist() == arr.to_pylist() casted_array = cast_array_to_feature(arr, [target_value_feature]) assert casted_array.type == get_nested_type([target_value_feature]) assert 
casted_array.to_pylist() == arr.to_pylist() # Fixed size list list_size = arr.value_lengths().drop_null()[0].as_py() if arr.value_lengths().drop_null() else 2 casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=list_size)) assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=list_size)) assert casted_array.to_pylist() == arr.to_pylist() @pytest.mark.parametrize("sequence_feature_dtype", ["string", "int64"]) @pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"]) @pytest.mark.parametrize("list_within_struct", [False, True]) def test_cast_array_to_feature_with_list_array_and_sequence_feature( list_within_struct, from_list_type, sequence_feature_dtype ): list_type = { "list": pa.list_, "fixed_size_list": partial(pa.list_, list_size=2), "large_list": pa.large_list, } primitive_type = { "string": pa.string(), "int64": pa.int64(), } to_type = "list" array_data = [0, 1] array_type = list_type[from_list_type](pa.int64()) sequence_feature = Value(sequence_feature_dtype) expected_array_type = list_type[to_type](primitive_type[sequence_feature_dtype]) if list_within_struct: array_data = {"col_1": array_data} array_type = pa.struct({"col_1": array_type}) sequence_feature = {"col_1": sequence_feature} expected_array_type = pa.struct({"col_1": expected_array_type}) feature = Sequence(sequence_feature) array = pa.array([array_data], type=array_type) cast_array = cast_array_to_feature(array, feature) assert cast_array.type == expected_array_type @pytest.mark.parametrize("large_list_feature_value_type", ["string", "int64"]) @pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"]) def test_cast_array_to_feature_with_list_array_and_large_list_feature(from_list_type, large_list_feature_value_type): list_type = { "list": pa.list_, "fixed_size_list": partial(pa.list_, list_size=2), "large_list": pa.large_list, } primitive_type = { "string": pa.string(), "int64": pa.int64(), } to_type = "large_list" array_data = [0, 1] array_type = list_type[from_list_type](pa.int64()) large_list_feature_value = Value(large_list_feature_value_type) expected_array_type = list_type[to_type](primitive_type[large_list_feature_value_type]) feature = LargeList(large_list_feature_value) array = pa.array([array_data], type=array_type) cast_array = cast_array_to_feature(array, feature) assert cast_array.type == expected_array_type def test_cast_array_xd_to_features_sequence(): arr = np.random.randint(0, 10, size=(8, 2, 3)).tolist() arr = Array2DExtensionType(shape=(2, 3), dtype="int64").wrap_array(pa.array(arr, pa.list_(pa.list_(pa.int64())))) arr = pa.ListArray.from_arrays([0, None, 4, 8], arr) # Variable size list casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32"))) assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32"))) assert casted_array.to_pylist() == arr.to_pylist() # Fixed size list casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4)) assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4)) assert casted_array.to_pylist() == arr.to_pylist() def test_embed_array_storage(image_file): array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type) embedded_images_array = embed_array_storage(array, Image()) assert isinstance(embedded_images_array.to_pylist()[0]["path"], str) assert embedded_images_array.to_pylist()[0]["path"] == 
"test_image_rgb.jpg" assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes) def test_embed_array_storage_nested(image_file): array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type)) embedded_images_array = embed_array_storage(array, [Image()]) assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes) array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type})) embedded_images_array = embed_array_storage(array, {"foo": Image()}) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes) @pytest.mark.parametrize( "array, feature, expected_embedded_array_type", [ ( pa.array([[{"path": "image_path"}]], type=pa.list_(Image.pa_type)), [Image()], pa.types.is_list, ), ( pa.array([[{"path": "image_path"}]], type=pa.large_list(Image.pa_type)), LargeList(Image()), pa.types.is_large_list, ), ( pa.array([[{"path": "image_path"}]], type=pa.list_(Image.pa_type)), Sequence(Image()), pa.types.is_list, ), ], ) def test_embed_array_storage_with_list_types(array, feature, expected_embedded_array_type, monkeypatch): mock_embed_storage = MagicMock( return_value=pa.StructArray.from_arrays( [pa.array([b"image_bytes"], type=pa.binary()), pa.array(["image_path"], type=pa.string())], ["bytes", "path"], ) ) monkeypatch.setattr(Image, "embed_storage", mock_embed_storage) embedded_images_array = embed_array_storage(array, feature) assert expected_embedded_array_type(embedded_images_array.type) assert embedded_images_array.to_pylist() == [[{"bytes": b"image_bytes", "path": "image_path"}]] def test_embed_table_storage(image_file): features = Features({"image": Image()}) table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema) embedded_images_table = embed_table_storage(table) assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str) assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes) @pytest.mark.parametrize( "table", [ InMemoryTable(pa.table({"foo": range(10)})), InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])), InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])), ], ) @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_table_iter(table, batch_size, drop_last_batch): num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch)) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for subtable in subtables) else: assert all(len(subtable) == batch_size for subtable in subtables[:-1]) assert len(subtables[-1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables(subtables) assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() @pytest.mark.parametrize("to_type", ["list", "fixed_size_list", "large_list"]) @pytest.mark.parametrize("from_type", ["list", "fixed_size_list", "large_list"]) def test_array_cast(from_type, to_type): array_type = { "list": pa.list_(pa.int64()), "fixed_size_list": pa.list_(pa.int64(), 2), "large_list": pa.large_list(pa.int64()), } arr = 
pa.array([[0, 1]], type=array_type[from_type]) cast_arr = array_cast(arr, array_type[to_type]) assert cast_arr.type == array_type[to_type] assert cast_arr.values == arr.values
datasets/tests/test_table.py/0
{ "file_path": "datasets/tests/test_table.py", "repo_id": "datasets", "token_count": 24240 }
95
<jupyter_start><jupyter_text>Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪

In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient).

Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**.

More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.

To test its robustness, we're going to train it in 2 different simple environments:
- Cartpole-v1
- PixelcopterEnv

⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️

🎮 Environments:
- [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)
- [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)

📚 RL-Library:
- Python
- PyTorch

We're constantly trying to improve our tutorials, so **if you find any issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

Objectives of this notebook 🏆

At the end of the notebook, you will:
- Be able to **code a Reinforce algorithm from scratch using PyTorch.**
- Be able to **test the robustness of your agent using simple environments.**
- Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥.

This notebook is from the Deep Reinforcement Learning Course

In this free course, you will:
- 📖 Study Deep Reinforcement Learning in **theory and practice**.
- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.
- 🤖 Train **agents in unique environments**

And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course

Don't forget to **sign up to the course**: we collect your email so that we can **send you the links when each Unit is published and keep you informed about challenges and updates.**

The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5

Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction)

Let's code the Reinforce algorithm from scratch 🔥

To validate this hands-on for the certification process, you need to push your trained models to the Hub:
- Get a result of >= 350 for `Cartpole-v1`.
- Get a result of >= 5 for `PixelCopter`.

To find your result, go to the leaderboard and find your model: **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go to the bottom of the leaderboard page and click on the refresh button**.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

An advice 💡

It's better to run this colab in a copy on your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and do not need to redo everything from scratch.

To do that, you can either use `Ctrl + S` or `File > Save a copy in Google Drive.`

Set the GPU 💪
- To **accelerate the agent's training, we'll use a GPU**.
To do that, go to `Runtime > Change Runtime type`
- `Hardware Accelerator > GPU`

Create a virtual display 🖥

Throughout this notebook, we'll need to generate a replay video. To do so with colab, **we need a virtual screen to be able to render the environment** (and thus record the frames). Hence, the following cell will install the libraries and create and run a virtual screen 🖥<jupyter_code>%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip install pyvirtualdisplay
!pip install pyglet==1.5.1

# Virtual display
from pyvirtualdisplay import Display

virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install the dependencies 🔽

The first step is to install the dependencies. We'll install several:
- `gym`
- `gym-games`: Extra gym environments made with PyGame.
- `huggingface_hub`: 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others.

You may be wondering why we install gym and not gymnasium, a more recent version of gym: **because the gym-games we are using are not yet updated for gymnasium**.

The differences you'll encounter here:
- In `gym` we don't have `terminated` and `truncated`, but only `done`.
- In `gym`, `env.step()` returns `state, reward, done, info`.

You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/

You can see all the available Reinforce models here 👉 https://huggingface.co/models?other=reinforce

And you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning<jupyter_code>!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt<jupyter_output><empty_output><jupyter_text>Import the packages 📦

In addition to importing the installed libraries, we also import:
- `imageio`: A library that will help us generate a replay video<jupyter_code>import numpy as np

from collections import deque

import matplotlib.pyplot as plt
%matplotlib inline

# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

# Gym
import gym
import gym_pygame

# Hugging Face Hub
from huggingface_hub import notebook_login  # To log to our Hugging Face account to be able to upload models to the Hub.

import imageio<jupyter_output><empty_output><jupyter_text>Check if we have a GPU

- Let's check if we have a GPU
- If so, you should see `device: cuda:0`<jupyter_code>device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)<jupyter_output><empty_output><jupyter_text>We're now ready to implement our Reinforce algorithm 🔥
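Before we start, here is a quick concrete look at the `gym` step API described in the install section above. This is a minimal random-rollout sketch for illustration only: the random-action loop and the `demo_env` variable names are ours, not part of the agent we will build.<jupyter_code># Minimal sketch (illustration only): a random rollout with the gym API.
# Note that env.step() returns 4 values (state, reward, done, info),
# unlike gymnasium, which returns 5 (obs, reward, terminated, truncated, info).
demo_env = gym.make("CartPole-v1")
state = demo_env.reset()
episode_return, done = 0.0, False
while not done:
    action = demo_env.action_space.sample()  # random action, just to exercise the loop
    state, reward, done, info = demo_env.step(action)
    episode_return += reward
print("Random-policy episode return:", episode_return)
demo_env.close()<jupyter_output><empty_output><jupyter_text>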
First agent: Playing CartPole-v1 🤖

Create the CartPole environment and understand how it works

[The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)

Why do we use a simple environment like CartPole-v1?

As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch, you need **to be sure that it works correctly and to find bugs with easy environments before going deeper**, since finding bugs is much easier in simple environments.

> Try to have some “sign of life” on toy problems

> Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step.

___

The CartPole-v1 environment

> A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart.

So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in equilibrium.**

The episode ends if:
- The pole angle is greater than ±12°
- The cart position is greater than ±2.4
- The episode length is greater than 500

We get a reward 💰 of +1 for every timestep the pole stays in equilibrium.<jupyter_code>env_id = "CartPole-v1"
# Create the env
env = gym.make(env_id)

# Create the evaluation env
eval_env = gym.make(env_id)

# Get the state space and action space
s_size = env.observation_space.shape[0]
a_size = env.action_space.n

print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample())  # Get a random observation

print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample())  # Take a random action<jupyter_output><empty_output><jupyter_text>Let's build the Reinforce Architecture

This implementation is based on the following implementations:
- [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py)
- [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb)
- [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95)

So we want:
- Two fully connected layers (fc1 and fc2).
- ReLU as the activation function of fc1.
- Softmax to output a probability distribution over actions.<jupyter_code>class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        # Create two fully connected layers

    def forward(self, x):
        # Define the forward pass
        # The state goes to fc1, then we apply the ReLU activation function
        # The fc1 output goes to fc2
        # We output the softmax

    def act(self, state):
        """
        Given a state, take action
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = np.argmax(m)
        return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(s_size, h_size)
        self.fc2 = nn.Linear(h_size, a_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = np.argmax(m)
        return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>I made a mistake. Can you guess where?

- To find out, let's make a forward pass:<jupyter_code>debug_policy = Policy(s_size, a_size, 64).to(device)
debug_policy.act(env.reset())<jupyter_output><empty_output><jupyter_text>- Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor`.
`action` in `m.log_prob(action)` must be a Tensor **but it's not.**- Do you know why? Check the `act` function and try to see why it does not work. Advice 💡: Something is wrong in this implementation. Remember that in the `act` function **we want to sample an action from the probability distribution over actions**. (Real) Solution<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>By using CartPole, it was easier to debug since **we know that the bug comes from our integration and not from our simple environment**. - Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)`, since it will always output the action that has the highest probability.- We need to replace it with `action = m.sample()`, which will sample an action from the probability distribution P(.|s) Let's build the Reinforce Training AlgorithmThis is the Reinforce algorithm pseudocode: - When we calculate the return Gt (line 6), we see that we calculate the sum of discounted rewards **starting at timestep t**.- Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by the action), **only the ones that come after the action matter**.- Before coding this, you should read the section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you), which explains why we use the reward-to-go policy gradient.We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure. Don't hesitate also [to check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95). But overall, the idea is to **compute the return at each timestep efficiently**. The second question you may ask is **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent?- We want to maximize our utility function $J(\theta)$, but in PyTorch, as in TensorFlow, it's better to **minimize an objective function.** - So let's say we want to reinforce action 3 at a certain timestep. Before training, this action's probability P is 0.25. - So we want to modify $\theta$ such that $\pi_\theta(a_3|s; \theta) > 0.25$ - Because all probabilities must sum to 1, maximizing $\pi_\theta(a_3|s; \theta)$ will **lower the probabilities of the other actions.** - So we should tell PyTorch **to minimize $1 - \pi_\theta(a_3|s; \theta)$.** - This loss function approaches 0 as $\pi_\theta(a_3|s; \theta)$ nears 1.
- So we are encouraging the gradient to maximize $\pi_\theta(a_3|s; \theta)$<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes+1): saved_log_probs = [] rewards = [] state = # TODO: reset the environment # Line 4 of pseudocode for t in range(max_t): action, log_prob = # TODO get the action saved_log_probs.append(log_prob) state, reward, done, _ = # TODO: take an env step rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as the sum of the reward at time t and the gamma-discounted return at time t+1 (G_(t+1)) # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + gamma*r_(t+2) + gamma^2*r_(t+3) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma*G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma * G_(t+1) + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function, which allows appending at position 0 in constant time O(1), ## whereas a normal Python list would require O(N) to do this.
for t in range(n_steps)[::-1]: disc_return_t = (returns[0] if len(returns)>0 else 0) returns.appendleft( ) # TODO: complete here ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float increment (machine epsilon), which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) return scores<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes+1): saved_log_probs = [] rewards = [] state = env.reset() # Line 4 of pseudocode for t in range(max_t): action, log_prob = policy.act(state) saved_log_probs.append(log_prob) state, reward, done, _ = env.step(action) rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as the sum of the reward at time t and the gamma-discounted return at time t+1 (G_(t+1)) # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + gamma*r_(t+2) + gamma^2*r_(t+3) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma*G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma * G_(t+1) + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function, which allows appending at position 0 in constant time O(1), ## whereas a normal Python list would require O(N) to do this.
for t in range(n_steps)[::-1]: disc_return_t = (returns[0] if len(returns)>0 else 0) returns.appendleft( gamma*disc_return_t + rewards[t] ) ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float increment (machine epsilon), which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) return scores<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent.- But first, we define a variable containing all the training hyperparameters.- You can change the training parameters (and should 😉)<jupyter_code>cartpole_hyperparameters = { "h_size": 16, "n_training_episodes": 1000, "n_evaluation_episodes": 10, "max_t": 1000, "gamma": 1.0, "lr": 1e-2, "env_id": env_id, "state_space": s_size, "action_space": a_size, } # Create the policy and place it on the device cartpole_policy = Policy(cartpole_hyperparameters["state_space"], cartpole_hyperparameters["action_space"], cartpole_hyperparameters["h_size"]).to(device) cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"]) scores = reinforce(cartpole_policy, cartpole_optimizer, cartpole_hyperparameters["n_training_episodes"], cartpole_hyperparameters["max_t"], cartpole_hyperparameters["gamma"], 100)<jupyter_output><empty_output><jupyter_text>Define evaluation method 📝- Here we define the evaluation method that we're going to use to test our Reinforce agent.<jupyter_code>def evaluate_agent(env, max_steps, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and return the average reward and the standard deviation of the reward.
:param env: The evaluation environment :param n_eval_episodes: Number of episodes to evaluate the agent :param policy: The Reinforce agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 for step in range(max_steps): action, _ = policy.act(state) new_state, reward, done, info = env.step(action) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward<jupyter_output><empty_output><jupyter_text>Evaluate our agent 📈<jupyter_code>evaluate_agent(eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥Now that we've seen good results after training, we can publish our trained model on the Hub 🤗 with one line of code.Here's an example of a Model Card: Push to the Hub Do not modify this code<jupyter_code>from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import json import imageio import tempfile import os def record_video(env, policy, out_directory, fps=30): """ Generate a replay video of the agent :param env: the environment used to generate the replay :param policy: the policy of our agent :param out_directory: where the video is saved :param fps: how many frames per second """ images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: # Take an action given the state, using our trained policy action, _ = policy.act(state) state, reward, done, info = env.step(action) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def push_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30 ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub.
This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: the pytorch model we want to save :param hyperparameters: training hyperparameters :param eval_env: evaluation environment :param video_fps: how many frames per second to record our video replay """ _, repo_name = repo_id.split("/") api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: local_directory = Path(tmpdirname) # Step 2: Save the model torch.save(model, local_directory / "model.pt") # Step 3: Save the hyperparameters to JSON with open(local_directory / "hyperparameters.json", "w") as outfile: json.dump(hyperparameters, outfile) # Step 4: Evaluate the model and build JSON mean_reward, std_reward = evaluate_agent(eval_env, hyperparameters["max_t"], hyperparameters["n_evaluation_episodes"], model) # Get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters["env_id"], "mean_reward": mean_reward, "n_evaluation_episodes": hyperparameters["n_evaluation_episodes"], "eval_datetime": eval_form_datetime, } # Write a JSON file with open(local_directory / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = hyperparameters["env_id"] metadata = {} metadata["tags"] = [ env_name, "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class" ] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Reinforce** Agent playing **{env_name}** This is a trained model of a **Reinforce** agent playing **{env_name}**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction """ readme_path = local_directory / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = local_directory / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # use the evaluation env passed to the function # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=local_directory, path_in_repo=".", ) print(f"Your model is pushed to the Hub.
You can view your model here: {repo_url}")<jupyter_output><empty_output><jupyter_text>By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**.This way:- You can **showcase your work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join2️⃣ Sign in, and then store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**<jupyter_code>notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `push_to_hub()` function<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub(repo_id, cartpole_policy, # The model we want to save cartpole_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 )<jupyter_output><empty_output><jupyter_text>Now that we've tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁 Second agent: PixelCopter 🚁 Study the PixelCopter environment 👀- [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)<jupyter_code>env_id = "Pixelcopter-PLE-v0" env = gym.make(env_id) eval_env = gym.make(env_id) s_size = env.observation_space.shape[0] a_size = env.action_space.n print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The observation space (7) 👀:- player y position- player velocity- player distance to floor- player distance to ceiling- next block x distance to player- next block's top y location- next block's bottom y locationThe action space (2) 🎮:- Up (press accelerator) - Do nothing (don't press accelerator) The reward function 💰: - For each vertical block it passes through, the agent gains a positive reward of +1. Each time a terminal state is reached, it receives a negative reward of -1.
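Before defining the policy, it can help to sanity-check the environment with a short random rollout. This is a minimal sketch (no learning involved; it only exercises the same gym API we used above):<jupyter_code># Quick sanity check: run one episode with random actions
state = env.reset()
done = False
total_reward = 0
while not done:
    action = env.action_space.sample()  # random action, just to exercise the environment
    state, reward, done, info = env.step(action)
    total_reward += reward
print("Random rollout return:", total_reward)<jupyter_output><empty_output><jupyter_text>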
Define the new Policy 🧠- We need a deeper neural network since the environment is more complex<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() # Define the three layers here def forward(self, x): # Define the forward process here return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, h_size*2) self.fc3 = nn.Linear(h_size*2, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️- Because this environment is more complex, we need more neurons, especially for the hidden size.<jupyter_code>pixelcopter_hyperparameters = { "h_size": 64, "n_training_episodes": 50000, "n_evaluation_episodes": 10, "max_t": 10000, "gamma": 0.99, "lr": 1e-4, "env_id": env_id, "state_space": s_size, "action_space": a_size, }<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent 🔥.<jupyter_code># Create the policy and place it on the device # torch.manual_seed(50) pixelcopter_policy = Policy(pixelcopter_hyperparameters["state_space"], pixelcopter_hyperparameters["action_space"], pixelcopter_hyperparameters["h_size"]).to(device) pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"]) scores = reinforce(pixelcopter_policy, pixelcopter_optimizer, pixelcopter_hyperparameters["n_training_episodes"], pixelcopter_hyperparameters["max_t"], pixelcopter_hyperparameters["gamma"], 1000)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub(repo_id, pixelcopter_policy, # The model we want to save pixelcopter_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 )<jupyter_output><empty_output>
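<jupyter_text>As a side note, once pushed, the full policy module can be loaded back from the Hub. A minimal sketch (the repo id below is a placeholder; replace it with the one you defined above):<jupyter_code>from huggingface_hub import hf_hub_download

# Hypothetical repo id, for illustration only
local_path = hf_hub_download(repo_id="username/Reinforce-Pixelcopter-PLE-v0", filename="model.pt")
# torch.save stored the full module, so the Policy class must be defined (it is, above)
loaded_policy = torch.load(local_path, map_location=device)
loaded_policy.eval()<jupyter_output><empty_output>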
deep-rl-class/notebooks/unit4/unit4.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit4/unit4.ipynb", "repo_id": "deep-rl-class", "token_count": 12740 }
96
# The Exploration/Exploitation trade-off [[exp-exp-tradeoff]] Finally, before looking at the different methods to solve Reinforcement Learning problems, we must cover one more very important topic: *the exploration/exploitation trade-off.* - *Exploration* is exploring the environment by trying random actions in order to **find more information about the environment.** - *Exploitation* is **exploiting known information to maximize the reward.** Remember, the goal of our RL agent is to maximize the expected cumulative reward. However, **we can fall into a common trap**. Let’s take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_1.jpg" alt="Exploration" width="100%"> In this game, our mouse can have an **infinite amount of small cheese** (+1 each). But at the top of the maze, there is a gigantic pile of cheese (+1000). However, if we only focus on exploitation, our agent will never reach the gigantic pile of cheese. Instead, it will only exploit **the nearest source of rewards,** even if this source is small (exploitation). But if our agent does a little bit of exploration, it can **discover the big reward** (the pile of big cheese). This is what we call the exploration/exploitation trade-off. We need to balance how much we **explore the environment** and how much we **exploit what we know about the environment.** Therefore, we must **define a rule that helps to handle this trade-off**. We’ll see the different ways to handle it in the future units. If it’s still confusing, **think of a real problem: choosing a restaurant:** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_2.jpg" alt="Exploration"> <figcaption>Source: <a href="https://inst.eecs.berkeley.edu/~cs188/sp20/assets/lecture/lec15_6up.pdf"> Berkeley AI Course</a> </figcaption> </figure> - *Exploitation*: You go to the same restaurant that you know is good every day and **risk missing out on a better one.** - *Exploration*: Try restaurants you have never been to before, with the risk of having a bad experience **but also the chance of a fantastic one.** To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">
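As a preview of one common way to handle this trade-off, here is a minimal, illustrative epsilon-greedy sketch in Python. It assumes a hypothetical `q_values` table mapping (state, action) pairs to value estimates; we will build such tables properly in the next units.

```python
import random

def epsilon_greedy_action(q_values, state, n_actions, epsilon=0.1):
    """With probability epsilon, explore (random action); otherwise, exploit."""
    if random.random() < epsilon:
        return random.randrange(n_actions)  # exploration: try a random action
    # exploitation: pick the action with the highest estimated value
    return max(range(n_actions), key=lambda a: q_values[(state, a)])
```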
deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx", "repo_id": "deep-rl-class", "token_count": 699 }
97
# Monte Carlo vs Temporal Difference Learning [[mc-vs-td]] The last thing we need to discuss before diving into Q-Learning is the two learning strategies. Remember that an RL agent **learns by interacting with its environment.** The idea is that **given the experience and the received reward, the agent will update its value function or policy.** Monte Carlo and Temporal Difference Learning are two different **strategies for training our value function or our policy function.** Both of them **use experience to solve the RL problem.** On one hand, Monte Carlo uses **an entire episode of experience before learning.** On the other hand, Temporal Difference uses **only a step ( \\(S_t, A_t, R_{t+1}, S_{t+1}\\) ) to learn.** We'll explain both of them **using a value-based method example.** ## Monte Carlo: learning at the end of the episode [[monte-carlo]] Monte Carlo waits until the end of the episode, calculates \\(G_t\\) (return) and uses it as **a target for updating \\(V(S_t)\\).** So it requires a **complete episode of interaction before updating our value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="Monte Carlo"/> If we take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-2.jpg" alt="Monte Carlo"/> - We always start the episode **at the same starting point.** - **The agent takes actions using the policy**. For instance, using an Epsilon Greedy Strategy, a policy that alternates between exploration (random actions) and exploitation. - We get **the reward and the next state.** - We terminate the episode if the cat eats the mouse or if the mouse moves > 10 steps. - At the end of the episode, **we have a list of (State, Action, Reward, Next State) tuples** For instance [[State tile 3 bottom, Go Left, +1, State tile 2 bottom], [State tile 2 bottom, Go Left, +0, State tile 1 bottom]...] - **The agent will sum the total rewards \\(G_t\\)** (to see how well it did). - It will then **update \\(V(s_t)\\) based on the formula** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3.jpg" alt="Monte Carlo"/> - Then **start a new game with this new knowledge** By running more and more episodes, **the agent will learn to play better and better.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3p.jpg" alt="Monte Carlo"/> For instance, if we train a state-value function using Monte Carlo: - We initialize our value function **so that it returns 0 value for each state** - Our learning rate (lr) is 0.1 and our discount rate is 1 (= no discount) - Our mouse **explores the environment and takes random actions** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4.jpg" alt="Monte Carlo"/> - The mouse took more than 10 steps, so the episode ends.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4p.jpg" alt="Monte Carlo"/> - We have a list of state, action, rewards, next_state, **we need to calculate the return \\(G{t=0}\\)** \\(G_t = R_{t+1} + R_{t+2} + R_{t+3} ...\\) (for simplicity, we don't discount the rewards) \\(G_0 = R_{1} + R_{2} + R_{3}…\\) \\(G_0 = 1 + 0 + 0 + 0 + 0 + 0 + 1 + 1 + 0 + 0\\) \\(G_0 = 3\\) - We can now compute the **new** \\(V(S_0)\\): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5.jpg" alt="Monte Carlo"/> \\(V(S_0) = V(S_0) + lr * [G_0 — V(S_0)]\\) \\(V(S_0) = 0 + 0.1 * [3 – 0]\\) \\(V(S_0) = 0.3\\) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5p.jpg" alt="Monte Carlo"/> ## Temporal Difference Learning: learning at each step [[td-learning]] **Temporal Difference, on the other hand, waits for only one interaction (one step) \\(S_{t+1}\\)** to form a TD target and update \\(V(S_t)\\) using \\(R_{t+1}\\) and \\( \gamma * V(S_{t+1})\\). The idea with **TD is to update the \\(V(S_t)\\) at each step.** But because we didn't experience an entire episode, we don't have \\(G_t\\) (expected return). Instead, **we estimate \\(G_t\\) by adding \\(R_{t+1}\\) and the discounted value of the next state.** This is called bootstrapping. It's called this **because TD bases its update in part on an existing estimate \\(V(S_{t+1})\\) and not a complete sample \\(G_t\\).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="Temporal Difference"/> This method is called TD(0) or **one-step TD (update the value function after any individual step).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1p.jpg" alt="Temporal Difference"/> If we take the same example, <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2.jpg" alt="Temporal Difference"/> - We initialize our value function so that it returns 0 value for each state. - Our learning rate (lr) is 0.1, and our discount rate is 1 (no discount). - Our mouse begins to explore the environment and takes a random action: **going to the left** - It gets a reward \\(R_{t+1} = 1\\) since **it eats a piece of cheese** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2p.jpg" alt="Temporal Difference"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3.jpg" alt="Temporal Difference"/> We can now update \\(V(S_0)\\): New \\(V(S_0) = V(S_0) + lr * [R_1 + \gamma * V(S_1) - V(S_0)]\\) New \\(V(S_0) = 0 + 0.1 * [1 + 1 * 0–0]\\) New \\(V(S_0) = 0.1\\) So we just updated our value function for State 0. 
Now we **continue to interact with this environment with our updated value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3p.jpg" alt="Temporal Difference"/> To summarize: - With *Monte Carlo*, we update the value function from a complete episode, and so we **use the actual accurate discounted return of this episode.** - With *TD Learning*, we update the value function from a step, and we replace \\(G_t\\), which we don't know, with **an estimated return called the TD target.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Summary.jpg" alt="Summary"/>
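To make the contrast concrete, here is an illustrative Python sketch of both update rules. It assumes a simple `V` table (for example, a `defaultdict(float)`), a recorded episode for Monte Carlo, and a single transition for TD; it is a sketch of the formulas above, not production code.

```python
from collections import defaultdict

# Monte Carlo: update V(s_t) toward the full episode return G_t
def mc_update(V, episode, lr=0.1, gamma=1.0):
    # episode is a list of (state, reward) pairs, where reward is received after leaving state
    G = 0.0
    for state, reward in reversed(episode):
        G = reward + gamma * G           # accumulate the (discounted) return
        V[state] += lr * (G - V[state])  # move V(s) toward the actual return

# TD(0): update V(s_t) toward the bootstrapped TD target after one step
def td_update(V, state, reward, next_state, lr=0.1, gamma=1.0):
    td_target = reward + gamma * V[next_state]  # estimated return (bootstrapping)
    V[state] += lr * (td_target - V[state])

V = defaultdict(float)
td_update(V, state="S0", reward=1, next_state="S1")  # reproduces the example: V(S0) = 0.1
print(V["S0"])
```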
deep-rl-class/units/en/unit2/mc-vs-td.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/mc-vs-td.mdx", "repo_id": "deep-rl-class", "token_count": 2316 }
98
# Deep Q-Learning [[deep-q-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 thumbnail" width="100%"> In the last unit, we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕. We got excellent results with this simple algorithm, but these environments were relatively simple because the **state space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3). For comparison, the state space in Atari games can **contain \\(10^{9}\\) to \\(10^{11}\\) states**. But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.** So in this unit, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state. And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> So let’s get started! 🚀
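To make "a neural network that approximates Q-values" concrete before diving in, here is a minimal, illustrative PyTorch sketch. The sizes are placeholders, and this is not the convolutional architecture actually used for Atari frames:

```python
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Maps a state vector to one Q-value per action."""
    def __init__(self, state_dim, n_actions, hidden=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_actions),  # one output per action
        )

    def forward(self, state):
        return self.net(state)

q_net = QNetwork(state_dim=4, n_actions=2)
q_values = q_net(torch.zeros(1, 4))  # Q-value estimates for each action
```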
deep-rl-class/units/en/unit3/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 437 }
99
# How do Unity ML-Agents work? [[how-mlagents-works]] Before training our agent, we need to understand **what ML-Agents is and how it works**. ## What is Unity ML-Agents? [[what-is-mlagents]] [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents) is a toolkit for the game engine Unity that **allows us to create environments using Unity or use pre-made environments to train our agents**. It’s developed by [Unity Technologies](https://unity.com/), the developers of Unity, one of the most famous Game Engines used by the creators of Firewatch, Cuphead, and Cities: Skylines. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/firewatch.jpeg" alt="Firewatch"/> <figcaption>Firewatch was made with Unity</figcaption> </figure> ## The six components [[six-components]] With Unity ML-Agents, you have six essential components: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/mlagents-1.png" alt="MLAgents"/> <figcaption>Source: <a href="https://unity-technologies.github.io/ml-agents/">Unity ML-Agents Documentation</a> </figcaption> </figure> - The first is the *Learning Environment*, which contains **the Unity scene (the environment) and the environment elements** (game characters). - The second is the *Python Low-level API*, which contains **the low-level Python interface for interacting and manipulating the environment**. It’s the API we use to launch the training. - Then, we have the *External Communicator* that **connects the Learning Environment (made with C#) with the low level Python API (Python)**. - The *Python trainers*: the **Reinforcement algorithms made with PyTorch (PPO, SAC…)**. - The *Gym wrapper*: to encapsulate the RL environment in a gym wrapper. - The *PettingZoo wrapper*: PettingZoo is the multi-agents version of the gym wrapper. ## Inside the Learning Component [[inside-learning-component]] Inside the Learning Component, we have **two important elements**: - The first is the *agent component*, the actor of the scene. We’ll **train the agent by optimizing its policy** (which will tell us what action to take in each state). The policy is called the *Brain*. - Finally, there is the *Academy*. This component **orchestrates agents and their decision-making processes**. Think of this Academy as a teacher who handles Python API requests. To better understand its role, let’s remember the RL process. This can be modeled as a loop that works like this: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> Now, let’s imagine an agent learning to play a platform game. The RL process looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. 
- The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** The goal of the agent is to **maximize the expected cumulative reward**. The Academy will be the one that will **send the order to our Agents and ensure that agents are in sync**: - Collect Observations - Select your action using your policy - Take the Action - Reset if you reached the max step or if you’re done. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/academy.png" alt="The MLAgents Academy" width="100%"> Now that we understand how ML-Agents works, **we’re ready to train our agents.**
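In code terms, the loop the Academy orchestrates corresponds to the familiar interaction loop below, sketched here with a generic gym environment for illustration (assuming the classic `gym` API; ML-Agents exposes the equivalent loop through its Python API and wrappers):

```python
import gym

env = gym.make("CartPole-v1")  # stand-in environment with the same loop structure
state = env.reset()
for step in range(1000):
    action = env.action_space.sample()            # select an action (here: a random policy)
    state, reward, done, info = env.step(action)  # take the action, observe reward and next state
    if done:
        state = env.reset()                       # reset when the episode ends
env.close()
```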
deep-rl-class/units/en/unit5/how-mlagents-works.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/how-mlagents-works.mdx", "repo_id": "deep-rl-class", "token_count": 1276 }
100
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.png" alt="Thumbnail"/> Since the beginning of this course, we learned to train agents in a *single-agent system* where our agent was alone in its environment: it was **not cooperating or collaborating with other agents**. This worked great, and the single-agent system is useful for many applications. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/> <figcaption> A patchwork of all the environments you’ve trained your agents on since the beginning of the course </figcaption> </figure> But, as humans, **we live in a multi-agent world**. Our intelligence comes from interaction with other agents. And so, our **goal is to create agents that can interact with other humans and other agents**. Consequently, we must study how to train deep reinforcement learning agents in a *multi-agent system* to build robust agents that can adapt, collaborate, or compete. So today we’re going to **learn the basics of the fascinating topic of multi-agent reinforcement learning (MARL)**. And the most exciting part is that, during this unit, you’re going to train your first agents in a multi-agent system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in the **AI vs. AI challenge**, where your trained agent will compete against other classmates’ agents every day and be ranked on a [new leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> So let’s get started!
deep-rl-class/units/en/unit7/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 574 }
101
# Introduction [[introduction]] In this bonus unit, we'll reinforce what we learned in the first unit by teaching Huggy the Dog to fetch the stick and then [play with him directly in your browser](https://huggingface.co/spaces/ThomasSimonini/Huggy) 🐶 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Unit bonus 1 thumbnail" width="100%"> So let's get started 🚀
deep-rl-class/units/en/unitbonus1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 138 }
102
# Brief introduction to RL documentation In this advanced topic, we address the question: **how should we monitor and keep track of powerful reinforcement learning agents that we are training in the real world and interfacing with humans?** As machine learning systems have increasingly impacted modern life, the **call for the documentation of these systems has grown**. Such documentation can cover aspects such as the training data used — where it is stored, when it was collected, who was involved, etc. — or the model optimization framework — the architecture, evaluation metrics, relevant papers, etc. — and more. Today, model cards and datasheets are becoming increasingly available. For example, on the Hub (see documentation [here](https://huggingface.co/docs/hub/model-cards)). If you click on a [popular model on the Hub](https://huggingface.co/models), you can learn about its creation process. These model- and data-specific logs are designed to be completed when the model or dataset is created, leaving them un-updated when these models are built into evolving systems in the future. ## Motivating Reward Reports Reinforcement learning systems are fundamentally designed to optimize based on measurements of reward and time. While the notion of a reward function can be mapped nicely to many well-understood fields of supervised learning (via a loss function), understanding of how machine learning systems evolve over time is limited. To that end, the authors introduce [*Reward Reports for Reinforcement Learning*](https://www.notion.so/Brief-introduction-to-RL-documentation-b8cbda5a6f5242338e0756e6bef72af4) (the pithy naming is designed to mirror the popular papers *Model Cards for Model Reporting* and *Datasheets for Datasets*). The goal is to propose a type of documentation focused on the **human factors of reward** and **time-varying feedback systems**. Building on the documentation frameworks for [model cards](https://arxiv.org/abs/1810.03993) and [datasheets](https://arxiv.org/abs/1803.09010) proposed by Mitchell et al. and Gebru et al., we argue the need for Reward Reports for AI systems. **Reward Reports** are living documents for proposed RL deployments that demarcate design choices. However, many questions remain about the applicability of this framework to different RL applications, roadblocks to system interpretability, and the resonances between deployed supervised machine learning systems and the sequential decision-making utilized in RL. At a minimum, Reward Reports are an opportunity for RL practitioners to deliberate on these questions and begin the work of deciding how to resolve them in practice. ## Capturing temporal behavior with documentation The core piece specific to documentation designed for RL and feedback-driven ML systems is a *change-log*. The change-log updates information from the designer (changed training parameters, data, etc.) along with noticed changes from the user (harmful behavior, unexpected responses, etc.). The change-log is accompanied by update triggers that encourage monitoring these effects. ## Contributing Some of the most impactful RL-driven systems are multi-stakeholder in nature and behind the closed doors of private corporations. These corporations are largely without regulation, so the burden of documentation falls on the public. If you are interested in contributing, we are building Reward Reports for popular machine learning systems on a public record on [GitHub](https://github.com/RewardReports/reward-reports).
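For instance, a minimal change-log entry might look like the following sketch (a purely hypothetical example, expressed as a Python record rather than any official Reward Reports schema):

```python
# Hypothetical change-log entry for a deployed RL recommender
changelog_entry = {
    "date": "2022-06-01",
    "author": "system designer",
    "change": "Reduced the click-through weight in the reward function from 0.8 to 0.5",
    "trigger": "Monitoring flagged an increase in clickbait-style recommendations",
    "observed_effect": "To be evaluated at the next scheduled review",
}
```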
For further reading, you can visit the Reward Reports [paper](https://arxiv.org/abs/2204.10817) or look at [an example report](https://github.com/RewardReports/reward-reports/tree/main/examples). ## Author This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
deep-rl-class/units/en/unitbonus3/rl-documentation.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/rl-documentation.mdx", "repo_id": "deep-rl-class", "token_count": 886 }
103
import argparse import sys sys.path.append(".") from base_classes import ControlNetBenchmark, ControlNetSDXLBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="lllyasviel/sd-controlnet-canny", choices=["lllyasviel/sd-controlnet-canny", "diffusers/controlnet-canny-sdxl-1.0"], ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = ( ControlNetBenchmark(args) if args.ckpt == "lllyasviel/sd-controlnet-canny" else ControlNetSDXLBenchmark(args) ) benchmark_pipe.benchmark(args)
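# Example invocations, assuming a CUDA-capable environment with the benchmark
# dependencies installed (flags correspond to the argparse options above):
#
#   python benchmark_controlnet.py --ckpt lllyasviel/sd-controlnet-canny --batch_size 1
#   python benchmark_controlnet.py --ckpt diffusers/controlnet-canny-sdxl-1.0 --run_compile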
diffusers/benchmarks/benchmark_controlnet.py/0
{ "file_path": "diffusers/benchmarks/benchmark_controlnet.py", "repo_id": "diffusers", "token_count": 352 }
104
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Single files The [`~loaders.FromSingleFileMixin.from_single_file`] method allows you to load: * a model stored in a single file, which is useful if you're working with models from the diffusion ecosystem, like Automatic1111, and commonly rely on a single-file layout to store and share models * a model stored in their originally distributed layout, which is useful if you're working with models finetuned with other services, and want to load it directly into Diffusers model objects and pipelines > [!TIP] > Read the [Model files and layouts](../../using-diffusers/other-formats) guide to learn more about the Diffusers-multifolder layout versus the single-file layout, and how to load models stored in these different layouts. ## Supported pipelines - [`CogVideoXPipeline`] - [`StableDiffusionPipeline`] - [`StableDiffusionImg2ImgPipeline`] - [`StableDiffusionInpaintPipeline`] - [`StableDiffusionControlNetPipeline`] - [`StableDiffusionControlNetImg2ImgPipeline`] - [`StableDiffusionControlNetInpaintPipeline`] - [`StableDiffusionUpscalePipeline`] - [`StableDiffusionXLPipeline`] - [`StableDiffusionXLImg2ImgPipeline`] - [`StableDiffusionXLInpaintPipeline`] - [`StableDiffusionXLInstructPix2PixPipeline`] - [`StableDiffusionXLControlNetPipeline`] - [`StableDiffusionXLKDiffusionPipeline`] - [`StableDiffusion3Pipeline`] - [`LatentConsistencyModelPipeline`] - [`LatentConsistencyModelImg2ImgPipeline`] - [`StableDiffusionControlNetXSPipeline`] - [`StableDiffusionXLControlNetXSPipeline`] - [`LEditsPPPipelineStableDiffusion`] - [`LEditsPPPipelineStableDiffusionXL`] - [`PIAPipeline`] ## Supported models - [`UNet2DConditionModel`] - [`StableCascadeUNet`] - [`AutoencoderKL`] - [`AutoencoderKLCogVideoX`] - [`ControlNetModel`] - [`SD3Transformer2DModel`] - [`FluxTransformer2DModel`] ## FromSingleFileMixin [[autodoc]] loaders.single_file.FromSingleFileMixin ## FromOriginalModelMixin [[autodoc]] loaders.single_file_model.FromOriginalModelMixin
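As a quick illustration, loading a pipeline from a single-file checkpoint looks like the following sketch (the checkpoint URL is illustrative; any supported single-file checkpoint, local path or Hub URL, works):

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_single_file(
    "https://huggingface.co/Lykon/dreamshaper-8/blob/main/dreamshaper_8.safetensors"
)
```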
diffusers/docs/source/en/api/loaders/single_file.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/single_file.md", "repo_id": "diffusers", "token_count": 831 }
105
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Latent upscaler The Stable Diffusion latent upscaler model was created by [Katherine Crowson](https://github.com/crowsonkb/k-diffusion) in collaboration with [Stability AI](https://stability.ai/). It is used to enhance the output image resolution by a factor of 2 (see this demo [notebook](https://colab.research.google.com/drive/1o1qYJcFeywzCIdkfKJy7cTpgZTCM2EI4) for a demonstration of the original implementation). <Tip> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! </Tip> ## StableDiffusionLatentUpscalePipeline [[autodoc]] StableDiffusionLatentUpscalePipeline - all - __call__ - enable_sequential_cpu_offload - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
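A minimal usage sketch is shown below. The model ids follow the official checkpoints, while the prompt and parameters are illustrative; the base pipeline produces latents (`output_type="latent"`) which the upscaler then enhances by 2x:

```python
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
low_res_latents = pipeline(prompt, output_type="latent").images  # keep the latents
image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
).images[0]
image.save("astronaut_2x.png")
```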
diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md", "repo_id": "diffusers", "token_count": 543 }
106
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Habana Gaudi 🤗 Diffusers is compatible with Habana Gaudi through 🤗 [Optimum](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion). Follow the [installation](https://docs.habana.ai/en/latest/Installation_Guide/index.html) guide to install the SynapseAI and Gaudi drivers, and then install Optimum Habana: ```bash python -m pip install --upgrade-strategy eager optimum[habana] ``` To generate images with Stable Diffusion 1 and 2 on Gaudi, you need to instantiate two instances: - [`~optimum.habana.diffusers.GaudiStableDiffusionPipeline`], a pipeline for text-to-image generation. - [`~optimum.habana.diffusers.GaudiDDIMScheduler`], a Gaudi-optimized scheduler. When you initialize the pipeline, you have to specify `use_habana=True` to deploy it on HPUs and to get the fastest possible generation, you should enable **HPU graphs** with `use_hpu_graphs=True`. Finally, specify a [`~optimum.habana.GaudiConfig`] which can be downloaded from the [Habana](https://huggingface.co/Habana) organization on the Hub. ```python from optimum.habana import GaudiConfig from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline model_name = "stabilityai/stable-diffusion-2-base" scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler") pipeline = GaudiStableDiffusionPipeline.from_pretrained( model_name, scheduler=scheduler, use_habana=True, use_hpu_graphs=True, gaudi_config="Habana/stable-diffusion-2", ) ``` Now you can call the pipeline to generate images by batches from one or several prompts: ```python outputs = pipeline( prompt=[ "High quality photo of an astronaut riding a horse in space", "Face of a yellow cat, high resolution, sitting on a park bench", ], num_images_per_prompt=10, batch_size=4, ) ``` For more information, check out 🤗 Optimum Habana's [documentation](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion) and the [example](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion) provided in the official GitHub repository. ## Benchmark We benchmarked Habana's first-generation Gaudi and Gaudi2 with the [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) and [Habana/stable-diffusion-2](https://huggingface.co/Habana/stable-diffusion-2) Gaudi configurations (mixed precision bf16/fp32) to demonstrate their performance. 
For [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) on 512x512 images: | | Latency (batch size = 1) | Throughput | | ---------------------- |:------------------------:|:---------------------------:| | first-generation Gaudi | 3.80s | 0.308 images/s (batch size = 8) | | Gaudi2 | 1.33s | 1.081 images/s (batch size = 8) | For [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) on 768x768 images: | | Latency (batch size = 1) | Throughput | | ---------------------- |:------------------------:|:-------------------------------:| | first-generation Gaudi | 10.2s | 0.108 images/s (batch size = 4) | | Gaudi2 | 3.17s | 0.379 images/s (batch size = 8) |
diffusers/docs/source/en/optimization/habana.md/0
{ "file_path": "diffusers/docs/source/en/optimization/habana.md", "repo_id": "diffusers", "token_count": 1399 }
107
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Distributed inference with multiple GPUs On distributed setups, you can run inference across multiple GPUs with 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) or [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html), which is useful for generating with multiple prompts in parallel. This guide will show you how to use 🤗 Accelerate and PyTorch Distributed for distributed inference. ## 🤗 Accelerate 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) is a library designed to make it easy to train or run inference across distributed setups. It simplifies the process of setting up the distributed environment, allowing you to focus on your PyTorch code. To begin, create a Python file and initialize an [`accelerate.PartialState`] to create a distributed environment; your setup is automatically detected so you don't need to explicitly define the `rank` or `world_size`. Move the [`DiffusionPipeline`] to `distributed_state.device` to assign a GPU to each process. Now use the [`~accelerate.PartialState.split_between_processes`] utility as a context manager to automatically distribute the prompts between the number of processes. ```py import torch from accelerate import PartialState from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) distributed_state = PartialState() pipeline.to(distributed_state.device) with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: result = pipeline(prompt).images[0] result.save(f"result_{distributed_state.process_index}.png") ``` Use the `--num_processes` argument to specify the number of GPUs to use, and call `accelerate launch` to run the script: ```bash accelerate launch run_distributed.py --num_processes=2 ``` <Tip> Refer to this minimal example [script](https://gist.github.com/sayakpaul/cfaebd221820d7b43fae638b4dfa01ba) for running inference across multiple GPUs. To learn more, take a look at the [Distributed Inference with 🤗 Accelerate](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) guide. </Tip> ## PyTorch Distributed PyTorch supports [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) which enables data parallelism. To start, create a Python file and import `torch.distributed` and `torch.multiprocessing` to set up the distributed process group and to spawn the processes for inference on each GPU. 
You should also initialize a [`DiffusionPipeline`]: ```py import torch import torch.distributed as dist import torch.multiprocessing as mp from diffusers import DiffusionPipeline sd = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) ``` You'll want to create a function to run inference; [`init_process_group`](https://pytorch.org/docs/stable/distributed.html?highlight=init_process_group#torch.distributed.init_process_group) handles creating a distributed environment with the type of backend to use, the `rank` of the current process, and the `world_size` or the number of processes participating. If you're running inference in parallel over 2 GPUs, then the `world_size` is 2. Move the [`DiffusionPipeline`] to `rank` and use `get_rank` to assign a GPU to each process, where each process handles a different prompt: ```py def run_inference(rank, world_size): dist.init_process_group("nccl", rank=rank, world_size=world_size) sd.to(rank) if torch.distributed.get_rank() == 0: prompt = "a dog" elif torch.distributed.get_rank() == 1: prompt = "a cat" image = sd(prompt).images[0] image.save(f"./{'_'.join(prompt)}.png") ``` To run the distributed inference, call [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) to run the `run_inference` function on the number of GPUs defined in `world_size`: ```py def main(): world_size = 2 mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True) if __name__ == "__main__": main() ``` Once you've completed the inference script, use the `--nproc_per_node` argument to specify the number of GPUs to use and call `torchrun` to run the script: ```bash torchrun run_distributed.py --nproc_per_node=2 ``` > [!TIP] > You can use `device_map` within a [`DiffusionPipeline`] to distribute its model-level components on multiple devices. Refer to the [Device placement](../tutorials/inference_with_big_models#device-placement) guide to learn more.
diffusers/docs/source/en/training/distributed_inference.md/0
{ "file_path": "diffusers/docs/source/en/training/distributed_inference.md", "repo_id": "diffusers", "token_count": 1621 }
108
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Working with big models A modern diffusion model, like [Stable Diffusion XL (SDXL)](../using-diffusers/sdxl), is not just a single model, but a collection of multiple models. SDXL has four different model-level components: * A variational autoencoder (VAE) * Two text encoders * A UNet for denoising Usually, the text encoders and the denoiser are much larger compared to the VAE. As models get bigger and better, it’s possible your model is so big that even a single copy won’t fit in memory. But that doesn’t mean it can’t be loaded. If you have more than one GPU, there is more memory available to store your model. In this case, it’s better to split your model checkpoint into several smaller *checkpoint shards*. When a text encoder checkpoint has multiple shards, like [T5-xxl for SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers/tree/main/text_encoder_3), it is automatically handled by the [Transformers](https://huggingface.co/docs/transformers/index) library as it is a required dependency of Diffusers when using the [`StableDiffusion3Pipeline`]. More specifically, Transformers will automatically handle the loading of multiple shards within the requested model class and get it ready so that inference can be performed. The denoiser checkpoint can also have multiple shards and supports inference thanks to the [Accelerate](https://huggingface.co/docs/accelerate/index) library. > [!TIP] > Refer to the [Handling big models for inference](https://huggingface.co/docs/accelerate/main/en/concept_guides/big_model_inference) guide for general guidance when working with big models that are hard to fit into memory. For example, let's save a sharded checkpoint for the [SDXL UNet](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/unet): ```python from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet" ) unet.save_pretrained("sdxl-unet-sharded", max_shard_size="5GB") ``` The size of the fp32 variant of the SDXL UNet checkpoint is ~10.4GB. Set the `max_shard_size` parameter to 5GB to create 3 shards. 
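To sanity-check what sharding produced, you can list the files written by `save_pretrained`. With `max_shard_size="5GB"` you should see a config file plus several weight shards and an index file that maps parameters to shards; the exact file names below are illustrative and may vary between Diffusers versions:

```py
import os

# Expect something like:
#   config.json
#   diffusion_pytorch_model-00001-of-00003.safetensors
#   diffusion_pytorch_model-00002-of-00003.safetensors
#   diffusion_pytorch_model-00003-of-00003.safetensors
#   diffusion_pytorch_model.safetensors.index.json
for file_name in sorted(os.listdir("sdxl-unet-sharded")):
    print(file_name)
```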
After saving, you can load them in [`StableDiffusionXLPipeline`]: ```python from diffusers import UNet2DConditionModel, StableDiffusionXLPipeline import torch unet = UNet2DConditionModel.from_pretrained( "sayakpaul/sdxl-unet-sharded", torch_dtype=torch.float16 ) pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16 ).to("cuda") image = pipeline("a cute dog running on the grass", num_inference_steps=30).images[0] image.save("dog.png") ``` If placing all the model-level components on the GPU at once is not feasible, use [`~DiffusionPipeline.enable_model_cpu_offload`] to help you: ```diff - pipeline.to("cuda") + pipeline.enable_model_cpu_offload() ``` In general, we recommend sharding when a checkpoint is more than 5GB (in fp32). ## Device placement On distributed setups, you can run inference across multiple GPUs with Accelerate. > [!WARNING] > This feature is experimental and its APIs might change in the future. With Accelerate, you can use the `device_map` to determine how to distribute the models of a pipeline across multiple devices. This is useful in situations where you have more than one GPU. For example, if you have two 8GB GPUs, then using [`~DiffusionPipeline.enable_model_cpu_offload`] may not work so well because: * it only works on a single GPU * a single model might not fit on a single GPU ([`~DiffusionPipeline.enable_sequential_cpu_offload`] might work but it will be extremely slow and it is also limited to a single GPU) To make use of both GPUs, you can use the "balanced" device placement strategy which splits the models across all available GPUs. > [!WARNING] > Only the "balanced" strategy is supported at the moment, and we plan to support additional mapping strategies in the future. ```diff from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, + "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, device_map="balanced" ) image = pipeline("a dog").images[0] image ``` You can also pass a dictionary to enforce the maximum GPU memory that can be used on each device: ```diff from diffusers import DiffusionPipeline import torch max_memory = {0:"1GB", 1:"1GB"} pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, device_map="balanced", + max_memory=max_memory ) image = pipeline("a dog").images[0] image ``` If a device is not present in `max_memory`, then it will be completely ignored and will not participate in the device placement. By default, Diffusers uses the maximum memory of all devices. If the models don't fit on the GPUs, they are offloaded to the CPU. If the CPU doesn't have enough memory, then you might see an error. In that case, you could defer to using [`~DiffusionPipeline.enable_sequential_cpu_offload`] and [`~DiffusionPipeline.enable_model_cpu_offload`]. Call [`~DiffusionPipeline.reset_device_map`] to reset the `device_map` of a pipeline. This is also necessary if you want to use methods like `to()`, [`~DiffusionPipeline.enable_sequential_cpu_offload`], and [`~DiffusionPipeline.enable_model_cpu_offload`] on a pipeline that was device-mapped. 
```py pipeline.reset_device_map() ``` Once a pipeline has been device-mapped, you can also access its device map via `hf_device_map`: ```py print(pipeline.hf_device_map) ``` An example device map would look like so: ```bash {'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0} ```
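For example, to switch a device-mapped pipeline over to CPU offloading, a sketch like this should work:

```py
# The device map must be cleared before offloading methods can be used.
pipeline.reset_device_map()
pipeline.enable_model_cpu_offload()
```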
diffusers/docs/source/en/tutorials/inference_with_big_models.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/inference_with_big_models.md", "repo_id": "diffusers", "token_count": 1972 }
109
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Kandinsky

[[open-in-colab]]

The Kandinsky models are a series of multilingual text-to-image generation models. The Kandinsky 2.0 model uses two multilingual text encoders and concatenates those results for the UNet.

[Kandinsky 2.1](../api/pipelines/kandinsky) changes the architecture to include an image prior model ([`CLIP`](https://huggingface.co/docs/transformers/model_doc/clip)) to generate a mapping between text and image embeddings. The mapping provides better text-image alignment and it is used with the text embeddings during training, leading to higher quality results. Finally, Kandinsky 2.1 uses a [Modulating Quantized Vectors (MoVQ)](https://huggingface.co/papers/2209.09002) decoder - which adds a spatial conditional normalization layer to increase photorealism - to decode the latents into images.

[Kandinsky 2.2](../api/pipelines/kandinsky_v22) improves on the previous model by replacing the image encoder of the image prior model with a larger CLIP-ViT-G model to improve quality. The image prior model was also retrained on images with different resolutions and aspect ratios to generate higher-resolution images and different image sizes.

[Kandinsky 3](../api/pipelines/kandinsky3) simplifies the architecture and shifts away from the two-stage generation process involving the prior model and diffusion model. Instead, Kandinsky 3 uses [Flan-UL2](https://huggingface.co/google/flan-ul2) to encode text, a UNet with [BigGan-deep](https://hf.co/papers/1809.11096) blocks, and [Sber-MoVQGAN](https://github.com/ai-forever/MoVQGAN) to decode the latents into images. Text understanding and generated image quality are primarily achieved by using a larger text encoder and UNet.

This guide will show you how to use the Kandinsky models for text-to-image, image-to-image, inpainting, interpolation, and more.

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install -q diffusers transformers accelerate
```

<Tip warning={true}>

Kandinsky 2.1 and 2.2 usage is very similar! The only difference is Kandinsky 2.2 doesn't accept `prompt` as an input when decoding the latents. Instead, Kandinsky 2.2 only accepts `image_embeds` during decoding.

<br>

Kandinsky 3 has a more concise architecture and it doesn't require a prior model. This means its usage is identical to other diffusion models like [Stable Diffusion XL](sdxl).

</Tip>

## Text-to-image

To use the Kandinsky models for any task, you always start by setting up the prior pipeline to encode the prompt and generate the image embeddings. The prior pipeline also generates `negative_image_embeds` that correspond to the negative prompt `""`. For better results, you can pass an actual `negative_prompt` to the prior pipeline, but this'll increase the effective batch size of the prior pipeline by 2x.
<hfoptions id="text-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyPriorPipeline, KandinskyPipeline import torch prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda") pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda") prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" # optional to include a negative prompt, but results are usually better image_embeds, negative_image_embeds = prior_pipeline(prompt, negative_prompt, guidance_scale=1.0).to_tuple() ``` Now pass all the prompts and embeddings to the [`KandinskyPipeline`] to generate an image: ```py image = pipeline(prompt, image_embeds=image_embeds, negative_prompt=negative_prompt, negative_image_embeds=negative_image_embeds, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/cheeseburger.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline import torch prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda") pipeline = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda") prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" # optional to include a negative prompt, but results are usually better image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple() ``` Pass the `image_embeds` and `negative_image_embeds` to the [`KandinskyV22Pipeline`] to generate an image: ```py image = pipeline(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-text-to-image.png"/> </div> </hfoption> <hfoption id="Kandinsky 3"> Kandinsky 3 doesn't require a prior model so you can directly load the [`Kandinsky3Pipeline`] and pass a prompt to generate an image: ```py from diffusers import Kandinsky3Pipeline import torch pipeline = Kandinsky3Pipeline.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" image = pipeline(prompt).images[0] image ``` </hfoption> </hfoptions> 🤗 Diffusers also provides an end-to-end API with the [`KandinskyCombinedPipeline`] and [`KandinskyV22CombinedPipeline`], meaning you don't have to separately load the prior and text-to-image pipeline. The combined pipeline automatically loads both the prior model and the decoder. You can still set different values for the prior pipeline with the `prior_guidance_scale` and `prior_num_inference_steps` parameters if you want. 
Use the [`AutoPipelineForText2Image`] to automatically call the combined pipelines under the hood: <hfoptions id="text-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, guidance_scale=4.0, height=768, width=768).images[0] image ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, guidance_scale=4.0, height=768, width=768).images[0] image ``` </hfoption> </hfoptions> ## Image-to-image For image-to-image, pass the initial image and text prompt to condition the image to the pipeline. Start by loading the prior pipeline: <hfoptions id="image-to-image"> <hfoption id="Kandinsky 2.1"> ```py import torch from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py import torch from diffusers import KandinskyV22Img2ImgPipeline, KandinskyPriorPipeline prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 3"> Kandinsky 3 doesn't require a prior model so you can directly load the image-to-image pipeline: ```py from diffusers import Kandinsky3Img2ImgPipeline from diffusers.utils import load_image import torch pipeline = Kandinsky3Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() ``` </hfoption> </hfoptions> Download an image to condition on: ```py from diffusers.utils import load_image # download image url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image = original_image.resize((768, 512)) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"/> </div> Generate the `image_embeds` and `negative_image_embeds` with the prior pipeline: ```py prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" image_embeds, negative_image_embeds = prior_pipeline(prompt, negative_prompt).to_tuple() 
```

Now pass the original image, and all the prompts and embeddings to the pipeline to generate an image:

<hfoptions id="image-to-image">
<hfoption id="Kandinsky 2.1">

```py
from diffusers.utils import make_image_grid

image = pipeline(prompt, negative_prompt=negative_prompt, image=original_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768, strength=0.3).images[0]
make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2)
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/img2img_fantasyland.png"/>
</div>

</hfoption>
<hfoption id="Kandinsky 2.2">

```py
from diffusers.utils import make_image_grid

image = pipeline(image=original_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768, strength=0.3).images[0]
make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2)
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-image-to-image.png"/>
</div>

</hfoption>
<hfoption id="Kandinsky 3">

```py
image = pipeline(prompt, negative_prompt=negative_prompt, image=original_image, strength=0.75, num_inference_steps=25).images[0]
image
```

</hfoption>
</hfoptions>

🤗 Diffusers also provides an end-to-end API with the [`KandinskyImg2ImgCombinedPipeline`] and [`KandinskyV22Img2ImgCombinedPipeline`], meaning you don't have to separately load the prior and image-to-image pipeline. The combined pipeline automatically loads both the prior model and the decoder. You can still set different values for the prior pipeline with the `prior_guidance_scale` and `prior_num_inference_steps` parameters if you want.
Use the [`AutoPipelineForImage2Image`] to automatically call the combined pipelines under the hood: <hfoptions id="image-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image import torch pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True) pipeline.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image.thumbnail((768, 768)) image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=original_image, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image import torch pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image.thumbnail((768, 768)) image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=original_image, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` </hfoption> </hfoptions> ## Inpainting <Tip warning={true}> ⚠️ The Kandinsky models use ⬜️ **white pixels** to represent the masked area now instead of black pixels. If you are using [`KandinskyInpaintPipeline`] in production, you need to change the mask to use white pixels: ```py # For PIL input import PIL.ImageOps mask = PIL.ImageOps.invert(mask) # For PyTorch and NumPy input mask = 1 - mask ``` </Tip> For inpainting, you'll need the original image, a mask of the area to replace in the original image, and a text prompt of what to inpaint. 
Load the prior pipeline: <hfoptions id="inpaint"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline from diffusers.utils import load_image, make_image_grid import torch import numpy as np from PIL import Image prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline from diffusers.utils import load_image, make_image_grid import torch import numpy as np from PIL import Image prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyV22InpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> </hfoptions> Load an initial image and create a mask: ```py init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") mask = np.zeros((768, 768), dtype=np.float32) # mask area above cat's head mask[:250, 250:-250] = 1 ``` Generate the embeddings with the prior pipeline: ```py prompt = "a hat" prior_output = prior_pipeline(prompt) ``` Now pass the initial image, mask, and prompt and embeddings to the pipeline to generate an image: <hfoptions id="inpaint"> <hfoption id="Kandinsky 2.1"> ```py output_image = pipeline(prompt, image=init_image, mask_image=mask, **prior_output, height=768, width=768, num_inference_steps=150).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/inpaint_cat_hat.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py output_image = pipeline(image=init_image, mask_image=mask, **prior_output, height=768, width=768, num_inference_steps=150).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinskyv22-inpaint.png"/> </div> </hfoption> </hfoptions> You can also use the end-to-end [`KandinskyInpaintCombinedPipeline`] and [`KandinskyV22InpaintCombinedPipeline`] to call the prior and decoder pipelines together under the hood. 
Use the [`AutoPipelineForInpainting`] for this:

<hfoptions id="inpaint">
<hfoption id="Kandinsky 2.1">

```py
import torch
import numpy as np
from PIL import Image
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
mask = np.zeros((768, 768), dtype=np.float32)
# mask area above cat's head
mask[:250, 250:-250] = 1
prompt = "a hat"

output_image = pipe(prompt=prompt, image=init_image, mask_image=mask).images[0]
mask = Image.fromarray((mask*255).astype('uint8'), 'L')
make_image_grid([init_image, mask, output_image], rows=1, cols=3)
```

</hfoption>
<hfoption id="Kandinsky 2.2">

```py
import torch
import numpy as np
from PIL import Image
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image, make_image_grid

pipe = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
mask = np.zeros((768, 768), dtype=np.float32)
# mask area above cat's head
mask[:250, 250:-250] = 1
prompt = "a hat"

output_image = pipe(prompt=prompt, image=init_image, mask_image=mask).images[0]
mask = Image.fromarray((mask*255).astype('uint8'), 'L')
make_image_grid([init_image, mask, output_image], rows=1, cols=3)
```

</hfoption>
</hfoptions>

## Interpolation

Interpolation allows you to explore the latent space between the image and text embeddings, which is a cool way to see some of the prior model's intermediate outputs.
Load the prior pipeline and two images you'd like to interpolate: <hfoptions id="interpolate"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyPriorPipeline, KandinskyPipeline from diffusers.utils import load_image, make_image_grid import torch prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") img_1 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") img_2 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg") make_image_grid([img_1.resize((512,512)), img_2.resize((512,512))], rows=1, cols=2) ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline from diffusers.utils import load_image, make_image_grid import torch prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") img_1 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") img_2 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg") make_image_grid([img_1.resize((512,512)), img_2.resize((512,512))], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">a cat</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Van Gogh's Starry Night painting</figcaption> </div> </div> Specify the text or images to interpolate, and set the weights for each text or image. Experiment with the weights to see how they affect the interpolation! 
```py
images_texts = ["a cat", img_1, img_2]
weights = [0.3, 0.3, 0.4]
```

Call the `interpolate` function to generate the embeddings, and then pass them to the pipeline to generate the image:

<hfoptions id="interpolate">
<hfoption id="Kandinsky 2.1">

```py
# prompt can be left empty
prompt = ""
prior_out = prior_pipeline.interpolate(images_texts, weights)

pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda")

image = pipeline(prompt, **prior_out, height=768, width=768).images[0]
image
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/starry_cat.png"/>
</div>

</hfoption>
<hfoption id="Kandinsky 2.2">

```py
# prompt can be left empty
prompt = ""
prior_out = prior_pipeline.interpolate(images_texts, weights)

pipeline = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True).to("cuda")

image = pipeline(prompt, **prior_out, height=768, width=768).images[0]
image
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinskyv22-interpolate.png"/>
</div>

</hfoption>
</hfoptions>

## ControlNet

<Tip warning={true}>

⚠️ ControlNet is only supported for Kandinsky 2.2!

</Tip>

ControlNet enables conditioning large pretrained diffusion models with additional inputs such as a depth map or edge detection. For example, you can condition Kandinsky 2.2 with a depth map so the model understands and preserves the structure of the depth image.

Let's load an image and extract its depth map:

```py
from diffusers.utils import load_image

img = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"
).resize((768, 768))
img
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"/>
</div>

Then you can use the `depth-estimation` [`~transformers.Pipeline`] from 🤗 Transformers to process the image and retrieve the depth map:

```py
import torch
import numpy as np

from transformers import pipeline

def make_hint(image, depth_estimator):
    image = depth_estimator(image)["depth"]
    image = np.array(image)
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)
    detected_map = torch.from_numpy(image).float() / 255.0
    hint = detected_map.permute(2, 0, 1)
    return hint

depth_estimator = pipeline("depth-estimation")
hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
```

### Text-to-image [[controlnet-text-to-image]]

Load the prior pipeline and the [`KandinskyV22ControlnetPipeline`]:

```py
from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline

prior_pipeline = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")
```

Generate the image embeddings from a prompt and negative prompt:

```py
prompt = "A robot, 4k photo"

negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers,
mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" generator = torch.Generator(device="cuda").manual_seed(43) image_emb, zero_image_emb = prior_pipeline( prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ).to_tuple() ``` Finally, pass the image embeddings and the depth image to the [`KandinskyV22ControlnetPipeline`] to generate an image: ```py image = pipeline(image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, num_inference_steps=50, generator=generator, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat_text2img.png"/> </div> ### Image-to-image [[controlnet-image-to-image]] For image-to-image with ControlNet, you'll need to use the: - [`KandinskyV22PriorEmb2EmbPipeline`] to generate the image embeddings from a text prompt and an image - [`KandinskyV22ControlnetImg2ImgPipeline`] to generate an image from the initial image and the image embeddings Process and extract a depth map of an initial image of a cat with the `depth-estimation` [`~transformers.Pipeline`] from 🤗 Transformers: ```py import torch import numpy as np from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline from diffusers.utils import load_image from transformers import pipeline img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png" ).resize((768, 768)) def make_hint(image, depth_estimator): image = depth_estimator(image)["depth"] image = np.array(image) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) detected_map = torch.from_numpy(image).float() / 255.0 hint = detected_map.permute(2, 0, 1) return hint depth_estimator = pipeline("depth-estimation") hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") ``` Load the prior pipeline and the [`KandinskyV22ControlnetImg2ImgPipeline`]: ```py prior_pipeline = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ).to("cuda") ``` Pass a text prompt and the initial image to the prior pipeline to generate the image embeddings: ```py prompt = "A robot, 4k photo" negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" generator = torch.Generator(device="cuda").manual_seed(43) img_emb = prior_pipeline(prompt=prompt, image=img, strength=0.85, generator=generator) negative_emb = prior_pipeline(prompt=negative_prior_prompt, image=img, strength=1, generator=generator) ``` Now you can run the 
[`KandinskyV22ControlnetImg2ImgPipeline`] to generate an image from the initial image and the image embeddings:

```py
from diffusers.utils import make_image_grid

image = pipeline(image=img, strength=0.5, image_embeds=img_emb.image_embeds, negative_image_embeds=negative_emb.image_embeds, hint=hint, num_inference_steps=50, generator=generator, height=768, width=768).images[0]
make_image_grid([img.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2)
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat.png"/>
</div>

## Optimizations

Kandinsky is unique because it requires a prior pipeline to generate the mappings, and a second pipeline to decode the latents into an image. Optimization efforts should be focused on the second pipeline because that is where the bulk of the computation is done. Here are some tips to improve Kandinsky during inference.

1. Enable [xFormers](../optimization/xformers) if you're using PyTorch < 2.0:

```diff
  from diffusers import DiffusionPipeline
  import torch

  pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
+ pipe.enable_xformers_memory_efficient_attention()
```

2. Enable `torch.compile` if you're using PyTorch >= 2.0 to automatically use scaled dot-product attention (SDPA):

```diff
  pipe.unet.to(memory_format=torch.channels_last)
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

This is the same as explicitly setting the attention processor to use [`~models.attention_processor.AttnAddedKVProcessor2_0`]:

```py
from diffusers.models.attention_processor import AttnAddedKVProcessor2_0

pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
```

3. Offload the model to the CPU with [`~KandinskyPriorPipeline.enable_model_cpu_offload`] to avoid out-of-memory errors:

```diff
  from diffusers import DiffusionPipeline
  import torch

  pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
+ pipe.enable_model_cpu_offload()
```

4. By default, the text-to-image pipeline uses the [`DDIMScheduler`] but you can replace it with another scheduler like [`DDPMScheduler`] to see how that affects the tradeoff between inference speed and image quality:

```py
from diffusers import DDPMScheduler
from diffusers import DiffusionPipeline

scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler")
pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True).to("cuda")
```
diffusers/docs/source/en/using-diffusers/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/kandinsky.md", "repo_id": "diffusers", "token_count": 10810 }
110
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Stable Video Diffusion

[[open-in-colab]]

[Stable Video Diffusion (SVD)](https://huggingface.co/papers/2311.15127) is a powerful image-to-video generation model that can generate 2-4 second high resolution (576x1024) videos conditioned on an input image.

This guide will show you how to use SVD to generate short videos from images.

Before you begin, make sure you have the following libraries installed:

```py
# uncomment to install the necessary libraries in Colab
!pip install -q -U diffusers transformers accelerate
```

There are two variants of this model, [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) and [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt). The SVD checkpoint is trained to generate 14 frames and the SVD-XT checkpoint is further finetuned to generate 25 frames.

You'll use the SVD-XT checkpoint for this guide.

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]

export_to_video(frames, "generated.mp4", fps=7)
```

<div class="flex gap-4">
    <div>
        <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"/>
        <figcaption class="mt-2 text-center text-sm text-gray-500">"source image of a rocket"</figcaption>
    </div>
    <div>
        <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket.gif"/>
        <figcaption class="mt-2 text-center text-sm text-gray-500">"generated video from source image"</figcaption>
    </div>
</div>

## torch.compile

You can gain a 20-25% speedup at the expense of slightly increased memory by [compiling](../optimization/torch2.0#torchcompile) the UNet.

```diff
- pipe.enable_model_cpu_offload()
+ pipe.to("cuda")
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

## Reduce memory usage

Video generation is very memory intensive because you're essentially generating `num_frames` all at once, similar to text-to-image generation with a high batch size. To reduce the memory requirement, there are multiple options that trade-off inference speed for lower memory requirement:

- enable model offloading: each component of the pipeline is offloaded to the CPU once it's not needed anymore.
- enable feed-forward chunking: the feed-forward layer runs in a loop instead of running a single feed-forward with a huge batch size.
- reduce `decode_chunk_size`: the VAE decodes frames in chunks instead of decoding them all together. Setting `decode_chunk_size=1` decodes one frame at a time and uses the least amount of memory (we recommend adjusting this value based on your GPU memory) but the video might have some flickering.

```diff
- pipe.enable_model_cpu_offload()
- frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
+ pipe.enable_model_cpu_offload()
+ pipe.unet.enable_forward_chunking()
+ frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
```

Using all these tricks together should lower the memory requirement to less than 8GB VRAM.

## Micro-conditioning

Stable Video Diffusion also accepts micro-conditioning, in addition to the conditioning image, which allows more control over the generated video:

- `fps`: the frames per second of the generated video.
- `motion_bucket_id`: the motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
- `noise_aug_strength`: the amount of noise added to the conditioning image. The higher the value, the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.

For example, to generate a video with more motion, use the `motion_bucket_id` and `noise_aug_strength` micro-conditioning parameters:

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator, motion_bucket_id=180, noise_aug_strength=0.1).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket_with_conditions.gif)
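The `fps` parameter from the list above isn't shown in the example. As a rough sketch, it can be passed alongside the other micro-conditioning values (7 is the pipeline's default):

```python
# Additionally condition generation on a target frame rate; combine with
# the motion_bucket_id and noise_aug_strength values shown above as needed.
frames = pipe(
    image,
    decode_chunk_size=8,
    generator=generator,
    fps=7,
    motion_bucket_id=180,
    noise_aug_strength=0.1,
).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```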
diffusers/docs/source/en/using-diffusers/svd.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/svd.md", "repo_id": "diffusers", "token_count": 1832 }
111
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# How to contribute to Diffusers 🧨 [[how-to-contribute-to-diffusers-]]

We welcome contributions from the open-source community! Everyone can participate, and every kind of participation is valued and appreciated — not just code, but also answering questions and improving the documentation. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid to get involved if you're interested!

Everyone is encouraged to start by saying 👋 in our public Discord channel. It's where we discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕.

<a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>

Whichever way you choose to contribute, we strive to be part of an open, welcoming, and friendly community. Please read our [Code of Conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and take care to respect it during your interactions. We also ask that you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and follow the same principles of transparency and responsibility.

We value feedback from the community highly, so please don't hesitate to speak up if you have valuable feedback that can help improve the library — every message, comment, issue, and pull request (PR) is read and considered.

## Overview [[overview]]

You can contribute in many ways, from answering questions on issues to adding new diffusion models to the core library.

Below is an overview of the different ways to contribute, in ascending order of difficulty. All of them are valuable to the community.

1. Ask or answer questions on the [Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR).
2. Open new issues on the [GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose).
3. Answer issues on the [GitHub Issues tab](https://github.com/huggingface/diffusers/issues).
4. Fix a simple issue marked with the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source).
6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples).
7. Contribute to the [examples](https://github.com/huggingface/diffusers/tree/main/examples).
8. Fix a more difficult issue marked with the "Good second issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22).
9. Add a new pipeline, model, or scheduler, see the ["New pipeline/model"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) and ["New scheduler"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) issues. For this contribution, please have a look at the [design philosophy](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md).

As mentioned above, **all contributions are valuable to the community**. In the following sections, each kind of contribution is described in a bit more detail.

For all contributions 4 to 9, you will need to open a PR. How to do so is explained in detail in [Opening a pull request](#how-to-open-a-pr).

### 1.
Asking and answering questions on the Diffusers discussion forum or on the Diffusers Discord [[1-asking-and-answering-questions-on-the-diffusers-discussion-forum-or-on-the-diffusers-discord]]

Any question or comment related to the Diffusers library can be asked on the [discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or on [Discord](https://discord.gg/G7tWnz98XR). Such questions and comments include (but are not limited to):

- Reports of training or inference experiments in order to share knowledge
- Presentation of personal projects
- Questions about non-official training examples
- Project proposals
- General feedback
- Paper summaries
- Asking for help with personal projects that build on top of the Diffusers library
- General questions
- Ethical questions regarding diffusion models
- ...

Every question asked on the forum or on Discord encourages the community to share knowledge publicly and may well help a beginner who has the same question in the future. So please do ask any questions you might have. In the same spirit, you are of immense help to the community by answering such questions, because this publicly documents knowledge for everybody to learn from.

**Please** keep in mind that the more effort you put into asking or answering a question, the higher the quality of the publicly documented knowledge. Well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, whereas badly posed questions or answers lower the overall quality of the public knowledge database. In short, a high-quality question or answer is *precise, concise, relevant, easy to understand, accessible, and well formatted*. For more information, please have a look at the [How to write a good issue](#how-to-write-a-good-issue) section.

**A note about channels**: The [*forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines such as Google, and posts are ranked by popularity rather than chronologically, so it's easy to look up questions and answers posted some time ago. Posts on the forum can also easily be linked to. In contrast, *Discord* has a chat-like format that invites quick back-and-forth conversation. While you will likely get an answer faster on Discord, your question won't be visible anymore over time, and it's much harder to find information that was posted there a while back. For this reason, we strongly recommend using the forum for high-quality questions and answers in order to create long-lasting knowledge for the community. If discussions on Discord lead to very interesting answers and conclusions, we recommend posting the results on the forum to make the information more accessible to future readers.

### 2. Opening new issues on the GitHub Issues tab [[2-opening-new-issues-on-the-github-issues-tab]]

The 🧨 Diffusers library is robust and reliable thanks to users who notify us of the problems they encounter — so thank you for reporting an issue.

Remember, GitHub issues are reserved for technical questions directly related to the Diffusers library, bug reports, feature requests, or feedback on the library design. In short, everything that is **not** related to the code of the Diffusers library (including the documentation) should be asked on the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or on [Discord](https://discord.gg/G7tWnz98XR), not on GitHub.

**Please consider the following guidelines when opening a new issue**:

- Make sure you have searched whether your issue has already been reported (use the search bar in the Issues tab on GitHub).
- Please never report a new issue inside another (related) issue. If another issue is highly related, open a new issue nevertheless and link to the related one.
- Make sure your issue is written in English. If you are not comfortable in English, please use an excellent free online translation service such as [DeepL](https://www.deepl.com/translator) to translate from your native language to English.
- Check whether your issue might be solved by simply updating to the newest Diffusers version. Before posting your issue, run `python -c "import diffusers; print(diffusers.__version__)"` to verify that the Diffusers version you are using matches or is newer than the latest version.
- Remember that the more effort you put into opening a new issue, the higher the quality of the answer will be — and the better the overall quality of Diffusers issues.

#### 2.1 Reproducible, minimal bug reports [[21-reproducible-minimal-bug-reports]]

A bug report should always include a reproducible code snippet and be as minimal and concise as possible. In more detail:

- Narrow the bug down as much as possible — **do not just dump your whole code file**.
- Format your code.
- Do not include any external libraries except for the ones Diffusers depends on.
- **Always** provide all the necessary information about your environment; for this, you can run `diffusers-cli env` in your shell and copy-paste the displayed information into the issue.
- Explain the issue. If the reader doesn't know what the problem is and why it is a problem, the issue cannot be solved.
- **Always** make sure the reader can reproduce your problem with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell.
- If a model and/or dataset is required to reproduce your problem, make sure the reader has access to that model or dataset. You can upload the model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. Try to keep the model and dataset as small as possible to make reproducing the problem as effortless as possible.

For more information, please have a look at the [How to write a good issue](#how-to-write-a-good-issue) section.
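For illustration, a minimal bug report in this spirit might contain a snippet like the following — the checkpoint and prompt are just placeholders; substitute whatever actually triggers the behavior you are reporting, and paste the full traceback plus the output of `diffusers-cli env` below it:

```py
# Minimal reproduction: self-contained, copy-pasteable, no extra libraries.
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Expected: an image is returned.
# Actual: <describe the error or wrong behavior you observe here>
image = pipeline("a photo of an astronaut").images[0]
```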
To open a bug report, click [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&projects=&template=bug-report.yml).

#### 2.2. Feature requests [[22-feature-requests]]

A world-class feature request addresses the following points:

1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear about it!
* Is it something you worked on and think it could benefit the community? Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. If it is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.

You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=).

#### 2.3 Feedback [[23-feedback]]

Feedback about the library design — and why it is good or bad — massively helps the core maintainers build a user-friendly library. To understand the current design philosophy, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel that a certain design choice does not fit the current design philosophy, please explain why and how it should be changed. Conversely, if a certain design choice follows the design philosophy too strictly and therefore limits your use case, explain why and how it should be changed. If a certain design choice is very useful to you, please also leave a note, as that is great feedback for future design decisions.

You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).

#### 2.4 Technical questions [[24-technical-questions]]

Technical questions are mainly about why certain code in the library was written in a certain way, or what a certain part of the code does. Please link to the code in question and provide a detailed explanation of why that part of the code is difficult to understand.

You can open an issue for a technical question [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml).

#### 2.5 Proposal to add a new model, scheduler, or pipeline [[25-proposal-to-add-a-new-model-scheduler-or-pipeline]]

If the diffusion model community released a new model, pipeline, or scheduler that you would like to see added to the Diffusers library, please provide the following information:

* A short description of the diffusion pipeline, model, or scheduler, and a link to the paper or public release
* A link to an open-source implementation of it
* A link to the model weights, if they are available

If you are willing to contribute the model yourself, let us know so we can best guide you. Also, don't forget to tag the original author of the component (model, scheduler, pipeline, etc.) by their GitHub handle if you can find it.

You can open a request for a model/pipeline/scheduler [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml).

### 3. Answering issues on the GitHub Issues tab [[3-answering-issues-on-the-github-issues-tab]]

Answering issues on GitHub may require some technical knowledge of Diffusers, but we encourage everybody to give it a try even if you are not 100% certain that your answer is correct. Some tips for giving a high-quality answer to an issue:

- Be as concise and minimal as possible.
- Stay on topic. The answer should concern the issue and only the issue.
- Provide links to code, papers, or other sources that prove or support your point.
- Answer in code. If a simple code snippet is the answer to the issue or shows how the issue can be solved, please provide a fully reproducible code snippet.

Also, many issues tend to be simply off-topic, duplicates of other issues, or irrelevant. It is of great help to the maintainers if you answer such issues by encouraging the author to be more precise, by providing a link to the duplicate issue, or by redirecting them to the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR).

If you have verified that an issue is a correct bug report and that it requires a fix in the source code, please have a look at the next sections.

For all of the following contributions, you will need to open a PR. How to do so is explained in detail in the [Opening a pull request](#how-to-open-a-pr) section.

### 4. Fixing a "Good first issue" [[4-fixing-a-good-first-issue]]

*Good first issues* are marked with the [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) label. Usually, the issue already explains how a potential solution should look, which makes it easier to fix. If the issue hasn't been closed yet and you would like to try to fix it, just leave a message saying "I would like to try this issue.". There are usually three scenarios:

- a.) The issue description already proposes a fix. If the solution makes sense to you, you can open a PR or draft PR to fix it.
- b.) The issue description does not propose a fix. You can ask what a proposed fix could look like, and someone from the Diffusers team should answer shortly. If you have a good idea of how to fix it, feel free to directly open a PR.
- c.) There is already an open PR to fix the issue, but the issue hasn't been closed yet. If the PR has gone stale, you can simply open a new PR and link to the stale one. PRs often go stale because the original contributor suddenly cannot find the time anymore. This happens often in open source and is very normal. In this case, the community will be very happy if you give it a new try and leverage the knowledge of the existing PR. If there is already a PR and it is active, you can help the author by making suggestions, reviewing the PR, or even asking whether you can contribute to it.

### 5. Contribute to the documentation [[5-contribute-to-the-documentation]]

A good library always has good documentation! The official documentation is often one of the first points of contact for new users of the library, so contributing to the documentation is a highly valuable contribution.

Contributing to the library can take many forms:

- Correcting spelling or grammatical errors.
- Correcting incorrect formatting of the official docs or fixing a broken link — we are very happy if you take some time to correct these.
- Correcting the shape or dimensions of a docstring's input or output tensors.
- Clarifying documentation that is hard to understand or incorrect.
- Updating outdated code examples.
- Translating the documentation to another language.

Anything displayed on the [official Diffusers doc page](https://huggingface.co/docs/diffusers/index) is part of the official documentation and can be corrected in the corresponding [documentation source](https://github.com/huggingface/diffusers/tree/main/docs/source).

Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) to find out how to verify changes made to the documentation locally.

### 6. Contribute a community pipeline [[6-contribute-a-community-pipeline]]

> [!TIP]
> Read the [Community pipelines](../using-diffusers/custom_pipeline_overview#community-pipelines) guide to learn more about community pipelines. If you're curious why community pipelines exist, take a look at GitHub issue [#841](https://github.com/huggingface/diffusers/issues/841) (basically, we can't maintain every possible way diffusion models can be used for inference, but we also don't want to prevent the community from building them).

Contributing a community pipeline is a great way to share your creativity and work with the community. It lets you build on top of [`DiffusionPipeline`] so that anyone can load and use it by setting the `custom_pipeline` parameter. This section walks you through how to create a simple pipeline where the UNet only does a single forward pass and calls the scheduler once (a "one-step" pipeline).

1. Create a one_step_unet.py file for your community pipeline. This file can contain whatever packages its users install, but make sure it only has one pipeline class to load model weights and the scheduler configuration from [`DiffusionPipeline`]. Add the UNet and scheduler to the `__init__` function. You should also add the `register_modules` function so your pipeline and its components can be saved with [`~DiffusionPipeline.save_pretrained`].

```py
from diffusers import DiffusionPipeline
import torch


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)
```

2. In the forward pass (which we recommend defining as `__call__`), you can add any feature you'd like. For the "one-step" pipeline, create a random image and call the UNet and scheduler once by setting `timestep=1`.

```py
from diffusers import DiffusionPipeline
import torch


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        return scheduler_output
```

Now you can run the pipeline by passing a UNet and scheduler to it, or you can load pretrained weights if the pipeline structure is identical.

```py
from diffusers import DDPMScheduler, UNet2DModel

scheduler = DDPMScheduler()
unet = UNet2DModel()

pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)

output = pipeline()

# load pretrained weights
pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
output = pipeline()
```

You can share your pipeline as a GitHub community pipeline or a Hub community pipeline.
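Either way — the two sharing options are described right below — users would later load your pipeline through the `custom_pipeline` argument of [`~DiffusionPipeline.from_pretrained`]. As a rough sketch: for a GitHub community pipeline, pass the file name from examples/community without the .py extension; for a Hub community pipeline, pass the id of the repository hosting the one_step_unet.py file:

```py
from diffusers import DiffusionPipeline

# Loads the checkpoint's weights and configs, but runs them through the
# community pipeline class defined in one_step_unet.py.
pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="one_step_unet"
)
output = pipeline()
```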
<hfoptions id="pipeline type">
<hfoption id="GitHub pipeline">

To share your GitHub pipeline, open a pull request on the Diffusers [repository](https://github.com/huggingface/diffusers) and add the one_step_unet.py file to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder.

</hfoption>
<hfoption id="Hub pipeline">

To share your Hub pipeline, create a model repository on the Hub and upload the one_step_unet.py file to it.

</hfoption>
</hfoptions>

### 7. Contribute to training examples [[7-contribute-to-training-examples]]

Diffusers examples are a collection of training scripts that reside in the [examples](https://github.com/huggingface/diffusers/tree/main/examples) folder.

We support two types of training examples:

- Official training examples
- Research training examples

Research training examples are located in [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects), whereas official training examples include all folders under [examples](https://github.com/huggingface/diffusers/tree/main/examples) except the `research_projects` and `community` folders. The official training examples are maintained by Diffusers' core maintainers, whereas the research training examples are maintained by the community. This is for the same reasons given in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: it is not feasible for the core maintainers to maintain all possible training methods for diffusion models. If the Diffusers core maintainers and the community consider a certain training paradigm too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by its author.

Both official training and research examples consist of a directory that contains one or more training scripts, a requirements.txt file, and a README.md file. For a user to make use of a training example, the repository must be cloned:

```bash
git clone https://github.com/huggingface/diffusers
```

and all additional dependencies required for training installed:

```bash
pip install -r /examples/<your-example-folder>/requirements.txt
```

Therefore, when adding an example, the `requirements.txt` file should define all pip dependencies required by your training example, so that once they are all installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt).

Training examples of the Diffusers library should adhere to the following philosophy:

- All the code necessary to run the example should be found in a single Python file.
- One should be able to run the example from the command line with `python <your-example>.py --args`.
- Examples should be kept simple and serve as **an example** of how to train with Diffusers. The purpose of example scripts is **not** to create state-of-the-art diffusion models, but to reproduce known training schemes without adding too much custom logic. As a byproduct of this point, the examples also strive to serve as good educational material.

To contribute an example, it is highly recommended to look at already existing examples, such as [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py), to get an idea of how they should look. We strongly advise contributors to use the [Accelerate library](https://github.com/huggingface/accelerate), as it is tightly integrated with Diffusers.

Once an example script works, please make sure to add a comprehensive `README.md` that states exactly how to use the example. This README should include:

- An example command on how to run the example script, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch).
- A link to some training results (logs, models, etc.) that show what the user can expect, as shown [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5).
- If you are adding a non-official/research training example, **please don't forget** to add a sentence stating that you are maintaining this training example, which includes your git handle, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations).

If you are contributing to the official training examples, please also make sure to add a test to [examples/test_examples.py](https://github.com/huggingface/diffusers/blob/main/examples/test_examples.py). This is not necessary for non-official training examples.

### 8. Fixing a "Good second issue" [[8-fixing-a-good-second-issue]]

*Good second issues* are marked by the [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) label.
Good second issues are usually more complicated to solve than [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). The issue description usually gives less guidance on how to fix the issue, and requires a decent understanding of the library from the interested contributor. If you are interested in tackling a good second issue, feel free to open a PR to fix it and link the PR to the issue. If you see that a PR has already been opened for this issue but was not merged, have a look at why it wasn't merged and try to open an improved PR. Good second issues are usually harder to get merged than good first issues, so don't hesitate to ask the core maintainers for help. If your PR is almost finished, the core maintainers can also jump into your PR and commit to it in order to get it merged.

### 9. Adding pipelines, models, schedulers [[9-adding-pipelines-models-schedulers]]

Pipelines, models, and schedulers are the most important pieces of the Diffusers library. They provide easy access to state-of-the-art diffusion technologies and thus allow the community to build powerful generative AI applications.

By adding a new model, pipeline, or scheduler, you might enable a new powerful use case for any of the user interfaces relying on Diffusers, which can be of immense value for the whole generative AI ecosystem.

Diffusers has a couple of open feature requests for all three components - feel free to go through them if you don't yet know which specific component you would like to add:
- [Model or pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22)
- [Scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)

Before adding any of the three components, it is strongly recommended that you read the [Philosophy guide](philosophy). For any of the three components, we cannot merge contributions whose design significantly diverges from our design philosophy, as maintaining API consistency aligned with that philosophy matters greatly to us. If you fundamentally disagree with a design choice, please open a [Feedback issue](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) instead, so it can be discussed whether a certain design pattern/choice should be changed everywhere in the library and whether we should update our design philosophy. Consistency across the library is very important to us.

Please make sure to add links to the original codebase/paper to the PR and, ideally, notify the original author directly on the PR so they can follow the progress.

If you are unsure or stuck with your PR, don't hesitate to leave a message asking for a first review or for help.

#### Copied from mechanism [[copied-from-mechanism]]

The `# Copied from` mechanism is a unique and important feature to understand when adding pipeline, model, or scheduler code. You'll see it all over the Diffusers codebase, and the reason for it is to keep the codebase easy to understand and maintain. Code marked with the `# Copied from` mechanism is forced to be exactly identical to the code it was copied from. This makes it easy to update and propagate changes across many files every time you run `make fix-copies`.

For example, in the code example below, [`~diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is the original code, and `AltDiffusionPipelineOutput` uses the `# Copied from` mechanism to copy it. The only difference is changing the class prefix from `Stable` to `Alt`.

```py
# Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt
class AltDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Alt Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`)
            List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or
            `None` if safety checking could not be performed.
    """
```

To learn more, read this section of the [~Don't~ Repeat Yourself*](https://huggingface.co/blog/transformers-design-philosophy#4-machine-learning-models-are-static) blog post.

## How to write a good issue [[how-to-write-a-good-issue]]

**The better your issue is written, the higher the chances it will be quickly resolved.**

1. Make sure you use the correct issue template. You can pick between *Bug Report*, *Feature Request*, *Feedback about API Design*, *New model/pipeline/scheduler addition*, *Forum*, or a blank issue. It is important to pick the correct template when opening [a new issue](https://github.com/huggingface/diffusers/issues/new/choose).
2. **Be precise**: Give your issue a fitting title. Try to keep the issue description as simple as possible. Be as precise as you can to reduce the time it takes to understand and resolve the issue. Make sure an issue covers one problem only, not multiple. If you found several problems, open a separate issue for each of them. If your issue is a bug, be as precise as possible about what bug it is - you should not just write "Error in diffusers".
3. **Reproducibility**: No reproducible code snippet == no way to solve the issue. If you encounter a bug, the maintainers have to be able to reproduce it. Make sure you include a code snippet that can be copy-pasted into a Python interpreter to reproduce the issue. Make sure the code snippet works, i.e., that there are no missing imports or missing links to images. Your issue should contain an error message and a code snippet that can be copy-pasted without any changes to reproduce the exact same error message. If your issue uses local model weights or local data that the reader cannot access, the issue cannot be solved. If you cannot share your data or model, try to create a dummy model or dummy data and use those instead.
4. **Minimalistic**: Help the reader understand the issue as quickly as possible by staying as concise as possible. Remove all code and all information that is irrelevant to the issue. If you found a bug, try to create the simplest code example that demonstrates your problem; don't dump your whole workflow into the issue the moment you find a bug. Instead, first try to understand which part of the code is responsible for the error and reproduce it with a couple of lines. Try to use dummy data instead of full datasets.
5. Add links. If you refer to a certain name, method, or model, please provide a link so the reader can better understand what you mean. If you refer to a specific PR or issue, make sure to link it in your issue. Do not assume the reader knows what you are talking about. The more links you add to your issue, the better.
6. Formatting. Make sure to format your issue nicely by writing code in Python code syntax and error messages in normal code syntax. See the [official GitHub formatting docs](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for more information.
7. Think of your issue not merely as a ticket to be solved, but as a well-written encyclopedia entry. Every added issue is a contribution to publicly available knowledge. By adding a well-written issue, you not only make it easier for maintainers to solve your issue, but you also help the whole community better understand a certain aspect of the library.

## How to write a good PR [[how-to-write-a-good-pr]]

1. Be a chameleon. Understand existing design patterns and syntax, and make sure your code additions blend seamlessly into the existing codebase. Pull requests that significantly diverge from existing design patterns or user interfaces will not be merged.
2. Be laser focused. A pull request should solve one problem and one problem only. Be careful not to fall into the trap of "also fixing another problem while we're at it". Pull requests that solve multiple, unrelated problems at once are much harder to review.
3. If helpful, try to add a code snippet showing an example of how your addition can be used.
4. The title of your pull request should summarize its contribution.
5. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and so that people watching the issue know you are working on it).
6. To indicate work in progress, prefix the title with `[WIP]`. This helps avoid duplicated work and distinguishes it from PRs that are ready to be merged.
7. Try to formulate and format your text as explained in [How to write a good issue](#how-to-write-a-good-issue).
8. Make sure existing tests pass.
9. Add high-coverage tests. No quality tests = no merge.
- If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI does not run the slow tests, but GitHub Actions runs them every night!
10. All public methods must have informative docstrings that work nicely with markdown. See [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py) for an example.
11. Because the repository is growing rapidly, it is important that no files that would significantly weigh it down are added. This includes images, videos, and other non-text files. We prefer to place such files in a hf.co-hosted `dataset`, such as [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) or [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). For external contributions, feel free to add the images to your PR and ask a Hugging Face member to move them into this dataset.

## How to open a PR [[how-to-open-a-pr]]

Before writing code, we strongly advise you to search through the existing PRs or issues to make sure nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to contribute to 🧨 Diffusers. `git` is not the easiest tool to use, but it has the best manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/main/setup.py#L244)):

1. Fork the [repository](https://github.com/huggingface/diffusers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote:

```bash
$ git clone git@github.com:<your GitHub handle>/diffusers.git
$ cd diffusers
$ git remote add upstream https://github.com/huggingface/diffusers.git
```

3. Create a new branch to hold your development changes:

```bash
$ git checkout -b a-descriptive-name-for-my-changes
```

**Never** work on the `main` branch.

4. Set up a development environment by running the following command in a virtual environment:

```bash
$ pip install -e ".[dev]"
```

If you have already cloned the repo, you might need to run `git pull` to get the most recent changes.

5. Develop the features on your branch.

While you work on the features, you should make sure that the test suite passes. Run the tests impacted by your changes like this:

```bash
$ pytest tests/<TEST_TO_RUN>.py
```

Before you run the tests, make sure you have installed the dependencies required for testing. You can do so with this command:

```bash
$ pip install -e ".[test]"
```

You can also run the full test suite with the following command, but now that Diffusers has grown a lot, you need a powerful machine to produce results in a decent amount of time. Here is the command:

```bash
$ make test
```

🧨 Diffusers relies on `black` and `isort` to format its source code consistently. After you make changes, you can apply automatic style corrections and code verifications with:

```bash
$ make style
```

🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI, but you can run the same checks with:

```bash
$ make quality
```

Once you're happy with your changes, add the changed files with `git add` and record your changes locally with `git commit`:

```bash
$ git add modified_file.py
$ git commit -m "A descriptive message about your changes."
```

It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

```bash
$ git pull upstream main
```

Push the changes to your account using:

```bash
$ git push -u origin a-descriptive-name-for-my-changes
```

6. Once you are satisfied, go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review.

7. It's OK if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.

### Tests [[tests]]

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests).

We like `pytest` and `pytest-xdist` because it's faster. From the root of the repository, here's how to run tests with `pytest` for the library:

```bash
$ python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

In fact, that's how `make test` is implemented!

You can specify a smaller set of tests to test only the feature you're working on.

By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models, so make sure you have enough disk space, a good internet connection, or a lot of patience!

```bash
$ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

`unittest` is fully supported. Here's how to run tests with it:

```bash
$ python -m unittest discover -s tests -t . -v
$ python -m unittest discover -s examples -t examples -v
```

### Syncing forked main with upstream (HuggingFace) main [[syncing-forked-main-with-upstream-huggingface-main]]

To avoid adding reference notes to each upstream PR and sending unnecessary notifications to the developers involved, when syncing the main branch of a forked repository, please follow these steps:
1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:

```bash
$ git checkout -b your-branch-for-syncing
$ git pull --squash --no-commit upstream main
$ git commit -m '<your message without GitHub references>'
$ git push --set-upstream origin your-branch-for-syncing
```

### Style guide [[style-guide]]

For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
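For example, a Google-style docstring for a small helper (a purely illustrative function, not an actual Diffusers API) looks like this:

```py
def rescale_image(image, scale_factor=1.0):
    """Rescales a `PIL.Image.Image` by a given factor.

    Args:
        image (`PIL.Image.Image`):
            The image to rescale.
        scale_factor (`float`, *optional*, defaults to 1.0):
            Multiplier applied to both the height and the width of `image`.

    Returns:
        `PIL.Image.Image`: The rescaled image.
    """
    width, height = image.size
    return image.resize((int(width * scale_factor), int(height * scale_factor)))
```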
diffusers/docs/source/ko/conceptual/contribution.md/0
{ "file_path": "diffusers/docs/source/ko/conceptual/contribution.md", "repo_id": "diffusers", "token_count": 35978 }
112
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Quicktour

Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you have probably seen examples of diffusion-generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models widely accessible to everyone.

Whether you're a developer or an everyday user, this quicktour will introduce you to 🧨 Diffusers and help you get up and generating quickly! There are three main components of the library to know about:

* The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
* Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems.
* Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how denoised images are generated during inference.

The quicktour will show you how to use the [`DiffusionPipeline`] for inference, and then walk you through how to combine a model and a scheduler to replicate what's happening inside the [`DiffusionPipeline`].

<Tip>

The quicktour is a simplified version of the introductory 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goal, design philosophy, and additional details about its core API, check out the notebook!

</Tip>

Before you begin, make sure you have all the necessary libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install --upgrade diffusers accelerate transformers
```

- [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training.
- [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).

## DiffusionPipeline

The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table below for some supported tasks, and for a complete list of supported tasks, check out the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) table.

| **Task**                                | **Description**                                                                                  | **Pipeline**                                                                        |
|-----------------------------------------|--------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
| Unconditional Image Generation          | generate an image from Gaussian noise                                                             | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
| Text-Guided Image Generation            | generate an image given a text prompt                                                             | [conditional_image_generation](./using-diffusers/conditional_image_generation)     |
| Text-Guided Image-to-Image Translation  | adapt an image guided by a text prompt                                                            | [img2img](./using-diffusers/img2img)                                                |
| Text-Guided Image-Inpainting            | fill the masked part of an image given the image, the mask and a text prompt                      | [inpaint](./using-diffusers/inpaint)                                                |
| Text-Guided Depth-to-Image Translation  | adapt parts of an image guided by a text prompt while preserving structure via depth estimation   | [depth2img](./using-diffusers/depth2img)                                            |

Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub.
For this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image generation.

<Tip warning={true}>

For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) first before running the model. 🧨 Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content.

</Tip>

Load the model with the [`~DiffusionPipeline.from_pretrained`] method:

```python
>>> from diffusers import DiffusionPipeline

>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```

The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`], among other things:

```py
>>> pipeline
StableDiffusionPipeline {
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.13.1",
  ...,
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  ...,
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```

We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters. You can move the generator object to a GPU, just like you would in PyTorch:

```python
>>> pipeline.to("cuda")
```

Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.

```python
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
</div>

Save the image by calling `save`:

```python
>>> image.save("image_of_squirrel_painting.png")
```

### Local pipeline

You can also use the pipeline locally. The only difference is that you need to download the weights first:

```bash
!git lfs install
!git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```

Then load the saved weights into the pipeline:

```python
>>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
```

Now, you can run the pipeline as in the section above.

### Swapping schedulers

Different schedulers come with different denoising speed and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is to let you easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method:

```py
>>> from diffusers import EulerDiscreteScheduler

>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
```

Try generating an image with the new scheduler and see if you notice a difference!

In the next section, you'll take a closer look at the components - the model and the scheduler - that make up the [`DiffusionPipeline`], and learn how to use these components to generate an image of a cat.

## Models

Most models take a noisy sample, and at each timestep predict the *noise residual* - the difference between a less noisy image and the input image (other models learn to predict the previous sample directly, or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)). You can mix and match models to create other diffusion systems.

Models are initiated with the [`~ModelMixin.from_pretrained`] method, which also locally caches the model weights so it is faster the next time you load the model.
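As a quick aside (a sketch, not part of the original walkthrough), `from_pretrained` also accepts a `torch_dtype` argument if you want to roughly halve a model's memory footprint by loading its weights in half precision, for example with the cat checkpoint used below:

```py
>>> import torch
>>> from diffusers import UNet2DModel

>>> model_fp16 = UNet2DModel.from_pretrained("google/ddpm-cat-256", torch_dtype=torch.float16)
```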
For the quicktour, you'll load the [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images:

```py
>>> from diffusers import UNet2DModel

>>> repo_id = "google/ddpm-cat-256"
>>> model = UNet2DModel.from_pretrained(repo_id)
```

To access the model parameters, call `model.config`:

```py
>>> model.config
```

The model configuration is a 🧊 frozen 🧊 dictionary, which means those parameters can't be changed after the model is created. This is intentional: it ensures the parameters used to define the model architecture at the start stay the same, while other parameters can still be adjusted during inference.

Some of the most important parameters are:

* `sample_size`: the height and width dimension of the input sample.
* `in_channels`: the number of input channels of the input sample.
* `down_block_types` and `up_block_types`: the types of down- and upsampling blocks used to create the UNet architecture.
* `block_out_channels`: the number of output channels of the downsampling blocks; also used in reverse order for the number of input channels of the upsampling blocks.
* `layers_per_block`: the number of ResNet blocks present in each UNet block.

To use the model for inference, create the image shape with random Gaussian noise. It should have a `batch` axis because the model can receive multiple random noises, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image:

```py
>>> import torch

>>> torch.manual_seed(0)

>>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
>>> noisy_sample.shape
torch.Size([1, 3, 256, 256])
```

For inference, pass the noisy image and a `timestep` to the model. The `timestep` indicates how noisy the input image is, with more noise at the beginning and less at the end. This helps the model determine its position in the diffusion process, whether it is closer to the start or the end. Use the `sample` attribute to get the model output:

```py
>>> with torch.no_grad():
...     noisy_residual = model(sample=noisy_sample, timestep=2).sample
```

To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler.

## Schedulers

Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, the `noisy_residual`.

<Tip>

🧨 Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a pre-built diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system.

</Tip>

For the quicktour, you'll instantiate the [`DDPMScheduler`] with its [`~diffusers.ConfigMixin.from_config`] method:

```py
>>> from diffusers import DDPMScheduler

>>> scheduler = DDPMScheduler.from_config(repo_id)
>>> scheduler
DDPMScheduler {
  "_class_name": "DDPMScheduler",
  "_diffusers_version": "0.13.1",
  "beta_end": 0.02,
  "beta_schedule": "linear",
  "beta_start": 0.0001,
  "clip_sample": true,
  "clip_sample_range": 1.0,
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "trained_betas": null,
  "variance_type": "fixed_small"
}
```

<Tip>

💡 Notice how the scheduler is instantiated from a configuration. Unlike a model, a scheduler has no trainable weights and is parameter-free!

</Tip>

Some of the most important parameters are:

* `num_train_timesteps`: the length of the denoising process or, in other words, the number of timesteps required to process random Gaussian noise into a data sample.
* `beta_schedule`: the type of noise schedule to use for inference and training.
* `beta_start` and `beta_end`: the start and end noise values for the noise schedule.

To predict a slightly less noisy image, pass the model output, the `timestep`, and the current `sample` to the scheduler's [`~diffusers.DDPMScheduler.step`] method:

```py
>>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample
>>> less_noisy_sample.shape
```

The `less_noisy_sample` can be passed to the next `timestep`, where it'll get even less noisy! Let's bring it all together now and visualize the entire denoising process.

First, create a function that postprocesses the denoised image and displays it as a `PIL.Image`:

```py
>>> import PIL.Image
>>> import numpy as np

>>> def display_sample(sample, i):
...     image_processed = sample.cpu().permute(0, 2, 3, 1)
...     image_processed = (image_processed + 1.0) * 127.5
...     image_processed = image_processed.numpy().astype(np.uint8)

...     image_pil = PIL.Image.fromarray(image_processed[0])
...     display(f"Image at step {i}")
...     display(image_pil)
```

To speed up the denoising process, move the input and the model to a GPU:

```py
>>> model.to("cuda")
>>> noisy_sample = noisy_sample.to("cuda")
```

Now create a denoising loop that predicts the residual of the less noisy sample and computes the less noisy sample with the scheduler:

```py
>>> import tqdm

>>> sample = noisy_sample

>>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
...     # 1. predict noise residual
...     with torch.no_grad():
...         residual = model(sample, t).sample

...     # 2. compute less noisy image and set x_t -> x_t-1
...     sample = scheduler.step(residual, t, sample).prev_sample

...     # 3. optionally look at image
...     if (i + 1) % 50 == 0:
...         display_sample(sample, i + 1)
```

Sit back and watch as a cat is generated from nothing but noise! 😻

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/>
</div>

## Next steps

Hopefully, you generated some cool images with 🧨 Diffusers in this quicktour! For your next steps, you can:

* Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial.
* See official and community [training or finetuning script](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) examples for a variety of use cases.
* Learn more about loading, accessing, changing, and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide (a small teaser follows below).
* Explore prompt engineering, speed and memory optimizations, and tips and tricks for generating higher-quality images with the [Stable Diffusion](./stable_diffusion) guide.
* Dive deeper into speeding up 🧨 Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
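As a small teaser for the scheduler guide, here is a rough sketch of driving the same cat model with a [`DDIMScheduler`] instead, which can denoise in far fewer steps. This assumes the checkpoint stores its scheduler configuration in a `scheduler/` subfolder, as DDPM pipeline repositories typically do, and the 50-step value is just illustrative:

```py
>>> from diffusers import DDIMScheduler

>>> ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler")
>>> ddim.set_timesteps(50)  # 50 steps instead of DDPM's 1000

>>> sample = noisy_sample
>>> for t in tqdm.tqdm(ddim.timesteps):
...     with torch.no_grad():
...         residual = model(sample, t).sample
...     sample = ddim.step(residual, t, sample).prev_sample
```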
diffusers/docs/source/ko/quicktour.md/0
{ "file_path": "diffusers/docs/source/ko/quicktour.md", "repo_id": "diffusers", "token_count": 11428 }
113
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Conditional image generation

[[open-in-colab]]

Conditional image generation lets you generate images from a text prompt. The text is converted into embeddings, which are used to condition the model to generate an image from noise.

The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion system for inference.

Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) you would like to download.

In this guide, you'll use [`DiffusionPipeline`] for text-to-image generation with [Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256):

```python
>>> from diffusers import DiffusionPipeline

>>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
```

The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU. You can move the generator object to a GPU, just like you would in PyTorch:

```python
>>> generator.to("cuda")
```

Now you can use the `generator` on your text prompt:

```python
>>> image = generator("An image of a squirrel in Picasso style").images[0]
```

The output is by default wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.

You can save the image by calling:

```python
>>> image.save("image_of_squirrel_painting.png")
```

Try out the Space below, and feel free to play around with the guidance scale parameter to see how it affects the image quality!

<iframe
	src="https://stabilityai-stable-diffusion.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
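If you'd rather experiment in code than in the Space above, the same knob is exposed as a pipeline argument. A minimal sketch (the parameter values are just illustrative starting points):

```python
>>> # higher guidance_scale follows the prompt more closely at the cost of diversity
>>> image = generator(
...     "An image of a squirrel in Picasso style",
...     num_inference_steps=50,
...     guidance_scale=7.5,
... ).images[0]
>>> image.save("image_of_squirrel_painting_guided.png")
```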
diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md", "repo_id": "diffusers", "token_count": 1550 }
114
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Video Diffusion

[[open-in-colab]]

[Stable Video Diffusion (SVD)](https://huggingface.co/papers/2311.15127) is a powerful image-to-video generation model that can generate 2-4 second high resolution (576x1024) videos conditioned on an input image.

This guide will show you how to use SVD to generate short videos from images.

Before you begin, make sure you have the following libraries installed:

```py
!pip install -q -U diffusers transformers accelerate
```

There are two variants of this model, [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid) and [SVD-XT](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt). The SVD checkpoint is trained to generate 14 frames, and the SVD-XT checkpoint is further finetuned to generate 25 frames.

You'll use the SVD-XT checkpoint for this guide.

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]

export_to_video(frames, "generated.mp4", fps=7)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"source image of a rocket"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket.gif"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"generated video from source image"</figcaption>
  </div>
</div>

## torch.compile

You can gain a 20-25% speedup at the expense of slightly increased memory by [compiling](../optimization/torch2.0#torchcompile) the UNet.

```diff
- pipe.enable_model_cpu_offload()
+ pipe.to("cuda")
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```

## Reduce memory usage

Video generation is very memory intensive because you're essentially generating `num_frames` all at once, similar to text-to-image generation with a high batch size. To reduce the memory requirement, there are several options that trade inference speed for lower memory:

- enable model offloading: each component of the pipeline is offloaded to the CPU once it's not needed anymore.
- enable feed-forward chunking: the feed-forward layer runs in a loop instead of running a single feed-forward with a huge batch size.
- reduce `decode_chunk_size`: the VAE decodes frames in chunks instead of decoding them all together. Setting `decode_chunk_size=1` decodes one frame at a time and uses the least amount of memory (we recommend adjusting this value based on your GPU memory), but the video might have some flickering.

```diff
- pipe.enable_model_cpu_offload()
- frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
+ pipe.enable_model_cpu_offload()
+ pipe.unet.enable_forward_chunking()
+ frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
```

Using all these tricks together should lower the memory requirement to less than 8GB of VRAM.
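If you want to verify the savings on your own GPU, one way (a rough sketch, not part of the original guide) is to reset and read PyTorch's peak-memory counter around the call:

```python
import torch

torch.cuda.reset_peak_memory_stats()
frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25).frames[0]
# max_memory_allocated reports the peak of tensor allocations since the last reset
print(f"Peak GPU memory: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GiB")
```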
## Micro-conditioning

Stable Video Diffusion also accepts micro-conditioning, in addition to the conditioning image, which allows more control over the generated video:

- `fps`: the frames per second of the generated video.
- `motion_bucket_id`: the motion bucket id to use for the generated video. This can be used to control the motion of the generated video. Increasing the motion bucket id increases the motion of the generated video.
- `noise_aug_strength`: the amount of noise added to the conditioning image. The higher the value, the less the video resembles the conditioning image. Increasing this value also increases the motion of the generated video.

For example, to generate a video with more motion, use the `motion_bucket_id` and `noise_aug_strength` micro-conditioning parameters:

```python
import torch

from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()

# Load the conditioning image
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png")
image = image.resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator, motion_bucket_id=180, noise_aug_strength=0.1).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/output_rocket_with_conditions.gif)
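To get a feel for how much each knob changes the output, a simple (purely illustrative) experiment is to sweep `motion_bucket_id` while keeping the seed fixed:

```python
for motion_bucket_id in (60, 127, 180):
    generator = torch.manual_seed(42)  # same seed for a fair comparison
    frames = pipe(
        image,
        decode_chunk_size=8,
        generator=generator,
        motion_bucket_id=motion_bucket_id,
        noise_aug_strength=0.1,
    ).frames[0]
    export_to_video(frames, f"generated_motion_{motion_bucket_id}.mp4", fps=7)
```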
diffusers/docs/source/ko/using-diffusers/svd.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/svd.md", "repo_id": "diffusers", "token_count": 3466 }
115
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F import torch.utils.model_zoo from einops import rearrange, repeat from gmflow.gmflow import GMFlow from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0 from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import is_compiled_module, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def clear_cache(): gc.collect() torch.cuda.empty_cache() def coords_grid(b, h, w, homogeneous=False, device=None): y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W] stacks = [x, y] if homogeneous: ones = torch.ones_like(x) # [H, W] stacks.append(ones) grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W] grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W] if device is not None: grid = grid.to(device) return grid def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False): # img: [B, C, H, W] # sample_coords: [B, 2, H, W] in image scale if sample_coords.size(1) != 2: # [B, H, W, 2] sample_coords = sample_coords.permute(0, 3, 1, 2) b, _, h, w = sample_coords.shape # Normalize to [-1, 1] x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1 y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1 grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2] img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True) if return_mask: mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W] return img, mask return img class Dilate: def __init__(self, kernel_size=7, channels=1, device="cpu"): self.kernel_size = kernel_size self.channels = channels gaussian_kernel = torch.ones(1, 1, self.kernel_size, self.kernel_size) gaussian_kernel = gaussian_kernel.repeat(self.channels, 1, 1, 1) self.mean = 
(self.kernel_size - 1) // 2 gaussian_kernel = gaussian_kernel.to(device) self.gaussian_filter = gaussian_kernel def __call__(self, x): x = F.pad(x, (self.mean, self.mean, self.mean, self.mean), "replicate") return torch.clamp(F.conv2d(x, self.gaussian_filter, bias=None), 0, 1) def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"): b, c, h, w = feature.size() assert flow.size(1) == 2 grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W] grid = grid.to(feature.dtype) return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask) def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5): # fwd_flow, bwd_flow: [B, 2, H, W] # alpha and beta values are following UnFlow # (https://arxiv.org/abs/1711.07837) assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4 assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2 flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W] warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W] warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W] diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W] diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1) threshold = alpha * flow_mag + beta fwd_occ = (diff_fwd > threshold).float() # [B, H, W] bwd_occ = (diff_bwd > threshold).float() return fwd_occ, bwd_occ def numpy2tensor(img): x0 = torch.from_numpy(img.copy()).float().cuda() / 255.0 * 2.0 - 1.0 x0 = torch.stack([x0], dim=0) # einops.rearrange(x0, 'b h w c -> b c h w').clone() return x0.permute(0, 3, 1, 2) def calc_mean_std(feat, eps=1e-5, chunk=1): size = feat.size() assert len(size) == 4 if chunk == 2: feat = torch.cat(feat.chunk(2), dim=3) N, C = size[:2] feat_var = feat.view(N // chunk, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N // chunk, C, -1).mean(dim=2).view(N // chunk, C, 1, 1) return feat_mean.repeat(chunk, 1, 1, 1), feat_std.repeat(chunk, 1, 1, 1) def adaptive_instance_normalization(content_feat, style_feat, chunk=1): assert content_feat.size()[:2] == style_feat.size()[:2] size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat, chunk) content_mean, content_std = calc_mean_std(content_feat) normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand(size) def optimize_feature( sample, flows, occs, correlation_matrix=[], intra_weight=1e2, iters=20, unet_chunk_size=2, optimize_temporal=True ): """ FRESO-guided latent feature optimization * optimize spatial correspondence (match correlation_matrix) * optimize temporal correspondence (match warped_image) """ if (flows is None or occs is None or (not optimize_temporal)) and ( intra_weight == 0 or len(correlation_matrix) == 0 ): return sample # flows=[fwd_flows, bwd_flows]: (N-1)*2*H1*W1 # occs=[fwd_occs, bwd_occs]: (N-1)*H1*W1 # sample: 2N*C*H*W torch.cuda.empty_cache() video_length = sample.shape[0] // unet_chunk_size latent = rearrange(sample.to(torch.float32), "(b f) c h w -> b f c h w", f=video_length) cs = torch.nn.Parameter((latent.detach().clone())) optimizer = torch.optim.Adam([cs], lr=0.2) # unify resolution if flows is not None and occs is not None: scale = sample.shape[2] * 1.0 / flows[0].shape[2] kernel = int(1 / scale) bwd_flow_ = F.interpolate(flows[1] * scale, scale_factor=scale, mode="bilinear").repeat( unet_chunk_size, 1, 1, 1 ) bwd_occ_ = F.max_pool2d(occs[1].unsqueeze(1), 
kernel_size=kernel).repeat( unet_chunk_size, 1, 1, 1 ) # 2(N-1)*1*H1*W1 fwd_flow_ = F.interpolate(flows[0] * scale, scale_factor=scale, mode="bilinear").repeat( unet_chunk_size, 1, 1, 1 ) fwd_occ_ = F.max_pool2d(occs[0].unsqueeze(1), kernel_size=kernel).repeat( unet_chunk_size, 1, 1, 1 ) # 2(N-1)*1*H1*W1 # match frame 0,1,2,3 and frame 1,2,3,0 reshuffle_list = list(range(1, video_length)) + [0] # attention_probs is the GRAM matrix of the normalized feature attention_probs = None for tmp in correlation_matrix: if sample.shape[2] * sample.shape[3] == tmp.shape[1]: attention_probs = tmp # 2N*HW*HW break n_iter = [0] while n_iter[0] < iters: def closure(): optimizer.zero_grad() loss = 0 # temporal consistency loss if optimize_temporal and flows is not None and occs is not None: c1 = rearrange(cs[:, :], "b f c h w -> (b f) c h w") c2 = rearrange(cs[:, reshuffle_list], "b f c h w -> (b f) c h w") warped_image1 = flow_warp(c1, bwd_flow_) warped_image2 = flow_warp(c2, fwd_flow_) loss = ( abs((c2 - warped_image1) * (1 - bwd_occ_)) + abs((c1 - warped_image2) * (1 - fwd_occ_)) ).mean() * 2 # spatial consistency loss if attention_probs is not None and intra_weight > 0: cs_vector = rearrange(cs, "b f c h w -> (b f) (h w) c") # attention_scores = torch.bmm(cs_vector, cs_vector.transpose(-1, -2)) # cs_attention_probs = attention_scores.softmax(dim=-1) cs_vector = cs_vector / ((cs_vector**2).sum(dim=2, keepdims=True) ** 0.5) cs_attention_probs = torch.bmm(cs_vector, cs_vector.transpose(-1, -2)) tmp = F.l1_loss(cs_attention_probs, attention_probs) * intra_weight loss = tmp + loss loss.backward() n_iter[0] += 1 return loss optimizer.step(closure) torch.cuda.empty_cache() return adaptive_instance_normalization(rearrange(cs.data.to(sample.dtype), "b f c h w -> (b f) c h w"), sample) @torch.no_grad() def warp_tensor(sample, flows, occs, saliency, unet_chunk_size): """ Warp images or features based on optical flow Fuse the warped imges or features based on occusion masks and saliency map """ scale = sample.shape[2] * 1.0 / flows[0].shape[2] kernel = int(1 / scale) bwd_flow_ = F.interpolate(flows[1] * scale, scale_factor=scale, mode="bilinear") bwd_occ_ = F.max_pool2d(occs[1].unsqueeze(1), kernel_size=kernel) # (N-1)*1*H1*W1 if scale == 1: bwd_occ_ = Dilate(kernel_size=13, device=sample.device)(bwd_occ_) fwd_flow_ = F.interpolate(flows[0] * scale, scale_factor=scale, mode="bilinear") fwd_occ_ = F.max_pool2d(occs[0].unsqueeze(1), kernel_size=kernel) # (N-1)*1*H1*W1 if scale == 1: fwd_occ_ = Dilate(kernel_size=13, device=sample.device)(fwd_occ_) scale2 = sample.shape[2] * 1.0 / saliency.shape[2] saliency = F.interpolate(saliency, scale_factor=scale2, mode="bilinear") latent = sample.to(torch.float32) video_length = sample.shape[0] // unet_chunk_size warp_saliency = flow_warp(saliency, bwd_flow_) warp_saliency_ = flow_warp(saliency[0:1], fwd_flow_[video_length - 1 : video_length]) for j in range(unet_chunk_size): for ii in range(video_length - 1): i = video_length * j + ii warped_image = flow_warp(latent[i : i + 1], bwd_flow_[ii : ii + 1]) mask = (1 - bwd_occ_[ii : ii + 1]) * saliency[ii + 1 : ii + 2] * warp_saliency[ii : ii + 1] latent[i + 1 : i + 2] = latent[i + 1 : i + 2] * (1 - mask) + warped_image * mask i = video_length * j ii = video_length - 1 warped_image = flow_warp(latent[i : i + 1], fwd_flow_[ii : ii + 1]) mask = (1 - fwd_occ_[ii : ii + 1]) * saliency[ii : ii + 1] * warp_saliency_ latent[ii + i : ii + i + 1] = latent[ii + i : ii + i + 1] * (1 - mask) + warped_image * mask return latent.to(sample.dtype) 
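# Note (illustrative addition, not part of the original FRESCO source): the helpers
# above are consumed further down in this file. A rough usage sketch, assuming
# `pipe` is a Diffusers pipeline whose UNet should be FRESCO-optimized, `flow_model`
# is a loaded GMFlow network, and `imgs` is a list of HxWx3 uint8 numpy frames:
#
#     flows, occs, attn_mask, interattn_paras = get_flow_and_interframe_paras(flow_model, imgs)
#     apply_FRESCO_opt(pipe, steps=pipe.scheduler.timesteps[:10], flows=flows, occs=occs)
#
# `apply_FRESCO_opt` (defined below) swaps in `my_forward`, which restores the UNet
# decoder features and runs `optimize_feature` on them for temporal consistency.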
def my_forward( self, steps=[], layers=[0, 1, 2, 3], flows=None, occs=None, correlation_matrix=[], intra_weight=1e2, iters=20, optimize_temporal=True, saliency=None, ): """ Hacked pipe.unet.forward() copied from https://github.com/huggingface/diffusers/blob/v0.19.3/src/diffusers/models/unet_2d_condition.py#L700 if you are using a new version of diffusers, please copy the source code and modify it accordingly (find [HACK] in the code) * restore and return the decoder features * optimize the decoder features * perform background smoothing """ def forward( sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`UNet2DConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. Returns: [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. 
default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. 
class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_image": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == "text_time": # SDXL - style if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == "image": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == "image_hint": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") hint = added_cond_kwargs.get("hint") aug_emb, hint = self.add_embedding(image_embs, hint) sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kadinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 - style if "image_embeds" not in 
added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(image_embeds) # 2. pre-process sample = self.conv_in(sample) # 3. down is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D additional_residuals = {} if is_adapter and len(down_block_additional_residuals) > 0: additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0) sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) if is_controlnet: sample = sample + mid_block_additional_residual # 5. 
up """ [HACK] restore the decoder features in up_samples """ up_samples = () # down_samples = () for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] """ [HACK] restore the decoder features in up_samples [HACK] optimize the decoder features [HACK] perform background smoothing """ if i in layers: up_samples += (sample,) if timestep in steps and i in layers: sample = optimize_feature( sample, flows, occs, correlation_matrix, intra_weight, iters, optimize_temporal=optimize_temporal ) if saliency is not None: sample = warp_tensor(sample, flows, occs, saliency, 2) # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) """ [HACK] return the output feature as well as the decoder features """ if not return_dict: return (sample,) + up_samples return UNet2DConditionOutput(sample=sample) return forward @torch.no_grad() def get_single_mapping_ind(bwd_flow, bwd_occ, imgs, scale=1.0): """ FLATTEN: Optical fLow-guided attention (Temoporal-guided attention) Find the correspondence between every pixels in a pair of frames [input] bwd_flow: 1*2*H*W bwd_occ: 1*H*W i.e., f2 = warp(f1, bwd_flow) * bwd_occ imgs: 2*3*H*W i.e., [f1,f2] [output] mapping_ind: pixel index correspondence unlinkedmask: indicate whether a pixel has no correspondence i.e., f2 = f1[mapping_ind] * unlinkedmask """ flows = F.interpolate(bwd_flow, scale_factor=1.0 / scale, mode="bilinear")[0][[1, 0]] / scale # 2*H*W _, H, W = flows.shape masks = torch.logical_not(F.interpolate(bwd_occ[None], scale_factor=1.0 / scale, mode="bilinear") > 0.5)[ 0 ] # 1*H*W frames = F.interpolate(imgs, scale_factor=1.0 / scale, mode="bilinear").view(2, 3, -1) # 2*3*HW grid = torch.stack(torch.meshgrid([torch.arange(H), torch.arange(W)]), dim=0).to(flows.device) # 2*H*W warp_grid = torch.round(grid + flows) mask = torch.logical_and( torch.logical_and( torch.logical_and(torch.logical_and(warp_grid[0] >= 0, warp_grid[0] < H), warp_grid[1] >= 0), warp_grid[1] < W, ), masks[0], ).view(-1) # HW warp_grid = warp_grid.view(2, -1) # 2*HW warp_ind = (warp_grid[0] * W + warp_grid[1]).to(torch.long) # HW mapping_ind = torch.zeros_like(warp_ind) - 1 # HW for f0ind, f1ind in enumerate(warp_ind): if mask[f0ind]: if mapping_ind[f1ind] == -1: mapping_ind[f1ind] = f0ind else: targetv = frames[0, :, f1ind] pref0ind = mapping_ind[f1ind] prev = frames[1, :, pref0ind] v = frames[1, :, f0ind] if ((prev - targetv) ** 2).mean() > ((v - targetv) ** 2).mean(): mask[pref0ind] = False mapping_ind[f1ind] = f0ind else: mask[f0ind] = False unusedind = torch.arange(len(mask)).to(mask.device)[~mask] unlinkedmask = 
mapping_ind == -1 mapping_ind[unlinkedmask] = unusedind return mapping_ind, unlinkedmask @torch.no_grad() def get_mapping_ind(bwd_flows, bwd_occs, imgs, scale=1.0): """ FLATTEN: Optical fLow-guided attention (Temoporal-guided attention) Find pixel correspondence between every consecutive frames in a batch [input] bwd_flow: (N-1)*2*H*W bwd_occ: (N-1)*H*W imgs: N*3*H*W [output] fwd_mappings: N*1*HW bwd_mappings: N*1*HW flattn_mask: HW*1*N*N i.e., imgs[i,:,fwd_mappings[i]] corresponds to imgs[0] i.e., imgs[i,:,fwd_mappings[i]][:,bwd_mappings[i]] restore the original imgs[i] """ N, H, W = imgs.shape[0], int(imgs.shape[2] // scale), int(imgs.shape[3] // scale) iterattn_mask = torch.ones(H * W, N, N, dtype=torch.bool).to(imgs.device) for i in range(len(imgs) - 1): one_mask = torch.ones(N, N, dtype=torch.bool).to(imgs.device) one_mask[: i + 1, i + 1 :] = False one_mask[i + 1 :, : i + 1] = False mapping_ind, unlinkedmask = get_single_mapping_ind( bwd_flows[i : i + 1], bwd_occs[i : i + 1], imgs[i : i + 2], scale ) if i == 0: fwd_mapping = [torch.arange(len(mapping_ind)).to(mapping_ind.device)] bwd_mapping = [torch.arange(len(mapping_ind)).to(mapping_ind.device)] iterattn_mask[unlinkedmask[fwd_mapping[-1]]] = torch.logical_and( iterattn_mask[unlinkedmask[fwd_mapping[-1]]], one_mask ) fwd_mapping += [mapping_ind[fwd_mapping[-1]]] bwd_mapping += [torch.sort(fwd_mapping[-1])[1]] fwd_mappings = torch.stack(fwd_mapping, dim=0).unsqueeze(1) bwd_mappings = torch.stack(bwd_mapping, dim=0).unsqueeze(1) return fwd_mappings, bwd_mappings, iterattn_mask.unsqueeze(1) def apply_FRESCO_opt( pipe, steps=[], layers=[0, 1, 2, 3], flows=None, occs=None, correlation_matrix=[], intra_weight=1e2, iters=20, optimize_temporal=True, saliency=None, ): """ Apply FRESCO-based optimization to a StableDiffusionPipeline """ pipe.unet.forward = my_forward( pipe.unet, steps, layers, flows, occs, correlation_matrix, intra_weight, iters, optimize_temporal, saliency ) @torch.no_grad() def get_intraframe_paras(pipe, imgs, frescoProc, prompt_embeds, do_classifier_free_guidance=True, generator=None): """ Get parameters for spatial-guided attention and optimization * perform one step denoising * collect attention feature, stored in frescoProc.controller.stored_attn['decoder_attn'] * compute the gram matrix of the normalized feature for spatial consistency loss """ noise_scheduler = pipe.scheduler timestep = noise_scheduler.timesteps[-1] device = pipe._execution_device B, C, H, W = imgs.shape frescoProc.controller.disable_controller() apply_FRESCO_opt(pipe) frescoProc.controller.clear_store() frescoProc.controller.enable_store() latents = pipe.prepare_latents( imgs.to(pipe.unet.dtype), timestep, B, 1, prompt_embeds.dtype, device, generator=generator, repeat_noise=False ) latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents model_output = pipe.unet( latent_model_input, timestep, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=None, return_dict=False, ) frescoProc.controller.disable_store() # gram matrix of the normalized feature for spatial consistency loss correlation_matrix = [] for tmp in model_output[1:]: latent_vector = rearrange(tmp, "b c h w -> b (h w) c") latent_vector = latent_vector / ((latent_vector**2).sum(dim=2, keepdims=True) ** 0.5) attention_probs = torch.bmm(latent_vector, latent_vector.transpose(-1, -2)) correlation_matrix += [attention_probs.detach().clone().to(torch.float32)] del attention_probs, latent_vector, tmp del model_output clear_cache() return correlation_matrix 
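# Illustrative usage sketch (not executed; `pipe`, `imgs`, `prompt_embeds` and
# `frescoProc` are assumed to be a loaded StableDiffusionPipeline, a B*3*H*W
# image tensor, encoded prompts, and the processor returned by
# apply_FRESCO_attn(pipe), respectively):
#
#     correlation_matrix = get_intraframe_paras(pipe, imgs, frescoProc, prompt_embeds)
#     apply_FRESCO_opt(pipe, steps=pipe.scheduler.timesteps[:15], correlation_matrix=correlation_matrix)
#
# The gram matrices collected above serve as the spatial-consistency targets that
# optimize_feature() matches against during each optimized denoising step.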
@torch.no_grad()
def get_flow_and_interframe_paras(flow_model, imgs):
    """
    Get parameters for temporal-guided attention and optimization
    * predict optical flow and occlusion mask
    * compute pixel index correspondence for FLATTEN
    """
    images = torch.stack([torch.from_numpy(img).permute(2, 0, 1).float() for img in imgs], dim=0).cuda()
    imgs_torch = torch.cat([numpy2tensor(img) for img in imgs], dim=0)

    reshuffle_list = list(range(1, len(images))) + [0]

    results_dict = flow_model(
        images,
        images[reshuffle_list],
        attn_splits_list=[2],
        corr_radius_list=[-1],
        prop_radius_list=[-1],
        pred_bidir_flow=True,
    )
    flow_pr = results_dict["flow_preds"][-1]  # [2*B, 2, H, W]
    fwd_flows, bwd_flows = flow_pr.chunk(2)  # [B, 2, H, W]
    fwd_occs, bwd_occs = forward_backward_consistency_check(fwd_flows, bwd_flows)  # [B, H, W]

    warped_image1 = flow_warp(images, bwd_flows)
    bwd_occs = torch.clamp(
        bwd_occs + (abs(images[reshuffle_list] - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1
    )

    warped_image2 = flow_warp(images[reshuffle_list], fwd_flows)
    fwd_occs = torch.clamp(fwd_occs + (abs(images - warped_image2).mean(dim=1) > 255 * 0.25).float(), 0, 1)

    attn_mask = []
    for scale in [8.0, 16.0, 32.0]:
        bwd_occs_ = F.interpolate(bwd_occs[:-1].unsqueeze(1), scale_factor=1.0 / scale, mode="bilinear")
        attn_mask += [
            torch.cat((bwd_occs_[0:1].reshape(1, -1) > -1, bwd_occs_.reshape(bwd_occs_.shape[0], -1) > 0.5), dim=0)
        ]

    fwd_mappings = []
    bwd_mappings = []
    interattn_masks = []
    for scale in [8.0, 16.0]:
        fwd_mapping, bwd_mapping, interattn_mask = get_mapping_ind(bwd_flows, bwd_occs, imgs_torch, scale=scale)
        fwd_mappings += [fwd_mapping]
        bwd_mappings += [bwd_mapping]
        interattn_masks += [interattn_mask]

    interattn_paras = {}
    interattn_paras["fwd_mappings"] = fwd_mappings
    interattn_paras["bwd_mappings"] = bwd_mappings
    interattn_paras["interattn_masks"] = interattn_masks

    clear_cache()
    return [fwd_flows, bwd_flows], [fwd_occs, bwd_occs], attn_mask, interattn_paras


class AttentionControl:
    """
    Control FRESCO-based attention
    * enable/disable spatial-guided attention
    * enable/disable temporal-guided attention
    * enable/disable cross-frame attention
    * collect intermediate attention features (for spatial-guided attention)
    """

    def __init__(self):
        self.stored_attn = self.get_empty_store()
        self.store = False
        self.index = 0
        self.attn_mask = None
        self.interattn_paras = None
        self.use_interattn = False
        self.use_cfattn = False
        self.use_intraattn = False
        self.intraattn_bias = 0
        self.intraattn_scale_factor = 0.2
        self.interattn_scale_factor = 0.2

    @staticmethod
    def get_empty_store():
        return {
            "decoder_attn": [],
        }

    def clear_store(self):
        del self.stored_attn
        torch.cuda.empty_cache()
        gc.collect()
        self.stored_attn = self.get_empty_store()
        self.disable_intraattn()

    # store attention features of the input frame for spatial-guided attention
    def enable_store(self):
        self.store = True

    def disable_store(self):
        self.store = False

    # spatial-guided attention
    def enable_intraattn(self):
        self.index = 0
        self.use_intraattn = True
        self.disable_store()
        if len(self.stored_attn["decoder_attn"]) == 0:
            self.use_intraattn = False

    def disable_intraattn(self):
        self.index = 0
        self.use_intraattn = False
        self.disable_store()

    def disable_cfattn(self):
        self.use_cfattn = False

    # cross-frame attention
    def enable_cfattn(self, attn_mask=None):
        if attn_mask:
            if self.attn_mask:
                del self.attn_mask
                torch.cuda.empty_cache()
            self.attn_mask = attn_mask
            self.use_cfattn = True
        else:
            if self.attn_mask:
                self.use_cfattn = True
            else:
                print("Warning: no valid cross-frame attention parameters available!")
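                # Neither a new mask nor a previously stored one is available, so
                # cross-frame attention cannot be enabled; fall back to disabling it.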
self.disable_cfattn() def disable_interattn(self): self.use_interattn = False # temporal-guided attention def enable_interattn(self, interattn_paras=None): if interattn_paras: if self.interattn_paras: del self.interattn_paras torch.cuda.empty_cache() self.interattn_paras = interattn_paras self.use_interattn = True else: if self.interattn_paras: self.use_interattn = True else: print("Warning: no valid temporal-guided attention parameters available!") self.disable_interattn() def disable_controller(self): self.disable_intraattn() self.disable_interattn() self.disable_cfattn() def enable_controller(self, interattn_paras=None, attn_mask=None): self.enable_intraattn() self.enable_interattn(interattn_paras) self.enable_cfattn(attn_mask) def forward(self, context): if self.store: self.stored_attn["decoder_attn"].append(context.detach()) if self.use_intraattn and len(self.stored_attn["decoder_attn"]) > 0: tmp = self.stored_attn["decoder_attn"][self.index] self.index = self.index + 1 if self.index >= len(self.stored_attn["decoder_attn"]): self.index = 0 self.disable_store() return tmp return context def __call__(self, context): context = self.forward(context) return context class FRESCOAttnProcessor2_0: """ Hack self attention to FRESCO-based attention * adding spatial-guided attention * adding temporal-guided attention * adding cross-frame attention Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). Usage frescoProc = FRESCOAttnProcessor2_0(2, attn_mask) attnProc = AttnProcessor2_0() attn_processor_dict = {} for k in pipe.unet.attn_processors.keys(): if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"): attn_processor_dict[k] = frescoProc else: attn_processor_dict[k] = attnProc pipe.unet.set_attn_processor(attn_processor_dict) """ def __init__(self, unet_chunk_size=2, controller=None): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.unet_chunk_size = unet_chunk_size self.controller = controller def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) crossattn = False if encoder_hidden_states is None: encoder_hidden_states = hidden_states if self.controller and self.controller.store: self.controller(hidden_states.detach().clone()) else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) # BC * HW * 8D key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query_raw, key_raw = None, None if self.controller 
and self.controller.use_interattn and (not crossattn): query_raw, key_raw = query.clone(), key.clone() inner_dim = key.shape[-1] # 8D head_dim = inner_dim // attn.heads # D """for efficient cross-frame attention""" if self.controller and self.controller.use_cfattn and (not crossattn): video_length = key.size()[0] // self.unet_chunk_size former_frame_index = [0] * video_length attn_mask = None if self.controller.attn_mask is not None: for m in self.controller.attn_mask: if m.shape[1] == key.shape[1]: attn_mask = m # BC * HW * 8D --> B * C * HW * 8D key = rearrange(key, "(b f) d c -> b f d c", f=video_length) # B * C * HW * 8D --> B * C * HW * 8D if attn_mask is None: key = key[:, former_frame_index] else: key = repeat(key[:, attn_mask], "b d c -> b f d c", f=video_length) # B * C * HW * 8D --> BC * HW * 8D key = rearrange(key, "b f d c -> (b f) d c").detach() value = rearrange(value, "(b f) d c -> b f d c", f=video_length) if attn_mask is None: value = value[:, former_frame_index] else: value = repeat(value[:, attn_mask], "b d c -> b f d c", f=video_length) value = rearrange(value, "b f d c -> (b f) d c").detach() # BC * HW * 8D --> BC * HW * 8 * D --> BC * 8 * HW * D query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # BC * 8 * HW2 * D key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # BC * 8 * HW2 * D2 value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) """for spatial-guided intra-frame attention""" if self.controller and self.controller.use_intraattn and (not crossattn): ref_hidden_states = self.controller(None) assert ref_hidden_states.shape == encoder_hidden_states.shape query_ = attn.to_q(ref_hidden_states) key_ = attn.to_k(ref_hidden_states) # BC * 8 * HW * D query_ = query_.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ = key_.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) query = F.scaled_dot_product_attention( query_, key_ * self.controller.intraattn_scale_factor, query, attn_mask=torch.eye(query_.size(-2), key_.size(-2), dtype=query.dtype, device=query.device) * self.controller.intraattn_bias, ).detach() del query_, key_ torch.cuda.empty_cache() # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 # output: BC * 8 * HW * D2 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) """for temporal-guided inter-frame attention (FLATTEN)""" if self.controller and self.controller.use_interattn and (not crossattn): del query, key, value torch.cuda.empty_cache() bwd_mapping = None fwd_mapping = None for i, f in enumerate(self.controller.interattn_paras["fwd_mappings"]): if f.shape[2] == hidden_states.shape[2]: fwd_mapping = f bwd_mapping = self.controller.interattn_paras["bwd_mappings"][i] interattn_mask = self.controller.interattn_paras["interattn_masks"][i] video_length = key_raw.size()[0] // self.unet_chunk_size # BC * HW * 8D --> C * 8BD * HW key = rearrange(key_raw, "(b f) d c -> f (b c) d", f=video_length) query = rearrange(query_raw, "(b f) d c -> f (b c) d", f=video_length) # BC * 8 * HW * D --> C * 8BD * HW # key = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) ######## # query = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) ####### value = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) key = torch.gather(key, 2, fwd_mapping.expand(-1, key.shape[1], -1)) query = torch.gather(query, 
2, fwd_mapping.expand(-1, query.shape[1], -1)) value = torch.gather(value, 2, fwd_mapping.expand(-1, value.shape[1], -1)) # C * 8BD * HW --> BHW, C, 8D key = rearrange(key, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) query = rearrange(query, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) value = rearrange(value, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) # BHW * C * 8D --> BHW * C * 8 * D--> BHW * 8 * C * D query = query.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() key = key.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() value = value.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() hidden_states_ = F.scaled_dot_product_attention( query, key * self.controller.interattn_scale_factor, value, # .to(query.dtype)-1.0) * 1e6 - attn_mask=(interattn_mask.repeat(self.unet_chunk_size, 1, 1, 1)), # torch.eye(interattn_mask.shape[2]).to(query.device).to(query.dtype) * 1e4, ) # BHW * 8 * C * D --> C * 8BD * HW hidden_states_ = rearrange(hidden_states_, "(b d) h f c -> f (b h c) d", b=self.unet_chunk_size) hidden_states_ = torch.gather( hidden_states_, 2, bwd_mapping.expand(-1, hidden_states_.shape[1], -1) ).detach() # C * 8BD * HW --> BC * 8 * HW * D hidden_states = rearrange( hidden_states_, "f (b h c) d -> (b f) h d c", b=self.unet_chunk_size, h=attn.heads ) # BC * 8 * HW * D --> BC * HW * 8D hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def apply_FRESCO_attn(pipe): """ Apply FRESCO-guided attention to a StableDiffusionPipeline """ frescoProc = FRESCOAttnProcessor2_0(2, AttentionControl()) attnProc = AttnProcessor2_0() attn_processor_dict = {} for k in pipe.unet.attn_processors.keys(): if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"): attn_processor_dict[k] = frescoProc else: attn_processor_dict[k] = attnProc pipe.unet.set_attn_processor(attn_processor_dict) return frescoProc def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return 
image class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline): r""" Pipeline for video-to-video translation using Stable Diffusion with FRESCO Algorithm. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the `unet` during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, image_encoder, requires_safety_checker, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )
        self.register_to_config(requires_safety_checker=requires_safety_checker)

        frescoProc = FRESCOAttnProcessor2_0(2, AttentionControl())
        attnProc = AttnProcessor2_0()
        attn_processor_dict = {}
        for k in self.unet.attn_processors.keys():
            if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"):
                attn_processor_dict[k] = frescoProc
            else:
                attn_processor_dict[k] = attnProc
        self.unet.set_attn_processor(attn_processor_dict)
        self.frescoProc = frescoProc

        flow_model = GMFlow(
            feature_channels=128,
            num_scales=1,
            upsample_factor=8,
            num_head=1,
            attention_type="swin",
            ffn_dim_expansion=4,
            num_transformer_layers=6,
        ).to(self.device)

        checkpoint = torch.utils.model_zoo.load_url(
            "https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth",
            map_location=lambda storage, loc: storage,
        )
        weights = checkpoint["model"] if "model" in checkpoint else checkpoint
        flow_model.load_state_dict(weights, strict=False)
        flow_model.eval()
        self.flow_model = flow_model

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
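        # Note: the legacy return layout is the classifier-free-guidance batch
        # layout, i.e. roughly (a sketch, not executed):
        #     pos, neg = self.encode_prompt(...)
        #     legacy_output = torch.cat([neg, pos])  # [unconditional, conditional]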
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) single_negative_image_embeds = single_negative_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) ) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) else: single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) image_embeds.append(single_image_embeds) return image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
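        # Passing `eta` unconditionally would raise a TypeError for schedulers whose
        # step() signature does not accept it, hence the signature inspection below.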
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # `prompt` needs more sophisticated handling when there are multiple # conditionings. if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning( f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" " prompts. The conditionings will be fixed across the prompts." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(image, prompt, prompt_embeds) elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") # When `image` is a nested list: # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
            elif any(isinstance(i, list) for i in image):
                raise ValueError("A single batch of multiple conditionings is not supported at the moment.")
            elif len(image) != len(self.controlnet.nets):
                raise ValueError(
                    f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
                )

            for image_ in image:
                self.check_image(image_, prompt, prompt_embeds)
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("A single batch of multiple conditionings is not supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)} values."
                )

        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, repeat_noise, generator=None ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type 
`torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape if repeat_noise: noise = randn_tensor((1, *shape[1:]), generator=generator, device=device, dtype=dtype) one_tuple = (1,) * (len(shape) - 1) noise = noise.repeat(batch_size, *one_tuple) else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
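    # For example, with `guidance_scale = 7.5` the denoising loop combines the two
    # UNet predictions as (classifier-free guidance, sketched):
    #     noise_pred = noise_uncond + 7.5 * (noise_text - noise_uncond)
    # so `do_classifier_free_guidance` below is simply `guidance_scale > 1`.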
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, frames: Union[List[np.ndarray], torch.FloatTensor] = None, control_frames: Union[List[np.ndarray], torch.FloatTensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 0.8, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], end_opt_step=15, num_intraattn_steps=1, step_interattn_end=350, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. frames (`List[np.ndarray]` or `torch.FloatTensor`): The input images to be used as the starting point for the image generation process. control_frames (`List[np.ndarray]` or `torch.FloatTensor`): The ControlNet input images condition to provide guidance to the `unet` for generation. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list with length equal to the number of
                IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
                the corresponding scale as a list.
            guess_mode (`bool`, *optional*, defaults to `False`):
                The ControlNet encoder tries to recognize the content of the input image even if you remove all
                prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
                The percentage of total steps at which the ControlNet starts applying.
            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
                The percentage of total steps at which the ControlNet stops applying.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            end_opt_step:
                The feature optimization is activated from step `strength * num_inference_steps` to step
                `end_opt_step`.
            num_intraattn_steps:
                Apply `num_intraattn_steps` steps of spatial-guided attention.
            step_interattn_end:
                Apply temporal-guided attention for timesteps in `[step_interattn_end, 1000]`.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet

        # align format for control guidance
        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
            mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
            control_guidance_start, control_guidance_end = (
                mult * [control_guidance_start],
                mult * [control_guidance_end],
            )

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            control_frames[0],
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            controlnet_conditioning_scale,
            control_guidance_start,
            control_guidance_end,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2.
Define call parameters batch_size = len(frames) device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) prompt_embeds = prompt_embeds.repeat(batch_size, 1, 1) negative_prompt_embeds = negative_prompt_embeds.repeat(batch_size, 1, 1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 4. Prepare image imgs_np = [] for frame in frames: if isinstance(frame, PIL.Image.Image): imgs_np.append(np.asarray(frame)) else: # np.ndarray imgs_np.append(frame) images_pt = self.image_processor.preprocess(frames).to(dtype=torch.float32) # 5. 
Prepare controlnet_conditioning_image if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_frames, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_frames: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: assert False self.flow_model.to(device) flows, occs, attn_mask, interattn_paras = get_flow_and_interframe_paras(self.flow_model, imgs_np) correlation_matrix = get_intraframe_paras(self, images_pt, self.frescoProc, prompt_embeds, generator) """ Flexible settings for attention: * Turn off FRESCO-guided attention: frescoProc.controller.disable_controller() Then you can turn on one specific attention submodule * Turn on Cross-frame attention: frescoProc.controller.enable_cfattn(attn_mask) * Turn on Spatial-guided attention: frescoProc.controller.enable_intraattn() * Turn on Temporal-guided attention: frescoProc.controller.enable_interattn(interattn_paras) Flexible settings for optimization: * Turn off Spatial-guided optimization: set optimize_temporal = False in apply_FRESCO_opt() * Turn off Temporal-guided optimization: set correlation_matrix = [] in apply_FRESCO_opt() * Turn off FRESCO-guided optimization: disable_FRESCO_opt(pipe) Flexible settings for background smoothing: * Turn off background smoothing: set saliency = None in apply_FRESCO_opt() """ self.frescoProc.controller.enable_controller(interattn_paras=interattn_paras, attn_mask=attn_mask) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps apply_FRESCO_opt( self, steps=timesteps[:end_opt_step], flows=flows, occs=occs, correlation_matrix=correlation_matrix, saliency=None, optimize_temporal=True, ) clear_cache() # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. Prepare latent variables latents = self.prepare_latents( images_pt, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator=generator, repeat_noise=True, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 7.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if i >= num_intraattn_steps: self.frescoProc.controller.disable_intraattn() if t < step_interattn_end: self.frescoProc.controller.disable_interattn() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
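# ---------------------------------------------------------------------------
# Hedged usage sketch for the "flexible settings" documented in __call__ above.
# It assumes `pipe` is an already-initialized instance of this FRESCO pipeline
# and that `attn_mask`, `interattn_paras`, `flows`, `occs`, `timesteps` and
# `end_opt_step` were produced the same way __call__ produces them; every
# controller/optimization call below is taken from the comment block inside
# __call__, only this wrapper function itself is new.
# ---------------------------------------------------------------------------
def customize_fresco_guidance(pipe, attn_mask, interattn_paras, flows, occs, timesteps, end_opt_step):
    # Reset all FRESCO-guided attention, then opt back in selectively.
    pipe.frescoProc.controller.disable_controller()
    pipe.frescoProc.controller.enable_cfattn(attn_mask)  # cross-frame attention
    pipe.frescoProc.controller.enable_intraattn()  # spatial-guided attention
    pipe.frescoProc.controller.enable_interattn(interattn_paras)  # temporal-guided attention

    # Per the comment block in __call__: correlation_matrix=[] turns off
    # temporal-guided optimization and saliency=None turns off background smoothing.
    apply_FRESCO_opt(
        pipe,
        steps=timesteps[:end_opt_step],
        flows=flows,
        occs=occs,
        correlation_matrix=[],
        saliency=None,
        optimize_temporal=True,
    )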
diffusers/examples/community/fresco_v2v.py/0
{ "file_path": "diffusers/examples/community/fresco_v2v.py", "repo_id": "diffusers", "token_count": 53403 }
116
## ----------------------------------------------------------- # An SDXL pipeline that can take an unlimited-length weighted prompt # # Author: Andrew Zhu # GitHub: https://github.com/xhinker # Medium: https://medium.com/@xhinker ## ----------------------------------------------------------- import inspect import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import DiffusionPipeline, StableDiffusionXLPipeline from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_accelerate_available, is_accelerate_version, is_invisible_watermark_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ import re re_attention = re.compile( r""" \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)| \)|]|[^\\()\[\]:]+|: """, re.X, ) re_break = re.compile(r"\s*\bBREAK\b\s*", re.S) res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: parts = re.split(re_break, text) for i, part in enumerate(parts): if i > 0: res.append(["BREAK", -1]) res.append([part, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str): """ Get prompt token ids and weights; this function works for both the prompt and the negative prompt Args: clip_tokenizer (CLIPTokenizer) A CLIPTokenizer prompt (str) A prompt string with weights Returns: text_tokens (list) A list containing token ids text_weights (list) A list containing the corresponding weight of each token id Example: import torch from transformers import CLIPTokenizer clip_tokenizer = CLIPTokenizer.from_pretrained( "stablediffusionapi/deliberate-v2" , subfolder = "tokenizer" , dtype = torch.float16 ) token_id_list, token_weight_list = get_prompts_tokens_with_weights( clip_tokenizer = clip_tokenizer ,prompt = "a (red:1.5) cat"*70 ) """ texts_and_weights = parse_prompt_attention(prompt) text_tokens, text_weights = [], [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = clip_tokenizer(word, truncation=False).input_ids[1:-1] # so that we can tokenize a prompt of any length # the returned token is a 1d list: [320, 1125, 539, 320] # merge the new tokens into the running token holder: text_tokens
text_tokens = [*text_tokens, *token] # each token chunk will come with one weight, like ['red cat', 2.0] # need to expand weight for each token. chunk_weights = [weight] * len(token) # append the weight back to the weight holder: text_weights text_weights = [*text_weights, *chunk_weights] return text_tokens, text_weights def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False): """ Produce tokens and weights in groups of 75 and pad the missing tokens Args: token_ids (list) The token ids from tokenizer weights (list) The weights list from function get_prompts_tokens_with_weights pad_last_block (bool) Controls whether to pad the last token list to 75 tokens with eos Returns: new_token_ids (2d list) new_weights (2d list) Example: token_groups,weight_groups = group_tokens_and_weights( token_ids = token_id_list , weights = token_weight_list ) """ bos, eos = 49406, 49407 # this will be a 2d list new_token_ids = [] new_weights = [] while len(token_ids) >= 75: # get the first 75 tokens head_75_tokens = [token_ids.pop(0) for _ in range(75)] head_75_weights = [weights.pop(0) for _ in range(75)] # wrap the chunk with bos and eos tokens temp_77_token_ids = [bos] + head_75_tokens + [eos] temp_77_weights = [1.0] + head_75_weights + [1.0] # add 77 token and weights chunk to the holder list new_token_ids.append(temp_77_token_ids) new_weights.append(temp_77_weights) # pad the leftover tokens if len(token_ids) > 0: padding_len = 75 - len(token_ids) if pad_last_block else 0 temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos] new_token_ids.append(temp_77_token_ids) temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0] new_weights.append(temp_77_weights) return new_token_ids, new_weights def get_weighted_text_embeddings_sdxl( pipe: StableDiffusionXLPipeline, prompt: str = "", prompt_2: str = None, neg_prompt: str = "", neg_prompt_2: str = None, num_images_per_prompt: int = 1, device: Optional[torch.device] = None, clip_skip: Optional[int] = None, lora_scale: Optional[float] = None, ): """ This function can process a long weighted prompt for Stable Diffusion XL with no length limitation Args: pipe (StableDiffusionXLPipeline) prompt (str) prompt_2 (str) neg_prompt (str) neg_prompt_2 (str) num_images_per_prompt (int) device (torch.device) clip_skip (int) Returns: prompt_embeds (torch.Tensor) neg_prompt_embeds (torch.Tensor) pooled_prompt_embeds (torch.Tensor) negative_pooled_prompt_embeds (torch.Tensor) """ device = device or pipe._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(pipe, StableDiffusionXLLoraLoaderMixin): pipe._lora_scale = lora_scale # dynamically adjust the LoRA scale if pipe.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale) else: scale_lora_layers(pipe.text_encoder, lora_scale) if pipe.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(pipe.text_encoder_2, lora_scale) else: scale_lora_layers(pipe.text_encoder_2, lora_scale) if prompt_2: prompt = f"{prompt} {prompt_2}" if neg_prompt_2: neg_prompt = f"{neg_prompt} {neg_prompt_2}" prompt_t1 = prompt_t2 = prompt neg_prompt_t1 = neg_prompt_t2 = neg_prompt if isinstance(pipe, TextualInversionLoaderMixin): prompt_t1 = pipe.maybe_convert_prompt(prompt_t1, pipe.tokenizer) neg_prompt_t1 = pipe.maybe_convert_prompt(neg_prompt_t1, pipe.tokenizer) prompt_t2 = pipe.maybe_convert_prompt(prompt_t2, pipe.tokenizer_2) neg_prompt_t2 = pipe.maybe_convert_prompt(neg_prompt_t2, pipe.tokenizer_2) eos = pipe.tokenizer.eos_token_id #
tokenizer 1 prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt_t1) neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt_t1) # tokenizer 2 prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt_t2) neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt_t2) # padding the shorter one for prompt set 1 prompt_token_len = len(prompt_tokens) neg_prompt_token_len = len(neg_prompt_tokens) if prompt_token_len > neg_prompt_token_len: # padding the neg_prompt with eos token neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len) neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len) else: # padding the prompt prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len) prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len) # padding the shorter one for token set 2 prompt_token_len_2 = len(prompt_tokens_2) neg_prompt_token_len_2 = len(neg_prompt_tokens_2) if prompt_token_len_2 > neg_prompt_token_len_2: # padding the neg_prompt with eos token neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2) neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2) else: # padding the prompt prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2) prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2) embeds = [] neg_embeds = [] prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy()) neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights( neg_prompt_tokens.copy(), neg_prompt_weights.copy() ) prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights( prompt_tokens_2.copy(), prompt_weights_2.copy() ) neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights( neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy() ) # embed the 77-token chunks one by one, since the text encoders cannot take the full sequence at once. for i in range(len(prompt_token_groups)): # get positive prompt embeddings with weights token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=device) weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=device) token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=device) # use first text encoder prompt_embeds_1 = pipe.text_encoder(token_tensor.to(device), output_hidden_states=True) # use second text encoder prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds_2[0] if clip_skip is None: prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2] prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer.
prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-(clip_skip + 2)] prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-(clip_skip + 2)] prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states] token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0) for j in range(len(weight_tensor)): if weight_tensor[j] != 1.0: token_embedding[j] = ( token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j] ) token_embedding = token_embedding.unsqueeze(0) embeds.append(token_embedding) # get negative prompt embeddings with weights neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=device) neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=device) neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=device) # use first text encoder neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(device), output_hidden_states=True) neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2] # use second text encoder neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(device), output_hidden_states=True) neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2] negative_pooled_prompt_embeds = neg_prompt_embeds_2[0] neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states] neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0) for z in range(len(neg_weight_tensor)): if neg_weight_tensor[z] != 1.0: neg_token_embedding[z] = ( neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z] ) neg_token_embedding = neg_token_embedding.unsqueeze(0) neg_embeds.append(neg_token_embedding) prompt_embeds = torch.cat(embeds, dim=1) negative_prompt_embeds = torch.cat(neg_embeds, dim=1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) if pipe.text_encoder is not None: if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(pipe.text_encoder, lora_scale) if pipe.text_encoder_2 is not None: if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(pipe.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # ------------------------------------------------------------------------------------------------------------------------------- # reuse the backbone code from StableDiffusionXLPipeline # ------------------------------------------------------------------------------------------------------------------------------- 
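# ---------------------------------------------------------------------------
# Before the pipeline class below, a hedged usage sketch for
# get_weighted_text_embeddings_sdxl defined above. The checkpoint id and
# prompts are illustrative only; the embedding keyword arguments passed to the
# pipeline call are the standard StableDiffusionXLPipeline arguments.
# ---------------------------------------------------------------------------
def _example_weighted_sdxl_generation():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ).to("cuda")
    # Deliberately longer than the 77-token CLIP window to exercise the chunking.
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = get_weighted_text_embeddings_sdxl(
        pipe,
        prompt="a (red:1.5) cat playing with a (blue:0.8) ball in a park " * 10,
        neg_prompt="blur, (low quality:2.0)",
    )
    return pipe(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    ).images[0]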
logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0" , torch_dtype = torch.float16 , use_safetensors = True , variant = "fp16" , custom_pipeline = "lpw_stable_diffusion_xl", ) prompt = "a white cat running on the grass"*20 prompt2 = "play a football"*20 prompt = f"{prompt},{prompt2}" neg_prompt = "blur, low quality" pipe.to("cuda") images = pipe( prompt = prompt , negative_prompt = neg_prompt ).images[0] pipe.to("cpu") torch.cuda.empty_cache() images ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class SDXLLongPromptWeightingPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, feature_extractor: Optional[CLIPImageProcessor] = None, image_encoder: Optional[CLIPVisionModelWithProjection] = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) model_sequence = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) model_sequence.extend([self.unet, self.vae]) hook = None for cpu_offloaded_model in model_sequence: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) # We'll offload the last model manually. 
self.final_offload_hook = hook # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same 
signature # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, strength, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}."
) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start def prepare_latents( self, image, mask, width, height, num_channels_latents, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True, latents=None, is_strength_max=True, return_noise=False, return_image_latents=False, ): batch_size *= num_images_per_prompt if image is None: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators."
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents elif mask is None: if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents else: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. 
Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concatenating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): guidance scale values at which to generate the embedding vectors embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance.
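# Illustrative sketch (not part of the original pipeline): with w = guidance_scale, the
# denoising loop below combines the two noise predictions as
#     noise_pred = noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond)
# so w = 1 returns the conditional prediction unchanged, i.e. no guidance. This is also why
# `do_classifier_free_guidance` below additionally requires `unet.config.time_cond_proj_dim`
# to be None: models with a time-conditioning projection consume the guidance scale directly
# through `get_guidance_scale_embedding` above instead of running a second, unconditional branch.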
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.Tensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str`): The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. prompt_2 (`str`): The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added.
When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str`): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str`): The prompt not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image.
If not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, strength, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 3.
Encode input prompt lora_scale = ( self._cross_attention_kwargs.get("scale", None) if self._cross_attention_kwargs is not None else None ) negative_prompt = negative_prompt if negative_prompt is not None else "" ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = get_weighted_text_embeddings_sdxl( pipe=self, prompt=prompt, neg_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, lora_scale=lora_scale, ) dtype = prompt_embeds.dtype if isinstance(image, Image.Image): image = self.image_processor.preprocess(image, height=height, width=width) if image is not None: image = image.to(device=self.device, dtype=dtype) if isinstance(mask_image, Image.Image): mask = self.mask_processor.preprocess(mask_image, height=height, width=width) else: mask = mask_image if mask_image is not None: mask = mask.to(device=self.device, dtype=dtype) if masked_image_latents is not None: masked_image = masked_image_latents elif image.shape[1] == 4: # if image is in latent space, we can't mask it masked_image = None else: masked_image = image * (mask < 0.5) else: mask = None # 4. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) if image is not None: timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 add_noise = True if self.denoising_start is None else False # 5. Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents = self.prepare_latents( image=image, mask=mask, width=width, height=height, num_channels_latents=num_channels_unet, timestep=latent_timestep, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, dtype=prompt_embeds.dtype, device=device, generator=generator, add_noise=add_noise, latents=latents, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if mask is not None: if return_image_latents: latents, noise, image_latents = latents else: latents, noise = latents # 5.1 Prepare mask latent variables if mask is not None: mask, masked_image_latents = self.prepare_mask_latents( mask=mask, masked_image=masked_image, batch_size=batch_size * num_images_per_prompt, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, do_classifier_free_guidance=self.do_classifier_free_guidance, ) # Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings!
The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else {} height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." ) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 8. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) # 9. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if mask is not None and num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual added_cond_kwargs.update({"text_embeds": add_text_embeds, "time_ids": add_time_ids}) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if mask is not None and num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload last model to CPU if hasattr(self, 
"final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) def text2img( self, prompt: str = None, prompt_2: Optional[str] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for text-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, height=height, width=width, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def img2img( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = 
"pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for image-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def inpaint( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.Tensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for inpainting. Refer to the documentation of the `__call__` method for parameter descriptions. 
""" return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, mask_image=mask_image, masked_image_latents=masked_image_latents, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) # Override to properly handle the loading and unloading of the additional text encoder. def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): # We could have accessed the unet config from `lora_state_dict()` too. We pass # it here explicitly to be able to tell that it's coming from an SDXL # pipeline. state_dict, network_alphas = self.lora_state_dict( pretrained_model_name_or_path_or_dict, unet_config=self.unet.config, **kwargs, ) self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet) text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix="text_encoder", lora_scale=self.lora_scale, ) text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." 
in k} if len(text_encoder_2_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder_2, prefix="text_encoder_2", lora_scale=self.lora_scale, ) @classmethod def save_lora_weights( cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = False, ): state_dict = {} def pack_weights(layers, prefix): layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} return layers_state_dict state_dict.update(pack_weights(unet_lora_layers, "unet")) if text_encoder_lora_layers and text_encoder_2_lora_layers: state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder")) state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, ) def _remove_text_encoder_monkey_patch(self): self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder) self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
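# Illustrative usage sketch (not part of the pipeline itself; the checkpoint id and prompt are
# placeholders, and loading assumes the usual diffusers community-pipeline path):
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0",
#       torch_dtype=torch.float16,
#       custom_pipeline="lpw_stable_diffusion_xl",
#   ).to("cuda")
#   # weighted long prompts such as "(masterpiece:1.2), best quality, ..." are resolved by
#   # get_weighted_text_embeddings_sdxl inside __call__, so they may exceed the 77-token limit
#   image = pipe.text2img("(masterpiece:1.2), best quality, a photo of an astronaut", num_inference_steps=30).images[0]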
diffusers/examples/community/lpw_stable_diffusion_xl.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 48001 }
117
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on [Style Aligned Image Generation via Shared Attention](https://arxiv.org/abs/2312.02133). # Authors: Amir Hertz, Andrey Voynov, Shlomi Fruchter, Daniel Cohen-Or # Project Page: https://style-aligned-gen.github.io/ # Code: https://github.com/google/style-aligned # # Adapted to Diffusers by [Aryan V S](https://github.com/a-r-r-o-w/). import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import ( Attention, AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from typing import List >>> import torch >>> from diffusers.pipelines.pipeline_utils import DiffusionPipeline >>> from PIL import Image >>> model_id = "a-r-r-o-w/dreamshaper-xl-turbo" >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", custom_pipeline="pipeline_sdxl_style_aligned") >>> pipe = pipe.to("cuda") # Enable memory saving techniques >>> pipe.enable_vae_slicing() >>> pipe.enable_vae_tiling() >>> prompt = [ ... "a toy train. macro photo. 3d game asset", ... "a toy airplane. macro photo. 3d game asset", ... "a toy bicycle. macro photo. 3d game asset", ... "a toy car. macro photo. 3d game asset", ... ] >>> negative_prompt = "low quality, worst quality, " >>> # Enable StyleAligned >>> pipe.enable_style_aligned( ... share_group_norm=False, ... share_layer_norm=False, ... share_attention=True, ... adain_queries=True, ... adain_keys=True, ... adain_values=False, ... 
full_attention_share=False, ... shared_score_scale=1.0, ... shared_score_shift=0.0, ... only_self_level=0.0, >>> ) >>> # Run inference >>> images = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... guidance_scale=2, ... height=1024, ... width=1024, ... num_inference_steps=10, ... generator=torch.Generator().manual_seed(42), >>> ).images >>> # Disable StyleAligned if you do not wish to use it anymore >>> pipe.disable_style_aligned() ``` """ def expand_first(feat: torch.Tensor, scale: float = 1.0) -> torch.Tensor: b = feat.shape[0] feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1) if scale == 1: feat_style = feat_style.expand(2, b // 2, *feat.shape[1:]) else: feat_style = feat_style.repeat(1, b // 2, 1, 1, 1) feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1) return feat_style.reshape(*feat.shape) def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.Tensor: feat_style = expand_first(feat, scale=scale) return torch.cat((feat, feat_style), dim=dim) def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]: feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt() feat_mean = feat.mean(dim=-2, keepdims=True) return feat_mean, feat_std def adain(feat: torch.Tensor) -> torch.Tensor: feat_mean, feat_std = calc_mean_std(feat) feat_style_mean = expand_first(feat_mean) feat_style_std = expand_first(feat_std) feat = (feat - feat_mean) / feat_std feat = feat * feat_style_std + feat_style_mean return feat def get_switch_vec(total_num_layers, level): if level == 0: return torch.zeros(total_num_layers, dtype=torch.bool) if level == 1: return torch.ones(total_num_layers, dtype=torch.bool) to_flip = level > 0.5 if to_flip: level = 1 - level num_switch = int(level * total_num_layers) vec = torch.arange(total_num_layers) vec = vec % (total_num_layers // num_switch) vec = vec == 0 if to_flip: vec = ~vec return vec class SharedAttentionProcessor(AttnProcessor2_0): def __init__( self, share_attention: bool = True, adain_queries: bool = True, adain_keys: bool = True, adain_values: bool = False, full_attention_share: bool = False, shared_score_scale: float = 1.0, shared_score_shift: float = 0.0, ): r"""Shared Attention Processor as proposed in the StyleAligned paper.""" super().__init__() self.share_attention = share_attention self.adain_queries = adain_queries self.adain_keys = adain_keys self.adain_values = adain_values self.full_attention_share = full_attention_share self.shared_score_scale = shared_score_scale self.shared_score_shift = shared_score_shift def shifted_scaled_dot_product_attention( self, attn: Attention, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> torch.Tensor: logits = torch.einsum("bhqd,bhkd->bhqk", query, key) * attn.scale logits[:, :, :, query.shape[2] :] += self.shared_score_shift probs = logits.softmax(-1) return torch.einsum("bhqk,bhkd->bhqd", probs, value) def shared_call( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ): residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = 
attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if self.adain_queries: query = adain(query) if self.adain_keys: key = adain(key) if self.adain_values: value = adain(value) if self.share_attention: key = concat_first(key, -2, scale=self.shared_score_scale) value = concat_first(value, -2) if self.shared_score_shift != 0: hidden_states = self.shifted_scaled_dot_product_attention(attn, query, key, value) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ): if self.full_attention_share: b, n, d = hidden_states.shape k = 2 hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d) # hidden_states = einops.rearrange(hidden_states, "(k b) n d -> k (b n) d", k=2) hidden_states = super().__call__( attn, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **kwargs, ) hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d) # hidden_states = einops.rearrange(hidden_states, "k (b n) d -> (k b) n d", n=n) else: hidden_states = self.shared_call(attn, hidden_states, hidden_states, attention_mask, **kwargs) return hidden_states # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class StyleAlignedSDXLPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This pipeline also adds experimental support for [StyleAligned](https://arxiv.org/abs/2312.02133). It can be enabled/disabled using `.enable_style_aligned()` or `.disable_style_aligned()` respectively. This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes 
from the penultimate layer.
                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]

                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            # normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
            negative_prompt_2 = (
                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
            )

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if self.text_encoder_2 is not None:
            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        else:
            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            if self.text_encoder_2 is not None:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            else:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed *
num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. 
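        # Worked example for the strength-based slicing above (illustrative numbers, not from the
        # original source): with num_inference_steps=50, strength=0.3, and a first-order scheduler,
        # init_timestep = min(int(50 * 0.3), 50) = 15 and t_start = max(50 - 15, 0) = 35, so only the
        # last 15 timesteps are kept and denoising starts from a lightly noised version of the input
        # image (a low strength keeps the reference image mostly intact).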
        if denoising_start is not None:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_start * self.scheduler.config.num_train_timesteps)
                )
            )

            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
            if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                # if the scheduler is a 2nd order scheduler we might have to do +1
                # because `num_inference_steps` might be even given that every timestep
                # (except the highest one) is duplicated. If `num_inference_steps` is even it would
                # mean that we cut the timesteps in the middle of the denoising step
                # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
                # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
                num_inference_steps = num_inference_steps + 1

            # because t_n+1 >= t_n, we slice the timesteps starting from the end
            timesteps = timesteps[-num_inference_steps:]
            return timesteps, num_inference_steps

        return timesteps, num_inference_steps - t_start

    def prepare_latents(
        self,
        image,
        mask,
        width,
        height,
        num_channels_latents,
        timestep,
        batch_size,
        num_images_per_prompt,
        dtype,
        device,
        generator=None,
        add_noise=True,
        latents=None,
        is_strength_max=True,
        return_noise=False,
        return_image_latents=False,
    ):
        batch_size *= num_images_per_prompt

        if image is None:
            shape = (
                batch_size,
                num_channels_latents,
                int(height) // self.vae_scale_factor,
                int(width) // self.vae_scale_factor,
            )
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            else:
                latents = latents.to(device)

            # scale the initial noise by the standard deviation required by the scheduler
            latents = latents * self.scheduler.init_noise_sigma
            return latents

        elif mask is None:
            if not isinstance(image, (torch.Tensor, Image.Image, list)):
                raise ValueError(
                    f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
                )

            # Offload text encoder if `enable_model_cpu_offload` was enabled
            if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
                self.text_encoder_2.to("cpu")
                torch.cuda.empty_cache()

            image = image.to(device=device, dtype=dtype)

            if image.shape[1] == 4:
                init_latents = image

            else:
                # make sure the VAE is in float32 mode, as it overflows in float16
                if self.vae.config.force_upcast:
                    image = image.float()
                    self.vae.to(dtype=torch.float32)

                if isinstance(generator, list) and len(generator) != batch_size:
                    raise ValueError(
                        f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                        f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                    )

                elif isinstance(generator, list):
                    init_latents = [
                        retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                        for i in range(batch_size)
                    ]
                    init_latents = torch.cat(init_latents, dim=0)
                else:
                    init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

                if self.vae.config.force_upcast:
                    self.vae.to(dtype)

                init_latents = init_latents.to(dtype)
                init_latents = self.vae.config.scaling_factor * init_latents

                if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
                    # expand init_latents for batch_size
                    additional_image_per_prompt = batch_size // init_latents.shape[0]
                    init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
                elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
                    raise ValueError(
                        f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
                    )
                else:
                    init_latents = torch.cat([init_latents], dim=0)

                if add_noise:
                    shape = init_latents.shape
                    noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                    # get latents
                    init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

                latents = init_latents
                return latents

        else:
            shape = (
                batch_size,
                num_channels_latents,
                int(height) // self.vae_scale_factor,
                int(width) // self.vae_scale_factor,
            )
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if (image is None or timestep is None) and not is_strength_max:
                raise ValueError(
                    "Since strength < 1, initial latents are to be initialised as a combination of Image + Noise."
                    " However, either the image or the noise timestep has not been provided."
                )

            if image.shape[1] == 4:
                image_latents = image.to(device=device, dtype=dtype)
                image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
            elif return_image_latents or (latents is None and not is_strength_max):
                image = image.to(device=device, dtype=dtype)
                image_latents = self._encode_vae_image(image=image, generator=generator)
                image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

            if latents is None and add_noise:
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                # if strength is 1.
then initialise the latents to noise, else initialise them to image + noise
                latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
                # if pure noise then scale the initial latents by the scheduler's init sigma
                latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
            elif add_noise:
                noise = latents.to(device)
                latents = noise * self.scheduler.init_noise_sigma
            else:
                noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
                latents = image_latents.to(device)

            outputs = (latents,)

            if return_noise:
                outputs += (noise,)

            if return_image_latents:
                outputs += (image_latents,)

            return outputs

    def prepare_mask_latents(
        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
    ):
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)

        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask

        if masked_image is not None and masked_image.shape[1] == 4:
            masked_image_latents = masked_image
        else:
            masked_image_latents = None

        if masked_image is not None:
            if masked_image_latents is None:
                masked_image = masked_image.to(device=device, dtype=dtype)
                masked_image_latents = self._encode_vae_image(masked_image, generator=generator)

            if masked_image_latents.shape[0] < batch_size:
                if not batch_size % masked_image_latents.shape[0] == 0:
                    raise ValueError(
                        "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                        f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                        " Make sure the number of images that you pass is divisible by the total requested batch size."
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def _enable_shared_attention_processors( self, share_attention: bool, adain_queries: bool, adain_keys: bool, adain_values: bool, full_attention_share: bool, shared_score_scale: float, shared_score_shift: float, only_self_level: float, ): r"""Helper method to enable usage of Shared Attention Processor.""" attn_procs = {} num_self_layers = len([name for name in self.unet.attn_processors.keys() if "attn1" in name]) only_self_vec = get_switch_vec(num_self_layers, only_self_level) for i, name in enumerate(self.unet.attn_processors.keys()): is_self_attention = "attn1" in name if is_self_attention: if only_self_vec[i // 2]: attn_procs[name] = AttnProcessor2_0() else: attn_procs[name] = SharedAttentionProcessor( share_attention=share_attention, adain_queries=adain_queries, adain_keys=adain_keys, adain_values=adain_values, full_attention_share=full_attention_share, shared_score_scale=shared_score_scale, shared_score_shift=shared_score_shift, ) else: attn_procs[name] = AttnProcessor2_0() self.unet.set_attn_processor(attn_procs) def _disable_shared_attention_processors(self): r""" Helper method to disable usage of the Shared Attention Processor. 
All processors are reset to the default Attention Processor for pytorch versions above 2.0. """ attn_procs = {} for i, name in enumerate(self.unet.attn_processors.keys()): attn_procs[name] = AttnProcessor2_0() self.unet.set_attn_processor(attn_procs) def _register_shared_norm(self, share_group_norm: bool = True, share_layer_norm: bool = True): r"""Helper method to register shared group/layer normalization layers.""" def register_norm_forward(norm_layer: Union[nn.GroupNorm, nn.LayerNorm]) -> Union[nn.GroupNorm, nn.LayerNorm]: if not hasattr(norm_layer, "orig_forward"): setattr(norm_layer, "orig_forward", norm_layer.forward) orig_forward = norm_layer.orig_forward def forward_(hidden_states: torch.Tensor) -> torch.Tensor: n = hidden_states.shape[-2] hidden_states = concat_first(hidden_states, dim=-2) hidden_states = orig_forward(hidden_states) return hidden_states[..., :n, :] norm_layer.forward = forward_ return norm_layer def get_norm_layers(pipeline_, norm_layers_: Dict[str, List[Union[nn.GroupNorm, nn.LayerNorm]]]): if isinstance(pipeline_, nn.LayerNorm) and share_layer_norm: norm_layers_["layer"].append(pipeline_) if isinstance(pipeline_, nn.GroupNorm) and share_group_norm: norm_layers_["group"].append(pipeline_) else: for layer in pipeline_.children(): get_norm_layers(layer, norm_layers_) norm_layers = {"group": [], "layer": []} get_norm_layers(self.unet, norm_layers) norm_layers_list = [] for key in ["group", "layer"]: for layer in norm_layers[key]: norm_layers_list.append(register_norm_forward(layer)) return norm_layers_list @property def style_aligned_enabled(self): r"""Returns whether StyleAligned has been enabled in the pipeline or not.""" return hasattr(self, "_style_aligned_norm_layers") and self._style_aligned_norm_layers is not None def enable_style_aligned( self, share_group_norm: bool = True, share_layer_norm: bool = True, share_attention: bool = True, adain_queries: bool = True, adain_keys: bool = True, adain_values: bool = False, full_attention_share: bool = False, shared_score_scale: float = 1.0, shared_score_shift: float = 0.0, only_self_level: float = 0.0, ): r""" Enables the StyleAligned mechanism as in https://arxiv.org/abs/2312.02133. Args: share_group_norm (`bool`, defaults to `True`): Whether or not to use shared group normalization layers. share_layer_norm (`bool`, defaults to `True`): Whether or not to use shared layer normalization layers. share_attention (`bool`, defaults to `True`): Whether or not to use attention sharing between batch images. adain_queries (`bool`, defaults to `True`): Whether or not to apply the AdaIn operation on attention queries. adain_keys (`bool`, defaults to `True`): Whether or not to apply the AdaIn operation on attention keys. adain_values (`bool`, defaults to `False`): Whether or not to apply the AdaIn operation on attention values. full_attention_share (`bool`, defaults to `False`): Whether or not to use full attention sharing between all images in a batch. Can lead to content leakage within each batch and some loss in diversity. shared_score_scale (`float`, defaults to `1.0`): Scale for shared attention. 
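            shared_score_shift (`float`, defaults to `0.0`):
                Shift added to the shared attention scores, forwarded verbatim to `SharedAttentionProcessor`. A value
                of `0.0` leaves the scores unchanged.
            only_self_level (`float`, defaults to `0.0`):
                Level passed to `get_switch_vec` in `_enable_shared_attention_processors` to decide which
                self-attention layers keep the default `AttnProcessor2_0` instead of the shared processor.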
""" self._style_aligned_norm_layers = self._register_shared_norm(share_group_norm, share_layer_norm) self._enable_shared_attention_processors( share_attention=share_attention, adain_queries=adain_queries, adain_keys=adain_keys, adain_values=adain_values, full_attention_share=full_attention_share, shared_score_scale=shared_score_scale, shared_score_shift=shared_score_shift, only_self_level=only_self_level, ) def disable_style_aligned(self): r"""Disables the StyleAligned mechanism if it had been previously enabled.""" if self.style_aligned_enabled: for layer in self._style_aligned_norm_layers: layer.forward = layer.orig_forward self._style_aligned_norm_layers = None self._disable_shared_attention_processors() # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def denoising_start(self):
        return self._denoising_start

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        image: Optional[PipelineImageInput] = None,
        mask_image: Optional[PipelineImageInput] = None,
        masked_image_latents: Optional[torch.Tensor] = None,
        strength: float = 0.3,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        denoising_start: Optional[float] = None,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
                is used in both text-encoders
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
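            image (`PipelineImageInput`, *optional*):
                The image(s) to use as the starting point for image-to-image or inpainting. If not provided,
                generation starts from pure noise (text-to-image).
            mask_image (`PipelineImageInput`, *optional*):
                A mask marking the region of `image` to repaint; areas where the binarized mask is 1 are regenerated,
                areas where it is 0 are kept from `image`.
            masked_image_latents (`torch.Tensor`, *optional*):
                Pre-encoded latents of the masked image. If not provided, they are computed from `image` and
                `mask_image`.
            strength (`float`, *optional*, defaults to 0.3):
                Conceptually, how much to transform the reference `image`, between 0 and 1; it selects the starting
                timestep via `get_timesteps`. Ignored when a valid `denoising_start` is passed or `image` is `None`.
            denoising_start (`float`, *optional*):
                When specified, the fraction (between 0.0 and 1.0) of the denoising process to skip at the start, the
                counterpart of `denoising_end` for "Mixture of Denoisers" setups.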
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            denoising_end (`float`, *optional*):
                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
                completed before it is intentionally prematurely terminated. As a result, the returned sample will
                still retain a substantial amount of noise as determined by the discrete timesteps selected by the
                scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
                "Mixture of Denoisers" multi-pipeline setup, as elaborated in
                [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, prompt_2=prompt_2, height=height, width=width, callback_steps=callback_steps, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Encode input prompt
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            lora_scale=lora_scale,
            clip_skip=self.clip_skip,
        )

        # 4. Preprocess image and mask_image
        if image is not None:
            image = self.image_processor.preprocess(image, height=height, width=width)
            image = image.to(device=self.device, dtype=prompt_embeds.dtype)

        if mask_image is not None:
            mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
            mask = mask.to(device=self.device, dtype=prompt_embeds.dtype)

            if masked_image_latents is not None:
                masked_image = masked_image_latents
            elif image.shape[1] == 4:
                # if image is in latent space, we can't mask it
                masked_image = None
            else:
                masked_image = image * (mask < 0.5)
        else:
            mask = None

        # 4. Prepare timesteps
        def denoising_value_valid(dnv):
            return isinstance(dnv, float) and 0 < dnv < 1

        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)

        if image is not None:
            timesteps, num_inference_steps = self.get_timesteps(
                num_inference_steps,
                strength,
                device,
                denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
            )

            # check that number of inference steps is not < 1 - as this doesn't make sense
            if num_inference_steps < 1:
                raise ValueError(
                    f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
                    f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
                )

        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        is_strength_max = strength == 1.0
        add_noise = True if self.denoising_start is None else False

        # 5.
Prepare latent variables num_channels_latents = self.unet.config.in_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents = self.prepare_latents( image=image, mask=mask, width=width, height=height, num_channels_latents=num_channels_latents, timestep=latent_timestep, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, dtype=prompt_embeds.dtype, device=device, generator=generator, add_noise=add_noise, latents=latents, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if mask is not None: if return_image_latents: latents, noise, image_latents = latents else: latents, noise = latents mask, masked_image_latents = self.prepare_mask_latents( mask=mask, masked_image=masked_image, batch_size=batch_size * num_images_per_prompt, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, do_classifier_free_guidance=self.do_classifier_free_guidance, ) # Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) # 8. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 8.1 Apply denoising_end if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if mask is not None and num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
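# ---------------------------------------------------------------------------------------------------
# Minimal usage sketch for the pipeline above. This block is illustrative and not part of the original
# file: the checkpoint id, the `custom_pipeline` name, and the prompts are assumptions, and it presumes
# a CUDA device with enough memory for SDXL at fp16.
# ---------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    import torch
    from diffusers import DiffusionPipeline

    # Load SDXL weights and route `__call__` through this community pipeline file.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # assumed base checkpoint
        custom_pipeline="pipeline_sdxl_style_aligned",  # assumed community pipeline id
        torch_dtype=torch.float16,
    ).to("cuda")

    # Share attention and normalization statistics across the batch so all images adopt one style.
    pipe.enable_style_aligned()

    prompts = [
        "a firewoman, minimal origami style",  # assumed example prompt
        "a dragon, minimal origami style",  # assumed example prompt
    ]
    images = pipe(prompt=prompts, num_inference_steps=30, guidance_scale=5.0).images
    for i, img in enumerate(images):
        img.save(f"style_aligned_{i}.png")

    # Restore the default attention processors and norm forwards when done.
    pipe.disable_style_aligned()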
diffusers/examples/community/pipeline_sdxl_style_aligned.py/0
{ "file_path": "diffusers/examples/community/pipeline_sdxl_style_aligned.py", "repo_id": "diffusers", "token_count": 42028 }
118
# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput from diffusers.utils.torch_utils import randn_tensor @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UFOGen class UFOGenSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.Tensor`): the betas that the scheduler is being initialized with. 
Returns: `torch.Tensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class UFOGenScheduler(SchedulerMixin, ConfigMixin): """ `UFOGenScheduler` implements multistep and onestep sampling for a UFOGen model, introduced in [UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs](https://arxiv.org/abs/2311.09257) by Yanwu Xu, Yang Zhao, Zhisheng Xiao, and Tingbo Hou. UFOGen is a variant of the denoising diffusion GAN (DDGAN) model designed for one-step sampling. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. clip_sample (`bool`, defaults to `True`): Clip the predicted sample for numerical stability. clip_sample_range (`float`, defaults to 1.0): The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. set_alpha_to_one (`bool`, defaults to `True`): Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, otherwise it uses the alpha value at step 0. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True`. timestep_spacing (`str`, defaults to `"leading"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families.
rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). denoising_step_size (`int`, defaults to 250): The denoising step size parameter from the UFOGen paper. The number of steps used for training is roughly `math.ceil(num_train_timesteps / denoising_step_size)`. """ order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = "leading", steps_offset: int = 0, rescale_betas_zero_snr: bool = False, denoising_step_size: int = 250, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == "sigmoid": # GeoDiff sigmoid schedule betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # For the final step, there is no previous alphas_cumprod because we are already at 0 # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.custom_timesteps = False self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample def set_timesteps( self, num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, timesteps: Optional[List[int]] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. 
If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, `num_inference_steps` must be `None`. """ if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError("`custom_timesteps` must be in descending order.") if timesteps[0] >= self.config.num_train_timesteps: raise ValueError( f"`timesteps` must start before `self.config.train_timesteps`:" f" {self.config.num_train_timesteps}." ) timesteps = np.array(timesteps, dtype=np.int64) self.custom_timesteps = True else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps self.custom_timesteps = False # TODO: For now, handle special case when num_inference_steps == 1 separately if num_inference_steps == 1: # Set the timestep schedule to num_train_timesteps - 1 rather than 0 # (that is, the one-step timestep schedule is always trailing rather than leading or linspace) timesteps = np.array([self.config.num_train_timesteps - 1], dtype=np.int64) else: # TODO: For now, retain the DDPM timestep spacing logic # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) .round()[::-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." ) self.timesteps = torch.from_numpy(timesteps).to(device) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. 
Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[UFOGenSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_ddpm.UFOGenSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # 0. Resolve timesteps t = timestep prev_t = self.previous_timestep(t) # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t # beta_prod_t_prev = 1 - alpha_prod_t_prev # current_alpha_t = alpha_prod_t / alpha_prod_t_prev # current_beta_t = 1 - current_alpha_t # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for UFOGenScheduler." ) # 3. 
Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 4. Single-step or multi-step sampling # Noise is not used on the final timestep of the timestep schedule. # This also means that noise is not used for one-step sampling. if t != self.timesteps[-1]: # TODO: is this correct? # Sample prev sample x_{t - 1} ~ q(x_{t - 1} | x_0 = G(x_t, t)) device = model_output.device noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) sqrt_alpha_prod_t_prev = alpha_prod_t_prev**0.5 sqrt_one_minus_alpha_prod_t_prev = (1 - alpha_prod_t_prev) ** 0.5 pred_prev_sample = sqrt_alpha_prod_t_prev * pred_original_sample + sqrt_one_minus_alpha_prod_t_prev * noise else: # Simply return the pred_original_sample. If `prediction_type == "sample"`, this is equivalent to returning # the output of the GAN generator U-Net on the initial noisy latents x_T ~ N(0, I). pred_prev_sample = pred_original_sample if not return_dict: return (pred_prev_sample,) return UFOGenSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as sample alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == 
self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = ( self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps ) prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t
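# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# It shows how `UFOGenScheduler` could drive one-step sampling, with a
# hypothetical `unet(sample, t)` callable standing in for a real denoiser:
#
#     scheduler = UFOGenScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=1)  # one-step schedule: [999]
#     sample = torch.randn(1, 4, 64, 64)  # x_T ~ N(0, I)
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t)  # assumed GAN-generator UNet call
#         sample = scheduler.step(model_output, t, sample).prev_sample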
diffusers/examples/community/scheduling_ufogen.py/0
{ "file_path": "diffusers/examples/community/scheduling_ufogen.py", "repo_id": "diffusers", "token_count": 10811 }
119
# DreamBooth training example

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3-5) images of a subject.

The `train_dreambooth.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the example folder and run:

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or, for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell (e.g. a notebook):

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

When running `accelerate config`, setting torch compile mode to True can bring dramatic speedups. Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

And launch the training using:

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400 \
  --push_to_hub
```

### Training with prior-preservation loss

Prior-preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior-preservation, we first generate images using the model with a class prompt and then use those during training along with our data. According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation; 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time. A sketch of how the two loss terms combine is shown below, followed by the launch command.
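Conceptually, the objective is the instance-image loss plus a weighted prior (class-image) loss. The snippet below is an illustrative sketch rather than a verbatim excerpt from `train_dreambooth.py`; the tensor shapes and the stand-in predictions are assumptions for demonstration.

```python
import torch
import torch.nn.functional as F

prior_loss_weight = 1.0  # corresponds to --prior_loss_weight

# Stand-ins for the UNet prediction and the diffusion target; the script
# stacks [instance, class] examples along the batch dimension like this.
model_pred = torch.randn(2, 4, 64, 64)
target = torch.randn(2, 4, 64, 64)

model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)

instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
loss = instance_loss + prior_loss_weight * prior_loss
```

Launch training with prior preservation using: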
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on a 16GB GPU:

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train DreamBooth on a 16GB GPU. To install `bitsandbytes`, please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 --gradient_checkpointing \
  --use_8bit_adam \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on a 12GB GPU:

It is possible to run DreamBooth on a 12GB GPU by using the following optimizations:
- [gradient checkpointing and the 8-bit optimizer](#training-on-a-16gb-gpu)
- [xformers](#training-with-xformers)
- [setting grads to none](#set-grads-to-none)

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --use_8bit_adam \
  --enable_xformers_memory_efficient_attention \
  --set_grads_to_none \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on an 8 GB GPU:

By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some tensors from VRAM to either CPU or NVME, allowing training with less VRAM.

DeepSpeed needs to be enabled with `accelerate config`. During configuration, answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 mixed precision, and offloading both parameters and optimizer state to CPU, it's possible to train on under 8 GB VRAM, with the drawback of requiring significantly more RAM (about 25 GB). See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
Changing the default Adam optimizer to DeepSpeed's special version of Adam, `deepspeed.ops.adam.DeepSpeedCPUAdam`, gives a substantial speedup, but enabling it requires a CUDA toolchain with the same version as PyTorch. The 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch --mixed_precision="fp16" train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --sample_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Fine-tune text encoder with the UNet.

The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results, especially on faces.
Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.

___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --use_8bit_adam \
  --gradient_checkpointing \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Using DreamBooth for pipelines other than Stable Diffusion

The [AltDiffusion pipeline](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion) also supports DreamBooth fine-tuning. The process is the same as above; all you need to do is replace the `MODEL_NAME` like this:

```
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
or
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
```

### Inference

Once you have trained a model using the above command, you can run inference simply using the `StableDiffusionPipeline`. Make sure to include the `identifier` (e.g. `sks` in the above example) in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
```

### Inference from a training checkpoint

You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.
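As a rough sketch (the checkpoint step and paths below are hypothetical), a checkpoint's fine-tuned UNet can be plugged back into a fresh pipeline:

```python
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel

# Hypothetical checkpoint directory produced by --checkpointing_steps.
unet = UNet2DConditionModel.from_pretrained(
    "path-to-save-model/checkpoint-400/unet", torch_dtype=torch.float16
)
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", unet=unet, torch_dtype=torch.float16
).to("cuda")
image = pipe("A photo of sks dog in a bucket", num_inference_steps=50).images[0]
```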
Please refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) for full step-by-step instructions.

## Training with Low-Rank Adaptation of Large Language Models (LoRA)

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114)
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow controlling the extent to which the model is adapted towards new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.

### Training

Let's get started with a simple example. We will re-use the dog example of the [previous section](#dog-toy-example).

First, you need to set up your DreamBooth training example as explained in the [installation section](#Installing-the-dependencies).
Next, let's download the dog dataset. Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. Make sure to set `INSTANCE_DIR` to the name of your directory further below. This will be our training data.

Now, you can launch the training. Here we will use [Stable Diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5).

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [wandb](https://docs.wandb.ai/quickstart) is a nice solution to easily see generated images during training. All you need to do is to run `pip install wandb` before training and pass `--report_to="wandb"` to automatically log images.___**

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag:

```bash
huggingface-cli login
```

Now we can start training!

```bash
accelerate launch train_dreambooth_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --checkpointing_steps=100 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=50 \
  --seed="0" \
  --push_to_hub
```

**___Note: When using LoRA we can use a much higher learning rate compared to vanilla DreamBooth.
Here we use *1e-4* instead of the usual *2e-6*.___**

The final LoRA embedding weights have been uploaded to [patrickvonplaten/lora_dreambooth_dog_example](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example). **___Note: [The final weights](https://huggingface.co/patrickvonplaten/lora/blob/main/pytorch_attn_procs.bin) are only 3 MB in size, which is orders of magnitude smaller than the original model.___**

The training results are summarized [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5). You can use the `Step` slider to see how the model learned the features of our subject as training progressed.

Optionally, we can also train additional LoRA layers for the text encoder. Specify the `--train_text_encoder` argument above for that. If you're interested in knowing more about how we enable this support, check out this [PR](https://github.com/huggingface/diffusers/pull/2918).

With the default hyperparameters from the above, the training seems to go in a positive direction. Check out [this panel](https://wandb.ai/sayakpaul/dreambooth-lora/reports/test-23-04-17-17-00-13---Vmlldzo0MDkwNjMy). The trained LoRA layers are available [here](https://huggingface.co/sayakpaul/dreambooth).

### Inference

After training, LoRA weights can be loaded very easily into the original pipeline. First, you need to load the original pipeline:

```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("base-model-name").to("cuda")
```

Next, we can load the adapter layers into the pipeline with the [`load_lora_weights` function](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters#lora).

```python
pipe.load_lora_weights("path-to-the-lora-checkpoint")
```

Finally, we can run inference:

```python
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
```

If you are loading the LoRA parameters from the Hub and if the Hub repository has a `base_model` tag (such as [this](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example/blob/main/README.md?code=true#L4)), then you can do:

```py
from huggingface_hub.repocard import RepoCard
from diffusers import StableDiffusionPipeline
import torch

lora_model_id = "patrickvonplaten/lora_dreambooth_dog_example"
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
...
```

If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA weights. For example:

```python
from huggingface_hub.repocard import RepoCard
from diffusers import StableDiffusionPipeline
import torch

lora_model_id = "sayakpaul/dreambooth-text-encoder-test"
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
```

Note that the use of [`StableDiffusionLoraLoaderMixin.load_lora_weights`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights) is preferred to [`UNet2DConditionLoadersMixin.load_attn_procs`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs) for loading LoRA parameters.
This is because `StableDiffusionLoraLoaderMixin.load_lora_weights` can handle the following situations:

* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:

  ```py
  pipe.load_lora_weights(lora_model_path)
  ```

* LoRA parameters that have separate identifiers for the UNet and the text encoder, such as [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth).

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the Flax training example. Follow the instructions above to get the model and dataset before running the script.

___Note: The Flax example doesn't yet support features like gradient checkpointing or gradient accumulation, so to use Flax for faster training we will need >30GB cards.___

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

### Training without prior preservation loss

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --max_train_steps=400
```

### Training with prior preservation loss

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Fine-tune text encoder with the UNet.

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=2e-6 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training with xformers:

You can enable memory-efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.

You can also use DreamBooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint).

### Set grads to none

To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero.
However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument.

More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html

### Experimental results

You can refer to [this blog post](https://huggingface.co/blog/dreambooth) that discusses some DreamBooth experiments in detail. Specifically, it recommends a set of DreamBooth-specific tips and tricks that we have found to work well for a variety of subjects.

## IF

You can use the LoRA and full DreamBooth scripts to train the text-to-image [IF model](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and the stage II upscaler [IF model](https://huggingface.co/DeepFloyd/IF-II-L-v1.0).

Note that IF has a predicted variance, and our finetuning scripts only train the model's predicted error, so for finetuned IF models we switch to a fixed variance schedule. The full finetuning scripts will update the scheduler config for the full saved model. However, when loading saved LoRA weights, you must also update the pipeline's scheduler config.

```py
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")

pipe.load_lora_weights("<lora weights path>")

# Update scheduler config to fixed variance schedule
pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small")
```

Additionally, a few alternative cli flags are needed for IF.

`--resolution=64`: IF is a pixel-space diffusion model. In order to operate on uncompressed pixels, the input images are of a much smaller resolution.

`--pre_compute_text_embeddings`: IF uses [T5](https://huggingface.co/docs/transformers/model_doc/t5) for its text encoder. In order to save GPU memory, we pre-compute all text embeddings and then de-allocate T5.

`--tokenizer_max_length=77`: T5 has a longer default text length, but the default IF encoding procedure uses a smaller number.

`--text_encoder_use_attention_mask`: T5 passes the attention mask to the text encoder.

### Tips and Tricks

We find LoRA to be sufficient for finetuning the stage I model, as the low resolution of the model makes representing fine-grained detail hard regardless.

For common and/or not visually complex object concepts, you can get away with not fine-tuning the upscaler. Just be sure to adjust the prompt passed to the upscaler to remove the new token from the instance prompt. I.e. if your stage I prompt is "a sks dog", use "a dog" for your stage II prompt.

For fine-grained detail like faces that aren't present in the original training set, we find that full finetuning of the stage II upscaler is better than LoRA finetuning stage II.

For fine-grained detail like faces, we find that lower learning rates along with larger batch sizes work best.

For stage II, we find that lower learning rates are also needed.

We found experimentally that the DDPM scheduler with its default, larger number of denoising steps sometimes works better than the DPM Solver scheduler used in the training scripts.

### Stage II additional validation images

The stage II validation requires images to upscale, so we can download a downsized version of the training set:

```py
from huggingface_hub import snapshot_download

local_dir = "./dog_downsized"
snapshot_download(
    "diffusers/dog-example-downsized",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

### IF stage I LoRA Dreambooth

This training configuration requires ~28 GB VRAM.
```sh
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="dreambooth_dog_lora"

accelerate launch train_dreambooth_lora.py \
  --report_to wandb \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a sks dog" \
  --resolution=64 \
  --train_batch_size=4 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --scale_lr \
  --max_train_steps=1200 \
  --validation_prompt="a sks dog" \
  --validation_epochs=25 \
  --checkpointing_steps=100 \
  --pre_compute_text_embeddings \
  --tokenizer_max_length=77 \
  --text_encoder_use_attention_mask
```

### IF stage II LoRA Dreambooth

`--validation_images`: These images are upscaled during validation steps.

`--class_labels_conditioning=timesteps`: Pass additional conditioning to the UNet needed for stage II.

`--learning_rate=1e-6`: Lower learning rate than stage I.

`--resolution=256`: The upscaler expects higher resolution inputs.

```sh
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="dreambooth_dog_upscale"
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"

python train_dreambooth_lora.py \
  --report_to wandb \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a sks dog" \
  --resolution=256 \
  --train_batch_size=4 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-6 \
  --max_train_steps=2000 \
  --validation_prompt="a sks dog" \
  --validation_epochs=100 \
  --checkpointing_steps=500 \
  --pre_compute_text_embeddings \
  --tokenizer_max_length=77 \
  --text_encoder_use_attention_mask \
  --validation_images $VALIDATION_IMAGES \
  --class_labels_conditioning=timesteps
```

### IF Stage I Full Dreambooth

`--skip_save_text_encoder`: When training the full model, this will skip saving the entire T5 with the finetuned model. You can still load the pipeline with a T5 loaded from the original model.

`--use_8bit_adam`: Due to the size of the optimizer states, we recommend training the full XL IF model with 8-bit Adam.

`--learning_rate=1e-7`: For full DreamBooth, IF requires very low learning rates. With higher learning rates, model quality will degrade. Note that it is likely the learning rate can be increased with larger batch sizes.

Using 8-bit Adam and a batch size of 4, the model can be trained in ~48 GB VRAM.

`--validation_scheduler`: Set a particular scheduler via a string. We found that it is better to use the DDPMScheduler for validation when training DeepFloyd IF.

```sh
export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="dreambooth_if"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=64 \
  --train_batch_size=4 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-7 \
  --max_train_steps=150 \
  --validation_prompt "a photo of sks dog" \
  --validation_steps 25 \
  --text_encoder_use_attention_mask \
  --tokenizer_max_length 77 \
  --pre_compute_text_embeddings \
  --use_8bit_adam \
  --set_grads_to_none \
  --skip_save_text_encoder \
  --validation_scheduler DDPMScheduler \
  --push_to_hub
```

### IF Stage II Full Dreambooth

`--learning_rate=5e-6`: With a smaller effective batch size of 4, we found that we required learning rates as low as 1e-8.
`--resolution=256`: The upscaler expects higher resolution inputs.

`--train_batch_size=2` and `--gradient_accumulation_steps=6`: We found that full training of stage II, particularly with faces, required large effective batch sizes.

```sh
export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="dreambooth_dog_upscale"
export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"

accelerate launch train_dreambooth.py \
  --report_to wandb \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a sks dog" \
  --resolution=256 \
  --train_batch_size=2 \
  --gradient_accumulation_steps=6 \
  --learning_rate=5e-6 \
  --max_train_steps=2000 \
  --validation_prompt="a sks dog" \
  --validation_steps=150 \
  --checkpointing_steps=500 \
  --pre_compute_text_embeddings \
  --tokenizer_max_length=77 \
  --text_encoder_use_attention_mask \
  --validation_images $VALIDATION_IMAGES \
  --class_labels_conditioning timesteps \
  --validation_scheduler DDPMScheduler \
  --push_to_hub
```

## Stable Diffusion XL

We support fine-tuning of the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with DreamBooth and LoRA via the `train_dreambooth_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md).
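Inference with an SDXL DreamBooth LoRA follows the same pattern as the examples above; here is a minimal sketch (the LoRA path is a placeholder for your own output directory):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("path-to-your-sdxl-lora")  # output dir of train_dreambooth_lora_sdxl.py
image = pipe("A photo of sks dog in a bucket", num_inference_steps=25).images[0]
```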
diffusers/examples/dreambooth/README.md/0
{ "file_path": "diffusers/examples/dreambooth/README.md", "repo_id": "diffusers", "token_count": 9826 }
120
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import copy import gc import importlib import itertools import logging import math import os import shutil import warnings from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, model_info, upload_folder from huggingface_hub.utils import insecure_hashlib from packaging import version from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import compute_snr from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.31.0.dev0") logger = get_logger(__name__) def save_model_card( repo_id: str, images: list = None, base_model: str = None, train_text_encoder=False, prompt: str = None, repo_folder: str = None, pipeline: DiffusionPipeline = None, ): img_str = "" if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" model_description = f""" # DreamBooth - {repo_id} This is a dreambooth model derived from {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n {img_str} DreamBooth for the text encoder was enabled: {train_text_encoder}. """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=base_model, prompt=prompt, model_description=model_description, inference=True, ) tags = ["text-to-image", "dreambooth", "diffusers-training"] if isinstance(pipeline, StableDiffusionPipeline): tags.extend(["stable-diffusion", "stable-diffusion-diffusers"]) else: tags.extend(["if", "if-diffusers"]) model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, global_step, prompt_embeds, negative_prompt_embeds, ): logger.info( f"Running validation... 
\n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) pipeline_args = {} if vae is not None: pipeline_args["vae"] = vae # create pipeline (note: unet and vae are loaded again in float32) pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, **pipeline_args, ) # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it scheduler_args = {} if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type module = importlib.import_module("diffusers") scheduler_class = getattr(module, args.validation_scheduler) pipeline.scheduler = scheduler_class.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.pre_compute_text_embeddings: pipeline_args = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, } else: pipeline_args = {"prompt": args.validation_prompt} # run inference generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] if args.validation_images is None: for _ in range(args.num_validation_images): with torch.autocast("cuda"): image = pipeline(**pipeline_args, num_inference_steps=25, generator=generator).images[0] images.append(image) else: for image in args.validation_images: image = Image.open(image) image = pipeline(**pipeline_args, image=image, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, global_step, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() return images def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation elif model_class == "T5EncoderModel": from transformers import T5EncoderModel return T5EncoderModel else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the 
model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="dreambooth-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--train_text_encoder", action="store_true", help="Whether to train the text encoder. If set, the text encoder should be float32 precision.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" "instructions." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." 
" See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more details" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. 
Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by setting grads to None instead of zero. Be aware that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--offset_noise", action="store_true", default=False, help=( "Fine-tuning against a modified noise." " See: https://www.crosslabs.org//blog/diffusion-with-offset-noise for more information." ), ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--pre_compute_text_embeddings", action="store_true", help="Whether or not to pre-compute text embeddings. If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.", ) parser.add_argument( "--tokenizer_max_length", type=int, default=None, required=False, help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.", ) parser.add_argument( "--text_encoder_use_attention_mask", action="store_true", required=False, help="Whether to use an attention mask for the text encoder.", ) parser.add_argument( "--skip_save_text_encoder", action="store_true", required=False, help="Set to not save the text encoder." ) parser.add_argument( "--validation_images", required=False, default=None, nargs="+", help="Optional set of images to use for validation. 
Used when the target pipeline takes an initial image as input, such as when training image variation or super-resolution.", ) parser.add_argument( "--class_labels_conditioning", required=False, default=None, help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.", ) parser.add_argument( "--validation_scheduler", type=str, default="DPMSolverMultistepScheduler", choices=["DPMSolverMultistepScheduler", "DDPMScheduler"], help="Select which scheduler to use for validation. DDPMScheduler is recommended for DeepFloyd IF.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify a prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") if args.train_text_encoder and args.pre_compute_text_embeddings: raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and tokenizes the prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, class_num=None, size=512, center_crop=False, encoder_hidden_states=None, class_prompt_encoder_hidden_states=None, tokenizer_max_length=None, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.encoder_hidden_states = encoder_hidden_states self.class_prompt_encoder_hidden_states = class_prompt_encoder_hidden_states self.tokenizer_max_length = tokenizer_max_length self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError(f"Instance images root {self.instance_data_root} doesn't exist.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) if class_num is not None: self.num_class_images = min(len(self.class_images_path), class_num) else: self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) instance_image = exif_transpose(instance_image) if not 
instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) if self.encoder_hidden_states is not None: example["instance_prompt_ids"] = self.encoder_hidden_states else: text_inputs = tokenize_prompt( self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length ) example["instance_prompt_ids"] = text_inputs.input_ids example["instance_attention_mask"] = text_inputs.attention_mask if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) class_image = exif_transpose(class_image) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) if self.class_prompt_encoder_hidden_states is not None: example["class_prompt_ids"] = self.class_prompt_encoder_hidden_states else: class_text_inputs = tokenize_prompt( self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length ) example["class_prompt_ids"] = class_text_inputs.input_ids example["class_attention_mask"] = class_text_inputs.attention_mask return example def collate_fn(examples, with_prior_preservation=False): has_attention_mask = "instance_attention_mask" in examples[0] input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] if has_attention_mask: attention_mask = [example["instance_attention_mask"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] if has_attention_mask: attention_mask += [example["class_attention_mask"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } if has_attention_mask: attention_mask = torch.cat(attention_mask, dim=0) batch["attention_mask"] = attention_mask return batch class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def model_has_vae(args): config_file_name = Path("vae", AutoencoderKL.config_name).as_posix() if os.path.isdir(args.pretrained_model_name_or_path): config_file_name = os.path.join(args.pretrained_model_name_or_path, config_file_name) return os.path.isfile(config_file_name) else: files_in_repo = model_info(args.pretrained_model_name_or_path, revision=args.revision).siblings return any(file.rfilename == config_file_name for file in files_in_repo) def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None): if tokenizer_max_length is not None: max_length = tokenizer_max_length else: max_length = tokenizer.model_max_length text_inputs = tokenizer( prompt, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt", ) return text_inputs def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None): text_input_ids = input_ids.to(text_encoder.device) if 
text_encoder_use_attention_mask: attention_mask = attention_mask.to(text_encoder.device) else: attention_mask = None prompt_embeds = text_encoder( text_input_ids, attention_mask=attention_mask, return_dict=False, ) prompt_embeds = prompt_embeds[0] return prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
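# A brief note on the step below (the DreamBooth paper is Ruiz et al. 2022; the loss names here
# refer to variables defined later in the training loop): prior preservation regularizes
# fine-tuning by also training on generic images of the class, sampled from the frozen base
# model, and the combined objective applied later is
#     loss = instance_mse + args.prior_loss_weight * prior_mse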
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, variant=args.variant, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) if model_has_vae(args): vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) else: vae = None unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: for model in models: sub_dir = "unet" if isinstance(model, type(unwrap_model(unet))) else "text_encoder" model.save_pretrained(os.path.join(output_dir, sub_dir)) # make sure to pop weight so that corresponding model 
is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: # pop models so that they are not loaded again model = models.pop() if isinstance(model, type(unwrap_model(text_encoder))): # load transformers style into model load_model = text_encoder_cls.from_pretrained(input_dir, subfolder="text_encoder") model.config = load_model.config else: # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if vae is not None: vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training on some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly.") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() # Check that all trainable models are in full precision low_precision_error_string = ( "Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training. A copy of the weights should still be float32." ) if unwrap_model(unet).dtype != torch.float32: raise ValueError(f"Unet loaded as datatype {unwrap_model(unet).dtype}. {low_precision_error_string}") if args.train_text_encoder and unwrap_model(text_encoder).dtype != torch.float32: raise ValueError( f"Text encoder loaded as datatype {unwrap_model(text_encoder).dtype}." f" {low_precision_error_string}" ) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) if args.pre_compute_text_embeddings: def compute_text_embeddings(prompt): with torch.no_grad(): text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length) prompt_embeds = encode_prompt( text_encoder, text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=args.text_encoder_use_attention_mask, ) return prompt_embeds pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt) validation_prompt_negative_prompt_embeds = compute_text_embeddings("") if args.validation_prompt is not None: validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt) else: validation_prompt_encoder_hidden_states = None if args.class_prompt is not None: pre_computed_class_prompt_encoder_hidden_states = compute_text_embeddings(args.class_prompt) else: pre_computed_class_prompt_encoder_hidden_states = None text_encoder = None tokenizer = None gc.collect() torch.cuda.empty_cache() else: pre_computed_encoder_hidden_states = None validation_prompt_encoder_hidden_states = None validation_prompt_negative_prompt_embeds = None pre_computed_class_prompt_encoder_hidden_states = None # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, class_num=args.num_class_images, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, encoder_hidden_states=pre_computed_encoder_hidden_states, class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states, tokenizer_max_length=args.tokenizer_max_length, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
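# The dtype selection below mirrors `accelerate`'s mixed_precision convention
# ("fp16" -> torch.float16, "bf16" -> torch.bfloat16); trainable parameters stay in
# float32 and autocast handles the compute dtype during the forward/backward pass.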
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype if vae is not None: vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder and text_encoder is not None: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: tracker_config = vars(copy.deepcopy(args)) tracker_config.pop("validation_images") accelerator.init_trackers("dreambooth", config=tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): pixel_values = batch["pixel_values"].to(dtype=weight_dtype) if vae is not None: # Convert images to latent space model_input = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() model_input = model_input * vae.config.scaling_factor else: model_input = pixel_values # Sample noise that we'll add to the model input if args.offset_noise: noise = torch.randn_like(model_input) + 0.1 * torch.randn( model_input.shape[0], model_input.shape[1], 1, 1, device=model_input.device ) else: noise = torch.randn_like(model_input) bsz, channels, height, width = model_input.shape # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device ) timesteps = timesteps.long() # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) # Get the text embedding for conditioning if args.pre_compute_text_embeddings: encoder_hidden_states = batch["input_ids"] else: encoder_hidden_states = encode_prompt( text_encoder, batch["input_ids"], batch["attention_mask"], text_encoder_use_attention_mask=args.text_encoder_use_attention_mask, ) if unwrap_model(unet).config.in_channels == channels * 2: noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1) if args.class_labels_conditioning == "timesteps": class_labels = timesteps else: class_labels = None # Predict the noise residual model_pred = unet( noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels, return_dict=False )[0] if model_pred.shape[1] == 6: model_pred, _ = torch.chunk(model_pred, 2, dim=1) # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(model_input, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Compute instance loss if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) base_weight = ( torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr ) if noise_scheduler.config.prediction_type == "v_prediction": # Velocity objective needs to be floored to an SNR weight of one. mse_loss_weights = base_weight + 1 else: # Epsilon and sample both use the same loss weights. 
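# Put differently, the per-timestep weight computed above is
#     min(SNR_t, snr_gamma) / SNR_t
# for epsilon-prediction, which caps the contribution of low-noise (high-SNR) timesteps,
# while the v-prediction branch adds 1 so the weight is never below one.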
mse_loss_weights = base_weight loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() if args.with_prior_preservation: # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") images = [] if args.validation_prompt is not None and global_step % args.validation_steps == 0: images = log_validation( unwrap_model(text_encoder) if text_encoder is not None else text_encoder, tokenizer, unwrap_model(unet), vae, args, accelerator, weight_dtype, global_step, validation_prompt_encoder_hidden_states, validation_prompt_negative_prompt_embeds, ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: pipeline_args = {} if text_encoder is not None: pipeline_args["text_encoder"] = unwrap_model(text_encoder) if args.skip_save_text_encoder: pipeline_args["text_encoder"] = None pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unwrap_model(unet), revision=args.revision, variant=args.variant, **pipeline_args, ) # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the scheduler to ignore it scheduler_args = {} if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, train_text_encoder=args.train_text_encoder, prompt=args.instance_prompt, repo_folder=args.output_dir, pipeline=pipeline, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
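# An illustrative invocation of this script (the model name, paths, and hyperparameter values
# below are example choices, not values the script enforces; all flags shown are defined in
# parse_args above):
#
#   accelerate launch train_dreambooth.py \
#     --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
#     --instance_data_dir="./instance_images" \
#     --instance_prompt="a photo of sks dog" \
#     --output_dir="dreambooth-model" \
#     --resolution=512 \
#     --train_batch_size=1 \
#     --gradient_accumulation_steps=1 \
#     --learning_rate=5e-6 \
#     --lr_scheduler="constant" \
#     --lr_warmup_steps=0 \
#     --max_train_steps=400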
diffusers/examples/dreambooth/train_dreambooth.py/0
{ "file_path": "diffusers/examples/dreambooth/train_dreambooth.py", "repo_id": "diffusers", "token_count": 25383 }
121
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 Harutatsu Akiyama and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import shutil import warnings from contextlib import nullcontext from pathlib import Path from urllib.parse import urlparse import accelerate import datasets import numpy as np import PIL import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import ( StableDiffusionXLInstructPix2PixPipeline, ) from diffusers.training_utils import EMAModel from diffusers.utils import check_min_version, deprecate, is_wandb_available, load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.31.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "fusing/instructpix2pix-1000-samples": ("file_name", "edited_image", "edit_prompt"), } WANDB_TABLE_COL_NAMES = ["file_name", "edited_image", "edit_prompt"] TORCH_DTYPE_MAPPING = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16} def log_validation(pipeline, args, accelerator, generator, global_step, is_final_validation=False): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) val_save_dir = os.path.join(args.output_dir, "validation_images") if not os.path.exists(val_save_dir): os.makedirs(val_save_dir) original_image = ( lambda image_url_or_path: load_image(image_url_or_path) if urlparse(image_url_or_path).scheme else Image.open(image_url_or_path).convert("RGB") )(args.val_image_url_or_path) if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type) with autocast_ctx: edited_images = [] # Run inference for val_img_idx in range(args.num_validation_images): a_val_img = pipeline( args.validation_prompt, image=original_image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7, generator=generator, ).images[0] edited_images.append(a_val_img) # Save validation images a_val_img.save(os.path.join(val_save_dir, f"step_{global_step}_val_img_{val_img_idx}.png")) for tracker in accelerator.trackers: if tracker.name == "wandb": wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES) for edited_image in edited_images: wandb_table.add_data(wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt) logger_name = "test" if is_final_validation else "validation" tracker.log({logger_name: wandb_table}) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(): parser = argparse.ArgumentParser(description="Script to train Stable Diffusion XL for InstructPix2Pix.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--vae_precision", type=str, choices=["fp32", "fp16", "bf16"], default="fp32", help=( "The vanilla SDXL 1.0 VAE can cause NaNs due to large activation values. Some custom models might already have a solution" " to this problem, and this flag allows you to use mixed precision to stabilize training." ), ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." 
), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset; leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--original_image_column", type=str, default="input_image", help="The column of the dataset containing the original image on which edits were made.", ) parser.add_argument( "--edited_image_column", type=str, default="edited_image", help="The column of the dataset containing the edited image.", ) parser.add_argument( "--edit_prompt_column", type=str, default="edit_prompt", help="The column of the dataset containing the edit instruction.", ) parser.add_argument( "--val_image_url_or_path", type=str, default=None, help="URL or local path to the original image that you would like to edit (used during inference for debugging purposes).", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run fine-tuning validation every X steps. The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--output_dir", type=str, default="instruct-pix2pix-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=256, help=( "The resolution for input images; all the images in the train/validation dataset will be resized to this resolution." ), ) parser.add_argument( "--crops_coords_top_left_h", type=int, default=0, help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--crops_coords_top_left_w", type=int, default=0, help=("Coordinate for (the width) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="Whether to randomly flip images horizontally.", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--conditioning_dropout_prob", type=float, default=None, help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def convert_to_np(image, resolution): if isinstance(image, str): image = PIL.Image.open(image) image = image.convert("RGB").resize((resolution, resolution)) return np.array(image).transpose(2, 0, 1) def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) if torch.backends.mps.is_available() and args.mixed_precision == "bf16": # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. 
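# (Autocast support on the MPS backend has historically been incomplete, so `accelerate`'s
# native AMP is turned off for it; the explicit weight-dtype casting later in this function
# is unaffected.)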
if torch.backends.mps.is_available(): accelerator.native_amp = False # Guard against args.seed being None, mirroring the DreamBooth script above. generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # InstructPix2Pix uses an additional image for conditioning. To accommodate that, # it uses 8 channels (instead of 4) in the first (conv) layer of the UNet. This UNet is # then fine-tuned on the custom InstructPix2Pix dataset. This modified UNet is initialized # from the pre-trained checkpoints. For the extra channels added to the first layer, they are # initialized to zero. logger.info("Initializing the XL InstructPix2Pix UNet from the pretrained UNet.") in_channels = 8 out_channels = unet.conv_in.out_channels unet.register_to_config(in_channels=in_channels) with torch.no_grad(): new_conv_in = nn.Conv2d( in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding ) new_conv_in.weight.zero_() new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) unet.conv_in = new_conv_in # Create EMA for the unet. if args.use_ema: ema_unet = EMAModel(unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training on some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly.") def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`." ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # Get the column names for input/target. 
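# The tuple order assumed by the index lookups below is:
#   dataset_columns[0] = original image column, [1] = edit prompt column, [2] = edited image column.
# Note that DATASET_NAME_MAPPING near the top of this file appears to list its columns in a
# different order (original image, edited image, edit prompt), so passing the column flags
# explicitly may be necessary for the mapped dataset.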
    dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
    if args.original_image_column is None:
        original_image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        original_image_column = args.original_image_column
        if original_image_column not in column_names:
            raise ValueError(
                f"'--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}"
            )
    if args.edit_prompt_column is None:
        edit_prompt_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        edit_prompt_column = args.edit_prompt_column
        if edit_prompt_column not in column_names:
            raise ValueError(
                f"'--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}"
            )
    if args.edited_image_column is None:
        edited_image_column = dataset_columns[2] if dataset_columns is not None else column_names[2]
    else:
        edited_image_column = args.edited_image_column
        if edited_image_column not in column_names:
            raise ValueError(
                f"'--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}"
            )

    # For mixed precision training we cast the text_encoder and vae weights to half-precision,
    # as these models are only used for inference; keeping their weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
        warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning)
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
        warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning)

    # Preprocessing the datasets.
    # We need to tokenize input captions and transform the images.
    def tokenize_captions(captions, tokenizer):
        inputs = tokenizer(
            captions,
            max_length=tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        return inputs.input_ids

    # Preprocessing the datasets.
    train_transforms = transforms.Compose(
        [
            transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
            transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
        ]
    )

    def preprocess_images(examples):
        original_images = np.concatenate(
            [convert_to_np(image, args.resolution) for image in examples[original_image_column]]
        )
        edited_images = np.concatenate(
            [convert_to_np(image, args.resolution) for image in examples[edited_image_column]]
        )
        # We need to ensure that the original and the edited images undergo the same
        # augmentation transforms.
        images = np.concatenate([original_images, edited_images])
        images = torch.tensor(images)
        images = 2 * (images / 255) - 1
        return train_transforms(images)

    # Load scheduler, tokenizer and models.
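    # NOTE: SDXL ships with two text encoders, each with its own tokenizer (a CLIP ViT-L model
    # under "text_encoder"/"tokenizer" and a larger OpenCLIP model under "text_encoder_2"/
    # "tokenizer_2"); their penultimate hidden states are concatenated later in encode_prompt().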
tokenizer_1 = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_2 = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) text_encoder_cls_1 = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) text_encoder_cls_2 = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder_1 = text_encoder_cls_1.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_2 = text_encoder_cls_2.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) # We ALWAYS pre-compute the additional condition embeddings needed for SDXL # UNet as the model is already big and it uses two text encoders. text_encoder_1.to(accelerator.device, dtype=weight_dtype) text_encoder_2.to(accelerator.device, dtype=weight_dtype) tokenizers = [tokenizer_1, tokenizer_2] text_encoders = [text_encoder_1, text_encoder_2] # Freeze vae and text_encoders vae.requires_grad_(False) text_encoder_1.requires_grad_(False) text_encoder_2.requires_grad_(False) # Set UNet to trainable. unet.train() # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(text_encoders, tokenizers, prompt): prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompts(text_encoders, tokenizers, prompts): prompt_embeds_all = [] pooled_prompt_embeds_all = [] for prompt in prompts: prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt) prompt_embeds_all.append(prompt_embeds) pooled_prompt_embeds_all.append(pooled_prompt_embeds) return torch.stack(prompt_embeds_all), torch.stack(pooled_prompt_embeds_all) # Adapted from examples.dreambooth.train_dreambooth_lora_sdxl # Here, we compute not just the text embeddings but also the additional embeddings # needed 
for the SD XL UNet to operate. def compute_embeddings_for_prompts(prompts, text_encoders, tokenizers): with torch.no_grad(): prompt_embeds_all, pooled_prompt_embeds_all = encode_prompts(text_encoders, tokenizers, prompts) add_text_embeds_all = pooled_prompt_embeds_all prompt_embeds_all = prompt_embeds_all.to(accelerator.device) add_text_embeds_all = add_text_embeds_all.to(accelerator.device) return prompt_embeds_all, add_text_embeds_all # Get null conditioning def compute_null_conditioning(): null_conditioning_list = [] for a_tokenizer, a_text_encoder in zip(tokenizers, text_encoders): null_conditioning_list.append( a_text_encoder( tokenize_captions([""], tokenizer=a_tokenizer).to(accelerator.device), output_hidden_states=True, ).hidden_states[-2] ) return torch.concat(null_conditioning_list, dim=-1) null_conditioning = compute_null_conditioning() def compute_time_ids(): crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w) original_size = target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids], dtype=weight_dtype) return add_time_ids.to(accelerator.device).repeat(args.train_batch_size, 1) add_time_ids = compute_time_ids() def preprocess_train(examples): # Preprocess images. preprocessed_images = preprocess_images(examples) # Since the original and edited images were concatenated before # applying the transformations, we need to separate them and reshape # them accordingly. original_images, edited_images = preprocessed_images.chunk(2) original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) # Collate the preprocessed images into the `examples`. examples["original_pixel_values"] = original_images examples["edited_pixel_values"] = edited_images # Preprocess the captions. captions = list(examples[edit_prompt_column]) prompt_embeds_all, add_text_embeds_all = compute_embeddings_for_prompts(captions, text_encoders, tokenizers) examples["prompt_embeds"] = prompt_embeds_all examples["add_text_embeds"] = add_text_embeds_all return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): original_pixel_values = torch.stack([example["original_pixel_values"] for example in examples]) original_pixel_values = original_pixel_values.to(memory_format=torch.contiguous_format).float() edited_pixel_values = torch.stack([example["edited_pixel_values"] for example in examples]) edited_pixel_values = edited_pixel_values.to(memory_format=torch.contiguous_format).float() prompt_embeds = torch.concat([example["prompt_embeds"] for example in examples], dim=0) add_text_embeds = torch.concat([example["add_text_embeds"] for example in examples], dim=0) return { "original_pixel_values": original_pixel_values, "edited_pixel_values": edited_pixel_values, "prompt_embeds": prompt_embeds, "add_text_embeds": add_text_embeds, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. 
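    # Illustrative arithmetic: with 1,000 dataloader batches per epoch and
    # --gradient_accumulation_steps=4 there are ceil(1000 / 4) = 250 optimizer updates per epoch,
    # so --num_train_epochs=100 would imply max_train_steps = 25,000 when it is not set explicitly.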
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # Move vae, unet and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. if args.pretrained_vae_model_name_or_path is not None: vae.to(accelerator.device, dtype=weight_dtype) else: vae.to(accelerator.device, dtype=TORCH_DTYPE_MAPPING[args.vae_precision]) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("instruct-pix2pix-xl", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # We want to learn the denoising process w.r.t the edited images which # are conditioned on the original image (which was edited) and the edit instruction. # So, first, convert images to latent space. if args.pretrained_vae_model_name_or_path is not None: edited_pixel_values = batch["edited_pixel_values"].to(dtype=weight_dtype) else: edited_pixel_values = batch["edited_pixel_values"] latents = vae.encode(edited_pixel_values).latent_dist.sample() latents = latents * vae.config.scaling_factor if args.pretrained_vae_model_name_or_path is None: latents = latents.to(weight_dtype) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # SDXL additional inputs encoder_hidden_states = batch["prompt_embeds"] add_text_embeds = batch["add_text_embeds"] # Get the additional image embedding for conditioning. # Instead of getting a diagonal Gaussian here, we simply take the mode. if args.pretrained_vae_model_name_or_path is not None: original_pixel_values = batch["original_pixel_values"].to(dtype=weight_dtype) else: original_pixel_values = batch["original_pixel_values"] original_image_embeds = vae.encode(original_pixel_values).latent_dist.sample() if args.pretrained_vae_model_name_or_path is None: original_image_embeds = original_image_embeds.to(weight_dtype) # Conditioning dropout to support classifier-free guidance during inference. For more details # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800. if args.conditioning_dropout_prob is not None: random_p = torch.rand(bsz, device=latents.device, generator=generator) # Sample masks for the edit prompts. prompt_mask = random_p < 2 * args.conditioning_dropout_prob prompt_mask = prompt_mask.reshape(bsz, 1, 1) # Final text conditioning. encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) # Sample masks for the original images. image_mask_dtype = original_image_embeds.dtype image_mask = 1 - ( (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) ) image_mask = image_mask.reshape(bsz, 1, 1, 1) # Final image conditioning. original_image_embeds = image_mask * original_image_embeds # Concatenate the `original_image_embeds` with the `noisy_latents`. 
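                # The 4 noisy latent channels plus the 4 image-conditioning channels form the
                # 8-channel input expected by the conv_in layer that was widened (with
                # zero-initialized extra channels) when the UNet was loaded above.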
concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1) # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} model_pred = unet( concatenated_noisy_latents, timesteps, encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) ### BEGIN: Perform validation every `validation_epochs` steps if global_step % args.validation_steps == 0: if (args.val_image_url_or_path is not None) and (args.validation_prompt is not None): # create pipeline if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) # The models need unwrapping because for compatibility in distributed training mode. 
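                        # (Accelerate wraps the trainable UNet for DDP/torch.compile and the pipeline
                        # constructor needs the bare module; the frozen text encoders and VAE were never
                        # passed to `accelerator.prepare`, so they can be used directly.)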
pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unwrap_model(unet), text_encoder=text_encoder_1, text_encoder_2=text_encoder_2, tokenizer=tokenizer_1, tokenizer_2=tokenizer_2, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) log_validation( pipeline, args, accelerator, generator, global_step, is_final_validation=False, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) del pipeline torch.cuda.empty_cache() ### END: Perform validation every `validation_epochs` steps if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder_1, text_encoder_2=text_encoder_2, tokenizer=tokenizer_1, tokenizer_2=tokenizer_2, vae=vae, unet=unwrap_model(unet), revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if (args.val_image_url_or_path is not None) and (args.validation_prompt is not None): log_validation( pipeline, args, accelerator, generator, global_step, is_final_validation=True, ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py/0
{ "file_path": "diffusers/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py", "repo_id": "diffusers", "token_count": 23515 }
122
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Script to train a consistency model from scratch via (improved) consistency training."""

import argparse
import gc
import logging
import math
import os
import shutil
from datetime import timedelta
from pathlib import Path

import accelerate
import datasets
import numpy as np
import torch
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm

import diffusers
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel, resolve_interpolation_mode
from diffusers.utils import is_tensorboard_available, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.torch_utils import is_compiled_module


if is_wandb_available():
    import wandb

logger = get_logger(__name__, log_level="INFO")


def _extract_into_tensor(arr, timesteps, broadcast_shape):
    """
    Extract values from a 1-D numpy array for a batch of indices.

    :param arr: the 1-D numpy array.
    :param timesteps: a tensor of indices into the array to extract.
    :param broadcast_shape: a larger shape of K dimensions with the batch
        dimension equal to the length of timesteps.
    :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
    """
    if not isinstance(arr, torch.Tensor):
        arr = torch.from_numpy(arr)
    res = arr[timesteps].float().to(timesteps.device)
    while len(res.shape) < len(broadcast_shape):
        res = res[..., None]
    return res.expand(broadcast_shape)


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    return x[(...,) + (None,) * dims_to_append]


def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


def get_discretization_steps(global_step: int, max_train_steps: int, s_0: int = 10, s_1: int = 1280, constant=False):
    """
    Calculates the current discretization steps at global step k using the discretization curriculum N(k).
    """
    if constant:
        return s_0 + 1

    k_prime = math.floor(max_train_steps / (math.log2(math.floor(s_1 / s_0)) + 1))
    num_discretization_steps = min(s_0 * 2 ** math.floor(global_step / k_prime), s_1) + 1

    return num_discretization_steps


def get_skip_steps(global_step, initial_skip: int = 1):
    # Currently we only support a constant skip curriculum.
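    # In iCT the teacher is evaluated at noise level sigma_i and the student at sigma_(i + k);
    # the paper fixes the gap at k = 1 (adjacent noise levels), which is why a constant skip
    # suffices here.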
    return initial_skip


def get_karras_sigmas(
    num_discretization_steps: int,
    sigma_min: float = 0.002,
    sigma_max: float = 80.0,
    rho: float = 7.0,
    dtype=torch.float32,
):
    """
    Calculates the Karras sigmas timestep discretization of [sigma_min, sigma_max].
    """
    ramp = np.linspace(0, 1, num_discretization_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    # Make sure sigmas are in increasing rather than decreasing order (see section 2 of the iCT paper)
    sigmas = sigmas[::-1].copy()
    sigmas = torch.from_numpy(sigmas).to(dtype=dtype)
    return sigmas


def get_discretized_lognormal_weights(noise_levels: torch.Tensor, p_mean: float = -1.1, p_std: float = 2.0):
    """
    Calculates the unnormalized weights for a 1D array of noise levels sigma_i based on the discretized lognormal
    distribution used in the iCT paper (given in Equation 10).
    """
    upper_prob = torch.special.erf((torch.log(noise_levels[1:]) - p_mean) / (math.sqrt(2) * p_std))
    lower_prob = torch.special.erf((torch.log(noise_levels[:-1]) - p_mean) / (math.sqrt(2) * p_std))
    weights = upper_prob - lower_prob
    return weights


def get_loss_weighting_schedule(noise_levels: torch.Tensor):
    """
    Calculates the loss weighting schedule lambda given a set of noise levels.
    """
    return 1.0 / (noise_levels[1:] - noise_levels[:-1])


def add_noise(original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor):
    # Make sure timesteps (Karras sigmas) have the same device and dtype as original_samples
    sigmas = timesteps.to(device=original_samples.device, dtype=original_samples.dtype)
    while len(sigmas.shape) < len(original_samples.shape):
        sigmas = sigmas.unsqueeze(-1)

    noisy_samples = original_samples + noise * sigmas

    return noisy_samples


def get_noise_preconditioning(sigmas, noise_precond_type: str = "cm"):
    """
    Calculates the noise preconditioning function c_noise, which is used to transform the raw Karras sigmas into the
    timestep input for the U-Net.
    """
    if noise_precond_type == "none":
        return sigmas
    elif noise_precond_type == "edm":
        return 0.25 * torch.log(sigmas)
    elif noise_precond_type == "cm":
        return 1000 * 0.25 * torch.log(sigmas + 1e-44)
    else:
        raise ValueError(
            f"Noise preconditioning type {noise_precond_type} is not currently supported. Currently supported noise"
            f" preconditioning types are `none` (which uses the sigmas as is), `edm`, and `cm`."
        )


def get_input_preconditioning(sigmas, sigma_data=0.5, input_precond_type: str = "cm"):
    """
    Calculates the input preconditioning factor c_in, which is used to scale the U-Net image input.
    """
    if input_precond_type == "none":
        return 1
    elif input_precond_type == "cm":
        return 1.0 / (sigmas**2 + sigma_data**2)
    else:
        raise ValueError(
            f"Input preconditioning type {input_precond_type} is not currently supported. Currently supported input"
            f" preconditioning types are `none` (which uses a scaling factor of 1.0) and `cm`."
        )


def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=1.0):
    scaled_timestep = timestep_scaling * timestep
    c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
    c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
    return c_skip, c_out


def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name="teacher"):
    logger.info("Running validation... 
") unet = accelerator.unwrap_model(unet) pipeline = ConsistencyModelPipeline( unet=unet, scheduler=scheduler, ) pipeline = pipeline.to(device=accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) class_labels = [None] if args.class_conditional: if args.num_classes is not None: class_labels = list(range(args.num_classes)) else: logger.warning( "The model is class-conditional but the number of classes is not set. The generated images will be" " unconditional rather than class-conditional." ) image_logs = [] for class_label in class_labels: images = [] with torch.autocast("cuda"): images = pipeline( num_inference_steps=1, batch_size=args.eval_batch_size, class_labels=[class_label] * args.eval_batch_size, generator=generator, ).images log = {"images": images} if args.class_conditional and class_label is not None: log["class_label"] = str(class_label) else: log["class_label"] = "images" image_logs.append(log) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] class_label = log["class_label"] formatted_images = [] for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(class_label, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] class_label = log["class_label"] for image in images: image = wandb.Image(image, caption=class_label) formatted_images.append(image) tracker.log({f"validation/{name}": formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") # ------------Model Arguments----------- parser.add_argument( "--model_config_name_or_path", type=str, default=None, help="The config of the UNet model to train, leave as None to use standard DDPM configuration.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, help=( "If initializing the weights from a pretrained model, the path to the pretrained model or model identifier" " from huggingface.co/models." ), ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help=( "Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. `fp16`," " `non_ema`, etc.", ), ) # ------------Dataset Arguments----------- parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that HF Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--dataset_image_column_name", type=str, default="image", help="The name of the image column in the dataset to use for training.", ) parser.add_argument( "--dataset_class_label_column_name", type=str, default="label", help="If doing class-conditional training, the name of the class label column in the dataset to use.", ) # ------------Image Processing Arguments----------- parser.add_argument( "--resolution", type=int, default=64, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--interpolation_type", type=str, default="bilinear", help=( "The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`," " `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", default=False, action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--class_conditional", action="store_true", help=( "Whether to train a class-conditional model. If set, the class labels will be taken from the `label`" " column of the provided dataset." ), ) parser.add_argument( "--num_classes", type=int, default=None, help="The number of classes in the training data, if training a class-conditional model.", ) parser.add_argument( "--class_embed_type", type=str, default=None, help=( "The class embedding type to use. Choose from `None`, `identity`, and `timestep`. If `class_conditional`" " and `num_classes` and set, but `class_embed_type` is `None`, a embedding matrix will be used." ), ) # ------------Dataloader Arguments----------- parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "The number of subprocesses to use for data loading. 0 means that the data will be loaded in the main" " process." ), ) # ------------Training Arguments----------- # ----General Training Arguments---- parser.add_argument( "--output_dir", type=str, default="ddpm-model-64", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--overwrite_output_dir", action="store_true") parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Batch Size and Training Length---- parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) # ----Learning Rate---- parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="cosine", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) # ----Optimizer (Adam) Arguments---- parser.add_argument( "--optimizer_type", type=str, default="adamw", help=( "The optimizer algorithm to use for training. Choose between `radam` and `adamw`. The iCT paper uses" " RAdam." ), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.95, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument( "--adam_weight_decay", type=float, default=1e-6, help="Weight decay magnitude for the Adam optimizer." ) parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") # ----Consistency Training (CT) Specific Arguments---- parser.add_argument( "--prediction_type", type=str, default="sample", choices=["sample"], help="Whether the model should predict the 'epsilon'/noise error or directly the reconstructed image 'x0'.", ) parser.add_argument("--ddpm_num_steps", type=int, default=1000) parser.add_argument("--ddpm_num_inference_steps", type=int, default=1000) parser.add_argument("--ddpm_beta_schedule", type=str, default="linear") parser.add_argument( "--sigma_min", type=float, default=0.002, help=( "The lower boundary for the timestep discretization, which should be set to a small positive value close" " to zero to avoid numerical issues when solving the PF-ODE backwards in time." ), ) parser.add_argument( "--sigma_max", type=float, default=80.0, help=( "The upper boundary for the timestep discretization, which also determines the variance of the Gaussian" " prior." ), ) parser.add_argument( "--rho", type=float, default=7.0, help="The rho parameter for the Karras sigmas timestep dicretization.", ) parser.add_argument( "--huber_c", type=float, default=None, help=( "The Pseudo-Huber loss parameter c. If not set, this will default to the value recommended in the Improved" " Consistency Training (iCT) paper of 0.00054 * sqrt(d), where d is the data dimensionality." ), ) parser.add_argument( "--discretization_s_0", type=int, default=10, help=( "The s_0 parameter in the discretization curriculum N(k). This controls the number of training steps after" " which the number of discretization steps N will be doubled." ), ) parser.add_argument( "--discretization_s_1", type=int, default=1280, help=( "The s_1 parameter in the discretization curriculum N(k). 
This controls the upper limit to the number of"
            " discretization steps used. Increasing this value will reduce the bias at the cost of higher variance."
        ),
    )
    parser.add_argument(
        "--constant_discretization_steps",
        action="store_true",
        help=(
            "Whether to set the discretization curriculum N(k) to be the constant value `discretization_s_0 + 1`. This"
            " is useful for testing when `max_number_steps` is small, when `k_prime` would otherwise be 0, causing"
            " a divide-by-zero error."
        ),
    )
    parser.add_argument(
        "--p_mean",
        type=float,
        default=-1.1,
        help=(
            "The mean parameter P_mean for the (discretized) lognormal noise schedule, which controls the probability"
            " of sampling a (discrete) noise level sigma_i."
        ),
    )
    parser.add_argument(
        "--p_std",
        type=float,
        default=2.0,
        help=(
            "The standard deviation parameter P_std for the (discretized) noise schedule, which controls the"
            " probability of sampling a (discrete) noise level sigma_i."
        ),
    )
    parser.add_argument(
        "--noise_precond_type",
        type=str,
        default="cm",
        help=(
            "The noise preconditioning function to use for transforming the raw Karras sigmas into the timestep"
            " argument of the U-Net. Choose between `none` (the identity function), `edm`, and `cm`."
        ),
    )
    parser.add_argument(
        "--input_precond_type",
        type=str,
        default="cm",
        help=(
            "The input preconditioning function to use for scaling the image input of the U-Net. Choose between `none`"
            " (a scaling factor of 1) and `cm`."
        ),
    )
    parser.add_argument(
        "--skip_steps",
        type=int,
        default=1,
        help=(
            "The gap in indices between the student and teacher noise levels. In the iCT paper this is always set to"
            " 1, but theoretically this could be greater than 1 and/or altered according to a curriculum throughout"
            " training, much like the number of discretization steps is."
        ),
    )
    parser.add_argument(
        "--cast_teacher",
        action="store_true",
        help="Whether to cast the teacher U-Net model to `weight_dtype` or leave it in full precision.",
    )
    # ----Exponential Moving Average (EMA) Arguments----
    parser.add_argument(
        "--use_ema",
        action="store_true",
        help="Whether to use Exponential Moving Average for the final model weights.",
    )
    parser.add_argument(
        "--ema_min_decay",
        type=float,
        default=None,
        help=(
            "The minimum decay magnitude for EMA. If not set, this will default to the value of `ema_max_decay`,"
            " resulting in a constant EMA decay rate."
        ),
    )
    parser.add_argument(
        "--ema_max_decay",
        type=float,
        default=0.99993,
        help=(
            "The maximum decay magnitude for EMA. Setting `ema_min_decay` equal to this value will result in a"
            " constant decay rate."
        ),
    )
    parser.add_argument(
        "--use_ema_warmup",
        action="store_true",
        help="Whether to use EMA warmup.",
    )
    parser.add_argument("--ema_inv_gamma", type=float, default=1.0, help="The inverse gamma value for the EMA decay.")
    parser.add_argument("--ema_power", type=float, default=3 / 4, help="The power value for the EMA decay.")
    # ----Training Optimization Arguments----
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose"
            " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
            " and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    # ----Distributed Training Arguments----
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    # ------------Validation Arguments-----------
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=200,
        help="Run validation every X steps.",
    )
    parser.add_argument(
        "--eval_batch_size",
        type=int,
        default=16,
        help=(
            "The number of images to generate for evaluation. Note that if `class_conditional` and `num_classes` are"
            " set the effective number of images generated per evaluation step is `eval_batch_size * num_classes`."
        ),
    )
    parser.add_argument("--save_images_epochs", type=int, default=10, help="How often to save images during training.")
    # ------------Checkpointing Arguments-----------
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--save_model_epochs", type=int, default=10, help="How often to save the model during training."
    )
    # ------------Logging Arguments-----------
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    # ------------HuggingFace Hub Arguments-----------
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--hub_private_repo", action="store_true", help="Whether or not to create a private repository."
) # ------------Accelerate Arguments----------- parser.add_argument( "--tracker_project_name", type=str, default="consistency-training", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.dataset_name is None and args.train_data_dir is None: raise ValueError("You must specify either a dataset name from the hub or a train data directory.") return args def main(args): logging_dir = os.path.join(args.output_dir, args.logging_dir) if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200)) # a big number for high resolution or big dataset accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs], ) if args.report_to == "tensorboard": if not is_tensorboard_available(): raise ImportError("Make sure to install tensorboard if you want to use it for logging during training.") elif args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # 1. Initialize the noise scheduler. initial_discretization_steps = get_discretization_steps( 0, args.max_train_steps, s_0=args.discretization_s_0, s_1=args.discretization_s_1, constant=args.constant_discretization_steps, ) noise_scheduler = CMStochasticIterativeScheduler( num_train_timesteps=initial_discretization_steps, sigma_min=args.sigma_min, sigma_max=args.sigma_max, rho=args.rho, ) # 2. Initialize the student U-Net model. if args.pretrained_model_name_or_path is not None: logger.info(f"Loading pretrained U-Net weights from {args.pretrained_model_name_or_path}... 
") unet = UNet2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) elif args.model_config_name_or_path is None: # TODO: use default architectures from iCT paper if not args.class_conditional and (args.num_classes is not None or args.class_embed_type is not None): logger.warning( f"`--class_conditional` is set to `False` but `--num_classes` is set to {args.num_classes} and" f" `--class_embed_type` is set to {args.class_embed_type}. These values will be overridden to `None`." ) args.num_classes = None args.class_embed_type = None elif args.class_conditional and args.num_classes is None and args.class_embed_type is None: logger.warning( "`--class_conditional` is set to `True` but neither `--num_classes` nor `--class_embed_type` is set." "`class_conditional` will be overridden to `False`." ) args.class_conditional = False unet = UNet2DModel( sample_size=args.resolution, in_channels=3, out_channels=3, layers_per_block=2, block_out_channels=(128, 128, 256, 256, 512, 512), down_block_types=( "DownBlock2D", "DownBlock2D", "DownBlock2D", "DownBlock2D", "AttnDownBlock2D", "DownBlock2D", ), up_block_types=( "UpBlock2D", "AttnUpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", ), class_embed_type=args.class_embed_type, num_class_embeds=args.num_classes, ) else: config = UNet2DModel.load_config(args.model_config_name_or_path) unet = UNet2DModel.from_config(config) unet.train() # Create EMA for the student U-Net model. if args.use_ema: if args.ema_min_decay is None: args.ema_min_decay = args.ema_max_decay ema_unet = EMAModel( unet.parameters(), decay=args.ema_max_decay, min_decay=args.ema_min_decay, use_ema_warmup=args.use_ema_warmup, inv_gamma=args.ema_inv_gamma, power=args.ema_power, model_cls=UNet2DModel, model_config=unet.config, ) # 3. Initialize the teacher U-Net model from the student U-Net model. # Note that following the improved Consistency Training paper, the teacher U-Net is not updated via EMA (e.g. the # EMA decay rate is 0.) teacher_unet = UNet2DModel.from_config(unet.config) teacher_unet.load_state_dict(unet.state_dict()) teacher_unet.train() teacher_unet.requires_grad_(False) # 4. Handle mixed precision and device placement weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Cast teacher_unet to weight_dtype if cast_teacher is set. if args.cast_teacher: teacher_dtype = weight_dtype else: teacher_dtype = torch.float32 teacher_unet.to(accelerator.device) if args.use_ema: ema_unet.to(accelerator.device) # 5. Handle saving and loading of checkpoints. 
# `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: teacher_unet.save_pretrained(os.path.join(output_dir, "unet_teacher")) if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): load_model = UNet2DModel.from_pretrained(os.path.join(input_dir, "unet_teacher")) teacher_unet.load_state_dict(load_model.state_dict()) teacher_unet.to(accelerator.device) del load_model if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # 6. Enable optimizations if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() teacher_unet.enable_xformers_memory_efficient_attention() if args.use_ema: ema_unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.optimizer_type == "radam": optimizer_class = torch.optim.RAdam elif args.optimizer_type == "adamw": # Use 8-bit Adam for lower memory usage or to fine-tune the model for 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW else: raise ValueError( f"Optimizer type {args.optimizer_type} is not supported. Currently supported optimizer types are `radam`" f" and `adamw`." ) # 7. Initialize the optimizer optimizer = optimizer_class( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # 8. 
Dataset creation and data preprocessing # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, split="train", ) else: dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train") # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets and DataLoaders creation. interpolation_mode = resolve_interpolation_mode(args.interpolation_type) augmentations = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=interpolation_mode), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def transform_images(examples): images = [augmentations(image.convert("RGB")) for image in examples[args.dataset_image_column_name]] batch_dict = {"images": images} if args.class_conditional: batch_dict["class_labels"] = examples[args.dataset_class_label_column_name] return batch_dict logger.info(f"Dataset size: {len(dataset)}") dataset.set_transform(transform_images) train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers ) # 9. Initialize the learning rate scheduler # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) # 10. Prepare for training # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) def recalculate_num_discretization_step_values(discretization_steps, skip_steps): """ Recalculates all quantities depending on the number of discretization steps N. 
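        Specifically, this rebuilds the CMStochasticIterativeScheduler, the Karras sigma grid,
        the discretized lognormal sampling weights p(i), and the loss weights
        lambda(sigma_i) = 1 / (sigma_(i+1) - sigma_i), all of which are functions of N.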
""" noise_scheduler = CMStochasticIterativeScheduler( num_train_timesteps=discretization_steps, sigma_min=args.sigma_min, sigma_max=args.sigma_max, rho=args.rho, ) current_timesteps = get_karras_sigmas(discretization_steps, args.sigma_min, args.sigma_max, args.rho) valid_teacher_timesteps_plus_one = current_timesteps[: len(current_timesteps) - skip_steps + 1] # timestep_weights are the unnormalized probabilities of sampling the timestep/noise level at each index timestep_weights = get_discretized_lognormal_weights( valid_teacher_timesteps_plus_one, p_mean=args.p_mean, p_std=args.p_std ) # timestep_loss_weights is the timestep-dependent loss weighting schedule lambda(sigma_i) timestep_loss_weights = get_loss_weighting_schedule(valid_teacher_timesteps_plus_one) current_timesteps = current_timesteps.to(accelerator.device) timestep_weights = timestep_weights.to(accelerator.device) timestep_loss_weights = timestep_loss_weights.to(accelerator.device) return noise_scheduler, current_timesteps, timestep_weights, timestep_loss_weights # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # Function for unwraping if torch.compile() was used in accelerate. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
)
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    # Resolve the c parameter for the Pseudo-Huber loss
    if args.huber_c is None:
        args.huber_c = 0.00054 * args.resolution * math.sqrt(unwrap_model(unet).config.in_channels)

    # Get current number of discretization steps N according to our discretization curriculum
    current_discretization_steps = get_discretization_steps(
        initial_global_step,
        args.max_train_steps,
        s_0=args.discretization_s_0,
        s_1=args.discretization_s_1,
        constant=args.constant_discretization_steps,
    )
    current_skip_steps = get_skip_steps(initial_global_step, initial_skip=args.skip_steps)
    if current_skip_steps >= current_discretization_steps:
        raise ValueError(
            f"The current skip steps is {current_skip_steps}, but should be smaller than the current number of"
            f" discretization steps {current_discretization_steps}"
        )
    # Recalculate all quantities depending on the number of discretization steps N
    (
        noise_scheduler,
        current_timesteps,
        timestep_weights,
        timestep_loss_weights,
    ) = recalculate_num_discretization_step_values(current_discretization_steps, current_skip_steps)

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    # 11. Train!
    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        for step, batch in enumerate(train_dataloader):
            # 1. Get batch of images from dataloader (sample x ~ p_data(x))
            clean_images = batch["images"].to(weight_dtype)
            if args.class_conditional:
                class_labels = batch["class_labels"]
            else:
                class_labels = None
            bsz = clean_images.shape[0]

            # 2. Sample a random timestep for each image according to the noise schedule.
            # Sample random indices i ~ p(i), where p(i) is the discretized lognormal distribution in the iCT paper
            # NOTE: timestep_indices should be in the range [0, len(current_timesteps) - k - 1] inclusive
            timestep_indices = torch.multinomial(timestep_weights, bsz, replacement=True).long()
            teacher_timesteps = current_timesteps[timestep_indices]
            student_timesteps = current_timesteps[timestep_indices + current_skip_steps]

            # 3. Sample noise and add it to the clean images for both teacher and student unets
            # Sample noise z ~ N(0, I) that we'll add to the images
            noise = torch.randn(clean_images.shape, dtype=weight_dtype, device=clean_images.device)
            # Add noise to the clean images according to the noise magnitude at each timestep
            # (this is the forward diffusion process)
            teacher_noisy_images = add_noise(clean_images, noise, teacher_timesteps)
            student_noisy_images = add_noise(clean_images, noise, student_timesteps)

            # 4. Calculate preconditioning and scalings for boundary conditions for the consistency model.
teacher_rescaled_timesteps = get_noise_preconditioning(teacher_timesteps, args.noise_precond_type) student_rescaled_timesteps = get_noise_preconditioning(student_timesteps, args.noise_precond_type) c_in_teacher = get_input_preconditioning(teacher_timesteps, input_precond_type=args.input_precond_type) c_in_student = get_input_preconditioning(student_timesteps, input_precond_type=args.input_precond_type) c_skip_teacher, c_out_teacher = scalings_for_boundary_conditions(teacher_timesteps) c_skip_student, c_out_student = scalings_for_boundary_conditions(student_timesteps) c_skip_teacher, c_out_teacher, c_in_teacher = [ append_dims(x, clean_images.ndim) for x in [c_skip_teacher, c_out_teacher, c_in_teacher] ] c_skip_student, c_out_student, c_in_student = [ append_dims(x, clean_images.ndim) for x in [c_skip_student, c_out_student, c_in_student] ] with accelerator.accumulate(unet): # 5. Get the student unet denoising prediction on the student timesteps # Get rng state now to ensure that dropout is synced between the student and teacher models. dropout_state = torch.get_rng_state() student_model_output = unet( c_in_student * student_noisy_images, student_rescaled_timesteps, class_labels=class_labels ).sample # NOTE: currently only support prediction_type == sample, so no need to convert model_output student_denoise_output = c_skip_student * student_noisy_images + c_out_student * student_model_output # 6. Get the teacher unet denoising prediction on the teacher timesteps with torch.no_grad(), torch.autocast("cuda", dtype=teacher_dtype): torch.set_rng_state(dropout_state) teacher_model_output = teacher_unet( c_in_teacher * teacher_noisy_images, teacher_rescaled_timesteps, class_labels=class_labels ).sample # NOTE: currently only support prediction_type == sample, so no need to convert model_output teacher_denoise_output = ( c_skip_teacher * teacher_noisy_images + c_out_teacher * teacher_model_output ) # 7. Calculate the weighted Pseudo-Huber loss if args.prediction_type == "sample": # Note that the loss weights should be those at the (teacher) timestep indices. lambda_t = _extract_into_tensor( timestep_loss_weights, timestep_indices, (bsz,) + (1,) * (clean_images.ndim - 1) ) loss = lambda_t * ( torch.sqrt( (student_denoise_output.float() - teacher_denoise_output.float()) ** 2 + args.huber_c**2 ) - args.huber_c ) loss = loss.mean() else: raise ValueError( f"Unsupported prediction type: {args.prediction_type}. Currently, only `sample` is supported." ) # 8. Backpropagate on the consistency training loss accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: # 9. Update teacher_unet and ema_unet parameters using unet's parameters. teacher_unet.load_state_dict(unet.state_dict()) if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 if accelerator.is_main_process: # 10. Recalculate quantities depending on the global step, if necessary. 
new_discretization_steps = get_discretization_steps( global_step, args.max_train_steps, s_0=args.discretization_s_0, s_1=args.discretization_s_1, constant=args.constant_discretization_steps, ) current_skip_steps = get_skip_steps(global_step, initial_skip=args.skip_steps) if current_skip_steps >= new_discretization_steps: raise ValueError( f"The current skip steps is {current_skip_steps}, but should be smaller than the current" f" number of discretization steps {new_discretization_steps}." ) if new_discretization_steps != current_discretization_steps: ( noise_scheduler, current_timesteps, timestep_weights, timestep_loss_weights, ) = recalculate_num_discretization_step_values(new_discretization_steps, current_skip_steps) current_discretization_steps = new_discretization_steps if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % args.validation_steps == 0: # NOTE: since we do not use EMA for the teacher model, the teacher parameters and student # parameters are the same at this point in time log_validation(unet, noise_scheduler, args, accelerator, weight_dtype, global_step, "teacher") # teacher_unet.to(dtype=teacher_dtype) if args.use_ema: # Store the student unet weights and load the EMA weights. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( unet, noise_scheduler, args, accelerator, weight_dtype, global_step, "ema_student", ) # Restore student unet weights ema_unet.restore(unet.parameters()) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} if args.use_ema: logs["ema_decay"] = ema_unet.cur_decay_value progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # progress_bar.close() accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) pipeline = ConsistencyModelPipeline(unet=unet, scheduler=noise_scheduler) pipeline.save_pretrained(args.output_dir) # If using EMA, save EMA weights as well. if args.use_ema: ema_unet.copy_to(unet.parameters()) unet.save_pretrained(os.path.join(args.output_dir, "ema_unet")) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
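The script above leans on a handful of iCT helpers (`get_karras_sigmas`, `scalings_for_boundary_conditions`, `get_discretized_lognormal_weights`) that are defined earlier in the file and not shown here. As a quick reference, here is a minimal self-contained sketch of the math they typically implement. The function names, the `sigma_data = 0.5` default, and the exact boundary parameterization (e.g. the `sigma_min` shift) are assumptions that may differ in detail from the script's own helpers; the Pseudo-Huber distance, however, matches the loss computed in step 7 above (before the `lambda(sigma_i)` weighting):

```python
# Minimal sketch of the core iCT quantities, assuming the standard formulation.
import torch


def karras_sigmas(n, sigma_min=0.002, sigma_max=80.0, rho=7.0):
    # sigma_i = (sigma_min^(1/rho) + i/(N-1) * (sigma_max^(1/rho) - sigma_min^(1/rho)))^rho
    ramp = torch.linspace(0, 1, n)
    return (sigma_min ** (1 / rho) + ramp * (sigma_max ** (1 / rho) - sigma_min ** (1 / rho))) ** rho


def boundary_scalings(sigma, sigma_data=0.5, sigma_min=0.002):
    # Enforce the boundary condition f(x, sigma_min) = x: at sigma == sigma_min,
    # c_skip == 1 and c_out == 0, so the consistency model reduces to the identity.
    c_skip = sigma_data**2 / ((sigma - sigma_min) ** 2 + sigma_data**2)
    c_out = sigma_data * (sigma - sigma_min) / (sigma**2 + sigma_data**2) ** 0.5
    return c_skip, c_out


def pseudo_huber(student, teacher, c):
    # Same elementwise form as step 7 above: sqrt(d^2 + c^2) - c.
    return torch.sqrt((student - teacher) ** 2 + c**2) - c


sigmas = karras_sigmas(11)  # ascending, matching how the loop indexes teacher below student + skip
c_skip, c_out = boundary_scalings(sigmas)
loss = pseudo_huber(torch.randn(4, 3), torch.randn(4, 3), c=0.03).mean()
```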
diffusers/examples/research_projects/consistency_training/train_cm_ct_unconditional.py/0
{ "file_path": "diffusers/examples/research_projects/consistency_training/train_cm_ct_unconditional.py", "repo_id": "diffusers", "token_count": 26288 }
123
# GLIGEN: Open-Set Grounded Text-to-Image Generation

These scripts contain the code to prepare the grounding data and train the GLIGEN model on the COCO dataset.

### Install the requirements

```bash
conda create -n diffusers python==3.10
conda activate diffusers
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or, for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell (e.g., a notebook):

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

### Prepare the training data

If you want to make your own grounding data, you need to install the requirements.

I used [RAM](https://github.com/xinyu1205/recognize-anything) to tag images, [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO/issues?q=refer) to detect objects, and [BLIP2](https://huggingface.co/docs/transformers/en/model_doc/blip-2) to caption instances.

Only RAM needs to be installed manually:

```bash
pip install git+https://github.com/xinyu1205/recognize-anything.git --no-deps
```

Download the pre-trained models:

```bash
huggingface-cli download --resume-download xinyu1205/recognize_anything_model ram_swin_large_14m.pth

huggingface-cli download --resume-download IDEA-Research/grounding-dino-base

huggingface-cli download --resume-download Salesforce/blip2-flan-t5-xxl

huggingface-cli download --resume-download openai/clip-vit-large-patch14

huggingface-cli download --resume-download masterful/gligen-1-4-generation-text-box
```

Make the training data on 8 GPUs:

```bash
torchrun --master_port 17673 --nproc_per_node=8 make_datasets.py \
    --data_root /mnt/workspace/workgroup/zhizhonghuang/dataset/COCO/train2017 \
    --save_root /root/gligen_data \
    --ram_checkpoint /root/.cache/huggingface/hub/models--xinyu1205--recognize_anything_model/snapshots/ebc52dc741e86466202a5ab8ab22eae6e7d48bf1/ram_swin_large_14m.pth
```

You can download the preprocessed COCO training data with:

```bash
huggingface-cli download --resume-download Hzzone/GLIGEN_COCO coco_train2017.pth
```

It's in the following format:

```json
[
    ...
    {
        'file_path': Path,
        'annos': [
            {
                'caption': Instance Caption,
                'bbox': bbox in xyxy,
                'text_embeddings_before_projection': CLIP text embedding before linear projection
            }
        ]
    }
    ...
]
```

### Training commands

The training script is heavily based on https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py

```bash
accelerate launch train_gligen_text.py \
    --data_path /root/data/zhizhonghuang/coco_train2017.pth \
    --image_path /mnt/workspace/workgroup/zhizhonghuang/dataset/COCO/train2017 \
    --train_batch_size 8 \
    --max_train_steps 100000 \
    --checkpointing_steps 1000 \
    --checkpoints_total_limit 10 \
    --learning_rate 5e-5 \
    --dataloader_num_workers 16 \
    --mixed_precision fp16 \
    --report_to wandb \
    --tracker_project_name gligen \
    --output_dir /root/data/zhizhonghuang/ckpt/GLIGEN_Text_Retrain_COCO
```

I trained the model on 8 A100 GPUs for about 11 hours (each GPU needs at least 24GB of memory). The generated images usually start to follow the layout at around 50k iterations.
Note that although the pre-trained GLIGEN model has been loaded, the parameters of `fuser` and `position_net` have been reset (see line 420 in `train_gligen_text.py`).

The trained model can be downloaded with:

```bash
huggingface-cli download --resume-download Hzzone/GLIGEN_COCO config.json diffusion_pytorch_model.safetensors
```

You can run `demo.ipynb` to visualize the generated images.

Example prompts:

```python
prompt = 'A realistic image of landscape scene depicting a green car parking on the left of a blue truck, with a red air balloon and a bird in the sky'
boxes = [[0.041015625, 0.548828125, 0.453125, 0.859375], [0.525390625, 0.552734375, 0.93359375, 0.865234375], [0.12890625, 0.015625, 0.412109375, 0.279296875], [0.578125, 0.08203125, 0.857421875, 0.27734375]]
gligen_phrases = ['a green car', 'a blue truck', 'a red air balloon', 'a bird']
```

Example images:
![alt text](generated-images-100000-00.png)

### Citation

```
@article{li2023gligen,
  title={GLIGEN: Open-Set Grounded Text-to-Image Generation},
  author={Li, Yuheng and Liu, Haotian and Wu, Qingyang and Mu, Fangzhou and Yang, Jianwei and Gao, Jianfeng and Li, Chunyuan and Lee, Yong Jae},
  journal={CVPR},
  year={2023}
}
```
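The `boxes` in the example above are in normalized `xyxy` coordinates. If your layout comes from a detector that reports pixel coordinates, a small helper (not part of this repo; the box values and image size below are made-up examples) can convert them:

```python
def normalize_boxes(boxes_px, width, height):
    """Convert pixel-space [x0, y0, x1, y1] boxes to the normalized xyxy format used above."""
    return [[x0 / width, y0 / height, x1 / width, y1 / height] for x0, y0, x1, y1 in boxes_px]


# e.g. one detected object on a 512x512 image
boxes = normalize_boxes([[21, 281, 232, 440]], width=512, height=512)
```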
diffusers/examples/research_projects/gligen/README.md/0
{ "file_path": "diffusers/examples/research_projects/gligen/README.md", "repo_id": "diffusers", "token_count": 1748 }
124
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for the random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="Id of the CUDA device to use.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if len(imgs) != rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
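One thing to keep in mind when choosing `--images_num` for the script above: `generate_images` computes the grid as `rows = int(sqrt(n))`, `cols = n // rows`, and `image_grid` raises unless `rows * cols == n`. A quick standalone check (not part of the script) of which counts tile cleanly:

```python
import math

for n in (1, 2, 4, 6, 7, 8, 9, 12):
    rows = int(math.sqrt(n))
    cols = n // rows
    print(n, rows, cols, rows * cols == n)
# n = 7 gives rows=2, cols=3, and 2 * 3 != 7, so image_grid raises;
# stick to counts where int(sqrt(n)) divides n (1, 2, 4, 6, 8, 9, 12, ...).
```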
diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/text2images.py/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/text2images.py", "repo_id": "diffusers", "token_count": 1518 }
125
import argparse
import logging
import math
import os
import random
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
import optax
import PIL
import torch
import torch.utils.checkpoint
import transformers
from flax import jax_utils
from flax.training import train_state
from flax.training.common_utils import shard
from huggingface_hub import create_repo, upload_folder

# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed

from diffusers import (
    FlaxAutoencoderKL,
    FlaxDDPMScheduler,
    FlaxPNDMScheduler,
    FlaxStableDiffusionPipeline,
    FlaxUNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
from diffusers.utils import check_min_version


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
# ------------------------------------------------------------------------------

# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.14.0.dev0")

logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
    )
    parser.add_argument(
        "--placeholder_token",
        type=str,
        default=None,
        required=True,
        help="A token to use as a placeholder for the concept.",
    )
    parser.add_argument(
        "--initializer_token", type=str, default=None, required=True, help="A token to use as an initializer word."
    )
    parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
    parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--use_auth_token", action="store_true", help=( "Will use the token generated when running `huggingface-cli login` (necessary to use this script with" " private models)." ), ) parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0],
                img.shape[1],
            )
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]

        image = Image.fromarray(img)
        image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip_transform(image)
        image = np.array(image).astype(np.uint8)
        image = (image / 127.5 - 1.0).astype(np.float32)

        example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
        return example


def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
    if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
        return
    model.config.vocab_size = new_num_tokens

    params = model.params
    old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
    old_num_tokens, emb_dim = old_embeddings.shape

    initializer = jax.nn.initializers.normal()

    new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
    new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
    new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
    params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings

    model.params = params
    return model


def get_params_to_save(params):
    return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))


def main():
    args = parse_args()

    if args.seed is not None:
        set_seed(args.seed)

    if jax.process_index() == 0:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Set up logging; we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        transformers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()

    # Load the tokenizer and add the placeholder token as an additional special token
    if args.tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

    # Add the placeholder token to the tokenizer
    num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
    if num_added_tokens == 0:
        raise ValueError(
            f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
            " `placeholder_token` that is not already in the tokenizer."
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae, vae_params = FlaxAutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") # Create sampling rng rng = jax.random.PRNGKey(args.seed) rng, _ = jax.random.split(rng) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder = resize_token_embeddings( text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng ) original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.stack([example["input_ids"] for example in examples]) batch = {"pixel_values": pixel_values, "input_ids": input_ids} batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) optimizer = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) def create_mask(params, label_fn): def _map(params, mask, label_fn): for k in params: if label_fn(k): mask[k] = "token_embedding" else: if isinstance(params[k], dict): mask[k] = {} _map(params[k], mask[k], label_fn) else: mask[k] = "zero" mask = {} _map(params, mask, label_fn) return mask def zero_grads(): # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 def init_fn(_): return () def update_fn(updates, state, params=None): return jax.tree_util.tree_map(jnp.zeros_like, updates), () return optax.GradientTransformation(init_fn, update_fn) # Zero out gradients of layers other than the token embedding layer tx = optax.multi_transform( {"token_embedding": optimizer, "zero": zero_grads()}, create_mask(text_encoder.params, lambda s: s == "token_embedding"), ) state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, jax.local_device_count()) # Define gradient train step fn def train_step(state, vae_params, 
unet_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) def compute_loss(params): vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) encoder_hidden_states = state.apply_fn( batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True )[0] # Predict the noise residual and compute loss model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) # Keep the token embeddings fixed except the newly added embeddings for the concept, # as we only want to optimize the concept embeddings token_embeds = original_token_embeds.at[placeholder_token_id].set( new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] ) new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics, new_train_rng # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) vae_params = jax_utils.replicate(vae_params) unet_params = jax_utils.replicate(unet_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... 
(1/{args.num_train_epochs})", position=0)
    for epoch in epochs:
        # ======================== Training ================================

        train_metrics = []

        steps_per_epoch = len(train_dataset) // total_train_batch_size
        train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
        # train
        for batch in train_dataloader:
            batch = shard(batch)
            state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
            train_metrics.append(train_metric)

            train_step_progress_bar.update(1)
            global_step += 1

            if global_step >= args.max_train_steps:
                break

        train_metric = jax_utils.unreplicate(train_metric)

        train_step_progress_bar.close()
        epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")

    # Create the pipeline using the trained modules and save it.
    if jax.process_index() == 0:
        scheduler = FlaxPNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
            "CompVis/stable-diffusion-safety-checker", from_pt=True
        )
        pipeline = FlaxStableDiffusionPipeline(
            text_encoder=text_encoder,
            vae=vae,
            unet=unet,
            tokenizer=tokenizer,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
        )

        pipeline.save_pretrained(
            args.output_dir,
            params={
                "text_encoder": get_params_to_save(state.params),
                "vae": get_params_to_save(vae_params),
                "unet": get_params_to_save(unet_params),
                "safety_checker": safety_checker.params,
            },
        )

        # Also save the newly trained embeddings
        learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
            placeholder_token_id
        ]
        learned_embeds_dict = {args.placeholder_token: learned_embeds}
        jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )


if __name__ == "__main__":
    main()
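The key trick in the script above is optimizer-level freezing: `optax.multi_transform` routes the token-embedding leaves to AdamW and every other parameter to the `zero_grads()` transform, so only the embedding table can move (with the final `.at[placeholder_token_id].set(...)` guard in `train_step` keeping even the pre-existing vocabulary rows fixed). A minimal self-contained sketch of that pattern, using a toy parameter tree rather than the script's CLIP parameters:

```python
import jax
import jax.numpy as jnp
import optax


def zero_grads():
    # Same idea as the script's zero_grads(): always emit all-zero updates.
    def init_fn(_):
        return ()

    def update_fn(updates, state, params=None):
        return jax.tree_util.tree_map(jnp.zeros_like, updates), ()

    return optax.GradientTransformation(init_fn, update_fn)


params = {"token_embedding": jnp.ones((4, 2)), "dense": jnp.ones((2, 2))}
labels = {"token_embedding": "train", "dense": "zero"}
tx = optax.multi_transform({"train": optax.adamw(1e-3), "zero": zero_grads()}, labels)

state = tx.init(params)
grads = jax.tree_util.tree_map(jnp.ones_like, params)
updates, state = tx.update(grads, state, params)
new_params = optax.apply_updates(params, updates)
# new_params["dense"] is unchanged; only the "token_embedding" leaf moved.
```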
diffusers/examples/research_projects/multi_token_textual_inversion/textual_inversion_flax.py/0
{ "file_path": "diffusers/examples/research_projects/multi_token_textual_inversion/textual_inversion_flax.py", "repo_id": "diffusers", "token_count": 10599 }
126
import inspect from typing import Callable, List, Optional, Union import torch from PIL import Image from retriever import Retriever, normalize_images, preprocess_images from transformers import CLIPImageProcessor, CLIPModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, ImagePipelineOutput, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name class RDMPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Retrieval Augmented Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. clip ([`CLIPModel`]): Frozen CLIP model. Retrieval Augmented Diffusion uses the CLIP model, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" def __init__( self, vae: AutoencoderKL, clip: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], feature_extractor: CLIPImageProcessor, retriever: Optional[Retriever] = None, ): super().__init__() self.register_modules( vae=vae, clip=clip, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) # Copy from statement here and all the methods we take from stable_diffusion_pipeline self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.retriever = retriever def _encode_prompt(self, prompt): # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] prompt_embeds = self.clip.get_text_features(text_input_ids.to(self.device)) prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) prompt_embeds = prompt_embeds[:, None, :] return prompt_embeds def _encode_image(self, retrieved_images, batch_size): if len(retrieved_images[0]) == 0: return None for i in range(len(retrieved_images)): retrieved_images[i] = normalize_images(retrieved_images[i]) retrieved_images[i] = preprocess_images(retrieved_images[i], self.feature_extractor).to( self.clip.device, dtype=self.clip.dtype ) _, c, h, w = retrieved_images[0].shape retrieved_images = torch.reshape(torch.cat(retrieved_images, dim=0), (-1, c, h, w)) image_embeddings = self.clip.get_image_features(retrieved_images) image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True) _, d = image_embeddings.shape image_embeddings = torch.reshape(image_embeddings, (batch_size, -1, d)) return image_embeddings def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
)

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def retrieve_images(self, retrieved_images, prompt_embeds, knn=10):
        if self.retriever is not None:
            additional_images = self.retriever.retrieve_imgs_batch(prompt_embeds[:, 0].cpu(), knn).total_examples
            for i in range(len(retrieved_images)):
                retrieved_images[i] += additional_images[i][self.retriever.config.image_column]
        return retrieved_images

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        retrieved_images: Optional[List[Image.Image]] = None,
        height: int = 768,
        width: int = 768,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        knn: Optional[int] = 10,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to 768):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 768):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if retrieved_images is not None:
            retrieved_images = [retrieved_images for _ in range(batch_size)]
        else:
            retrieved_images = [[] for _ in range(batch_size)]

        device = self._execution_device

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt_embeds is None:
            prompt_embeds = self._encode_prompt(prompt)

        retrieved_images = self.retrieve_images(retrieved_images, prompt_embeds, knn=knn)
        image_embeddings = self._encode_image(retrieved_images, batch_size)
        if image_embeddings is not None:
            prompt_embeds = torch.cat([prompt_embeds, image_embeddings], dim=1)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_embeddings = torch.zeros_like(prompt_embeds).to(prompt_embeds.device)
            # For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([uncond_embeddings, prompt_embeds]) # get the initial random noise unless the user supplied it num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=[True] * image.shape[0] ) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
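A hypothetical usage sketch of the pipeline above. The checkpoint path and image filenames are placeholders, and any PIL images can serve as the retrieved neighbors; if no `retriever` is configured, conditioning falls back to exactly the images you pass in:

```python
import torch
from PIL import Image

from pipeline_rdm import RDMPipeline

# Placeholder checkpoint path; RDMPipeline inherits from_pretrained from DiffusionPipeline,
# so this assumes a saved pipeline directory with matching components.
pipe = RDMPipeline.from_pretrained("path/to/rdm-checkpoint", torch_dtype=torch.float16).to("cuda")

neighbors = [Image.open("neighbor_0.png"), Image.open("neighbor_1.png")]
image = pipe("a photo of a corgi on a beach", retrieved_images=neighbors, knn=10).images[0]
image.save("rdm_sample.png")
```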
diffusers/examples/research_projects/rdm/pipeline_rdm.py/0
{ "file_path": "diffusers/examples/research_projects/rdm/pipeline_rdm.py", "repo_id": "diffusers", "token_count": 7202 }
127
<jupyter_start><jupyter_text>Running Stable Diffusion 3 (SD3) DreamBooth LoRA training under 16GB GPU VRAM Install Dependencies<jupyter_code>!pip install -q -U git+https://github.com/huggingface/diffusers !pip install -q -U \ transformers \ accelerate \ wandb \ bitsandbytes \ peft<jupyter_output><empty_output><jupyter_text>As SD3 is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in:<jupyter_code>!huggingface-cli login<jupyter_output><empty_output><jupyter_text>Clone `diffusers`<jupyter_code>!git clone https://github.com/huggingface/diffusers %cd diffusers/examples/research_projects/sd3_lora_colab<jupyter_output><empty_output><jupyter_text>Download instance data images<jupyter_code>from huggingface_hub import snapshot_download local_dir = "./dog" snapshot_download( "diffusers/dog-example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", ) !rm -rf dog/.huggingface<jupyter_output><empty_output><jupyter_text>Compute embeddingsHere we are using the default instance prompt "a photo of sks dog". But you can configure this. Refer to the `compute_embeddings.py` script for details on other supported arguments.<jupyter_code>!python compute_embeddings.py<jupyter_output><empty_output><jupyter_text>Clear memory<jupyter_code>import torch import gc def flush(): torch.cuda.empty_cache() gc.collect() flush()<jupyter_output><empty_output><jupyter_text>Train!<jupyter_code>!accelerate launch train_dreambooth_lora_sd3_miniature.py \ --pretrained_model_name_or_path="stabilityai/stable-diffusion-3-medium-diffusers" \ --instance_data_dir="dog" \ --data_df_path="sample_embeddings.parquet" \ --output_dir="trained-sd3-lora-miniature" \ --mixed_precision="fp16" \ --instance_prompt="a photo of sks dog" \ --resolution=1024 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 --gradient_checkpointing \ --use_8bit_adam \ --learning_rate=1e-4 \ --report_to="wandb" \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --seed="0"<jupyter_output><empty_output><jupyter_text>Training will take about an hour to complete depending on the length of your dataset. Inference<jupyter_code>flush() from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16 ) lora_output_path = "trained-sd3-lora-miniature" pipeline.load_lora_weights("trained-sd3-lora-miniature") pipeline.enable_sequential_cpu_offload() image = pipeline("a photo of sks dog in a bucket").images[0] image.save("bucket_dog.png")<jupyter_output><empty_output>
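If you want to confirm that inference really stays under the 16GB budget, a small optional cell (not in the notebook; it assumes the `pipeline` object from the inference cell above and a CUDA runtime) can pin the seed and report peak VRAM:

```python
import torch

torch.cuda.reset_peak_memory_stats()
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipeline("a photo of sks dog in a bucket", generator=generator).images[0]
image.save("bucket_dog_seeded.png")
print(f"peak VRAM: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GiB")
```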
diffusers/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb/0
{ "file_path": "diffusers/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb", "repo_id": "diffusers", "token_count": 1077 }
128
import argparse import json import torch from diffusers import AutoencoderKL, DDPMPipeline, DDPMScheduler, UNet2DModel, VQModel def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("block.", "resnets.") new_item = new_item.replace("conv_shorcut", "conv1") new_item = new_item.replace("in_shortcut", "conv_shortcut") new_item = new_item.replace("temb_proj", "time_emb_proj") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0, in_mid=False): mapping = [] for old_item in old_list: new_item = old_item # In `model.mid`, the layer is called `attn`. if not in_mid: new_item = new_item.replace("attn", "attentions") new_item = new_item.replace(".k.", ".key.") new_item = new_item.replace(".v.", ".value.") new_item = new_item.replace(".q.", ".query.") new_item = new_item.replace("proj_out", "proj_attn") new_item = new_item.replace("norm", "group_norm") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." if attention_paths_to_split is not None: if config is None: raise ValueError("Please specify the config if setting 'attention_paths_to_split' to 'True'.") for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config.get("num_head_channels", 1) // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape).squeeze() checkpoint[path_map["key"]] = key.reshape(target_shape).squeeze() checkpoint[path_map["value"]] = value.reshape(target_shape).squeeze() for path in paths: new_path = path["new"] if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue new_path = new_path.replace("down.", "down_blocks.") new_path = new_path.replace("up.", "up_blocks.") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) if "attentions" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]].squeeze() else: checkpoint[new_path] = old_checkpoint[path["old"]] def convert_ddpm_checkpoint(checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. 
""" new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["temb.dense.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["temb.dense.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["temb.dense.1.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["temb.dense.1.bias"] new_checkpoint["conv_norm_out.weight"] = checkpoint["norm_out.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["norm_out.bias"] new_checkpoint["conv_in.weight"] = checkpoint["conv_in.weight"] new_checkpoint["conv_in.bias"] = checkpoint["conv_in.bias"] new_checkpoint["conv_out.weight"] = checkpoint["conv_out.weight"] new_checkpoint["conv_out.bias"] = checkpoint["conv_out.bias"] num_down_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "down" in layer}) down_blocks = { layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } num_up_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "up" in layer}) up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)} for i in range(num_down_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) if any("downsample" in layer for layer in down_blocks[i]): new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[ f"down.{i}.downsample.op.weight" ] new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[f"down.{i}.downsample.op.bias"] # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.weight'] = checkpoint[f'down.{i}.downsample.conv.weight'] # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.bias'] = checkpoint[f'down.{i}.downsample.conv.bias'] if any("block" in layer for layer in down_blocks[i]): num_blocks = len( {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "block" in layer} ) blocks = { layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks) } if num_blocks > 0: for j in range(config["layers_per_block"]): paths = renew_resnet_paths(blocks[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint) if any("attn" in layer for layer in down_blocks[i]): num_attn = len( {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "attn" in layer} ) attns = { layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks) } if num_attn > 0: for j in range(config["layers_per_block"]): paths = renew_attention_paths(attns[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config) mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key] mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key] mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key] # Mid new 2 paths = renew_resnet_paths(mid_block_1_layers) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}], ) paths = renew_resnet_paths(mid_block_2_layers) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}], ) paths = renew_attention_paths(mid_attn_1_layers, in_mid=True) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}], ) for i in 
range(num_up_blocks): block_id = num_up_blocks - 1 - i if any("upsample" in layer for layer in up_blocks[i]): new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[ f"up.{i}.upsample.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[f"up.{i}.upsample.conv.bias"] if any("block" in layer for layer in up_blocks[i]): num_blocks = len( {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "block" in layer} ) blocks = { layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks) } if num_blocks > 0: for j in range(config["layers_per_block"] + 1): replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"} paths = renew_resnet_paths(blocks[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices]) if any("attn" in layer for layer in up_blocks[i]): num_attn = len( {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "attn" in layer} ) attns = { layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks) } if num_attn > 0: for j in range(config["layers_per_block"] + 1): replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"} paths = renew_attention_paths(attns[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices]) new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()} return new_checkpoint def convert_vq_autoenc_checkpoint(checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ new_checkpoint = {} new_checkpoint["encoder.conv_norm_out.weight"] = checkpoint["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = checkpoint["encoder.norm_out.bias"] new_checkpoint["encoder.conv_in.weight"] = checkpoint["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = checkpoint["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = checkpoint["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = checkpoint["encoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = checkpoint["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = checkpoint["decoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = checkpoint["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = checkpoint["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = checkpoint["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = checkpoint["decoder.conv_out.bias"] num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "down" in layer}) down_blocks = { layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "up" in layer}) up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)} for i in range(num_down_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) if any("downsample" in layer for layer in down_blocks[i]): new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[ f"encoder.down.{i}.downsample.conv.weight" ] new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[ f"encoder.down.{i}.downsample.conv.bias" ] if any("block" in layer 
for layer in down_blocks[i]): num_blocks = len( {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "block" in layer} ) blocks = { layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks) } if num_blocks > 0: for j in range(config["layers_per_block"]): paths = renew_resnet_paths(blocks[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint) if any("attn" in layer for layer in down_blocks[i]): num_attn = len( {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "attn" in layer} ) attns = { layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks) } if num_attn > 0: for j in range(config["layers_per_block"]): paths = renew_attention_paths(attns[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config) mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key] mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key] mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key] # Mid new 2 paths = renew_resnet_paths(mid_block_1_layers) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}], ) paths = renew_resnet_paths(mid_block_2_layers) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}], ) paths = renew_attention_paths(mid_attn_1_layers, in_mid=True) assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}], ) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i if any("upsample" in layer for layer in up_blocks[i]): new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[ f"decoder.up.{i}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[ f"decoder.up.{i}.upsample.conv.bias" ] if any("block" in layer for layer in up_blocks[i]): num_blocks = len( {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "block" in layer} ) blocks = { layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks) } if num_blocks > 0: for j in range(config["layers_per_block"] + 1): replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"} paths = renew_resnet_paths(blocks[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices]) if any("attn" in layer for layer in up_blocks[i]): num_attn = len( {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "attn" in layer} ) attns = { layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks) } if num_attn > 0: for j in range(config["layers_per_block"] + 1): replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"} paths = renew_attention_paths(attns[j]) assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices]) new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()} new_checkpoint["quant_conv.weight"] = checkpoint["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = checkpoint["quant_conv.bias"] if "quantize.embedding.weight" in checkpoint: 
new_checkpoint["quantize.embedding.weight"] = checkpoint["quantize.embedding.weight"] new_checkpoint["post_quant_conv.weight"] = checkpoint["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = checkpoint["post_quant_conv.bias"] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() checkpoint = torch.load(args.checkpoint_path) with open(args.config_file) as f: config = json.loads(f.read()) # unet case key_prefix_set = {key.split(".")[0] for key in checkpoint.keys()} if "encoder" in key_prefix_set and "decoder" in key_prefix_set: converted_checkpoint = convert_vq_autoenc_checkpoint(checkpoint, config) else: converted_checkpoint = convert_ddpm_checkpoint(checkpoint, config) if "ddpm" in config: del config["ddpm"] if config["_class_name"] == "VQModel": model = VQModel(**config) model.load_state_dict(converted_checkpoint) model.save_pretrained(args.dump_path) elif config["_class_name"] == "AutoencoderKL": model = AutoencoderKL(**config) model.load_state_dict(converted_checkpoint) model.save_pretrained(args.dump_path) else: model = UNet2DModel(**config) model.load_state_dict(converted_checkpoint) scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) pipe = DDPMPipeline(unet=model, scheduler=scheduler) pipe.save_pretrained(args.dump_path)
diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py", "repo_id": "diffusers", "token_count": 8490 }
129
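For reference, a hypothetical way to drive the conversion script above from Python (all file names are placeholders): the three required flags match the script's argparse definitions, and the config json must describe the target diffusers model.

import subprocess

subprocess.run(
    [
        "python", "convert_ddpm_original_checkpoint_to_diffusers.py",
        "--checkpoint_path", "ddpm_original/model.ckpt",  # original DDPM/VQ checkpoint
        "--config_file", "ddpm_original/config.json",     # diffusers model config
        "--dump_path", "converted-ddpm",                  # output directory
    ],
    check=True,
)

Whether the UNet converter or the VQ/KL autoencoder converter runs is decided inside the script by inspecting the checkpoint's top-level key prefixes.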
# coding=utf-8 # Copyright 2024, Haofan Wang, Qixun Wang, All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LoRA's safetensors checkpoints.""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha): # load base model pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32) # load LoRA weight from .safetensors state_dict = load_file(checkpoint_path) visited = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_") curr_layer = pipeline.text_encoder else: layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_") curr_layer = pipeline.unet # find the target layer temp_name = layer_infos.pop(0) while len(layer_infos) > -1: try: curr_layer = curr_layer.__getattr__(temp_name) if len(layer_infos) > 0: temp_name = layer_infos.pop(0) elif len(layer_infos) == 0: break except Exception: if len(temp_name) > 0: temp_name += "_" + layer_infos.pop(0) else: temp_name = layer_infos.pop(0) pair_keys = [] if "lora_down" in key: pair_keys.append(key.replace("lora_down", "lora_up")) pair_keys.append(key) else: pair_keys.append(key) pair_keys.append(key.replace("lora_up", "lora_down")) # update weight if len(state_dict[pair_keys[0]].shape) == 4: weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32) weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32) curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3) else: weight_up = state_dict[pair_keys[0]].to(torch.float32) weight_down = state_dict[pair_keys[1]].to(torch.float32) curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down) # update visited list for item in pair_keys: visited.append(item) return pipeline if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") args = parser.parse_args() base_model_path = args.base_model_path checkpoint_path = args.checkpoint_path dump_path = args.dump_path lora_prefix_unet = args.lora_prefix_unet lora_prefix_text_encoder = args.lora_prefix_text_encoder alpha = args.alpha pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) pipe = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
diffusers/scripts/convert_lora_safetensor_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_lora_safetensor_to_diffusers.py", "repo_id": "diffusers", "token_count": 2130 }
130
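The `convert` function above can also be called directly, skipping the CLI. A minimal sketch, with a placeholder base model id and checkpoint path; the prefixes shown are the script's defaults.

pipe = convert(
    base_model_path="runwayml/stable-diffusion-v1-5",
    checkpoint_path="lora/model.safetensors",
    LORA_PREFIX_UNET="lora_unet",
    LORA_PREFIX_TEXT_ENCODER="lora_te",
    alpha=0.75,
)
pipe.save_pretrained("merged-pipeline", safe_serialization=True)

Note that the merge is destructive: the LoRA deltas are added directly into the pipeline's weights, so the saved output is a plain Stable Diffusion pipeline with no separate adapter.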
# Run this script to convert the Stable Cascade model weights to a diffusers pipeline. import argparse import json import os from contextlib import nullcontext import torch from safetensors.torch import load_file from transformers import ( AutoTokenizer, T5EncoderModel, ) from diffusers import ( AutoencoderOobleck, CosineDPMSolverMultistepScheduler, StableAudioDiTModel, StableAudioPipeline, StableAudioProjectionModel, ) from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.utils import is_accelerate_available if is_accelerate_available(): from accelerate import init_empty_weights def convert_stable_audio_state_dict_to_diffusers(state_dict, num_autoencoder_layers=5): projection_model_state_dict = { k.replace("conditioner.conditioners.", "").replace("embedder.embedding", "time_positional_embedding"): v for (k, v) in state_dict.items() if "conditioner.conditioners" in k } # NOTE: we assume here that there's no projection layer from the text encoder to the latent space, script should be adapted a bit if there is. for key, value in list(projection_model_state_dict.items()): new_key = key.replace("seconds_start", "start_number_conditioner").replace( "seconds_total", "end_number_conditioner" ) projection_model_state_dict[new_key] = projection_model_state_dict.pop(key) model_state_dict = {k.replace("model.model.", ""): v for (k, v) in state_dict.items() if "model.model." in k} for key, value in list(model_state_dict.items()): # attention layers new_key = ( key.replace("transformer.", "") .replace("layers", "transformer_blocks") .replace("self_attn", "attn1") .replace("cross_attn", "attn2") .replace("ff.ff", "ff.net") ) new_key = ( new_key.replace("pre_norm", "norm1") .replace("cross_attend_norm", "norm2") .replace("ff_norm", "norm3") .replace("to_out", "to_out.0") ) new_key = new_key.replace("gamma", "weight").replace("beta", "bias") # replace layernorm # other layers new_key = ( new_key.replace("project", "proj") .replace("to_timestep_embed", "timestep_proj") .replace("timestep_features", "time_proj") .replace("to_global_embed", "global_proj") .replace("to_cond_embed", "cross_attention_proj") ) # we're using diffusers implementation of time_proj (GaussianFourierProjection) which creates a 1D tensor if new_key == "time_proj.weight": model_state_dict[key] = model_state_dict[key].squeeze(1) if "to_qkv" in new_key: q, k, v = torch.chunk(model_state_dict.pop(key), 3, dim=0) model_state_dict[new_key.replace("qkv", "q")] = q model_state_dict[new_key.replace("qkv", "k")] = k model_state_dict[new_key.replace("qkv", "v")] = v elif "to_kv" in new_key: k, v = torch.chunk(model_state_dict.pop(key), 2, dim=0) model_state_dict[new_key.replace("kv", "k")] = k model_state_dict[new_key.replace("kv", "v")] = v else: model_state_dict[new_key] = model_state_dict.pop(key) autoencoder_state_dict = { k.replace("pretransform.model.", "").replace("coder.layers.0", "coder.conv1"): v for (k, v) in state_dict.items() if "pretransform.model." 
in k } for key, _ in list(autoencoder_state_dict.items()): new_key = key if "coder.layers" in new_key: # get idx of the layer idx = int(new_key.split("coder.layers.")[1].split(".")[0]) new_key = new_key.replace(f"coder.layers.{idx}", f"coder.block.{idx-1}") if "encoder" in new_key: for i in range(3): new_key = new_key.replace(f"block.{idx-1}.layers.{i}", f"block.{idx-1}.res_unit{i+1}") new_key = new_key.replace(f"block.{idx-1}.layers.3", f"block.{idx-1}.snake1") new_key = new_key.replace(f"block.{idx-1}.layers.4", f"block.{idx-1}.conv1") else: for i in range(2, 5): new_key = new_key.replace(f"block.{idx-1}.layers.{i}", f"block.{idx-1}.res_unit{i-1}") new_key = new_key.replace(f"block.{idx-1}.layers.0", f"block.{idx-1}.snake1") new_key = new_key.replace(f"block.{idx-1}.layers.1", f"block.{idx-1}.conv_t1") new_key = new_key.replace("layers.0.beta", "snake1.beta") new_key = new_key.replace("layers.0.alpha", "snake1.alpha") new_key = new_key.replace("layers.2.beta", "snake2.beta") new_key = new_key.replace("layers.2.alpha", "snake2.alpha") new_key = new_key.replace("layers.1.bias", "conv1.bias") new_key = new_key.replace("layers.1.weight_", "conv1.weight_") new_key = new_key.replace("layers.3.bias", "conv2.bias") new_key = new_key.replace("layers.3.weight_", "conv2.weight_") if idx == num_autoencoder_layers + 1: new_key = new_key.replace(f"block.{idx-1}", "snake1") elif idx == num_autoencoder_layers + 2: new_key = new_key.replace(f"block.{idx-1}", "conv2") else: new_key = new_key value = autoencoder_state_dict.pop(key) if "snake" in new_key: value = value.unsqueeze(0).unsqueeze(-1) if new_key in autoencoder_state_dict: raise ValueError(f"{new_key} already in state dict.") autoencoder_state_dict[new_key] = value return model_state_dict, projection_model_state_dict, autoencoder_state_dict parser = argparse.ArgumentParser(description="Convert Stable Audio 1.0 model weights to a diffusers pipeline") parser.add_argument("--model_folder_path", type=str, help="Location of Stable Audio weights and config") parser.add_argument("--use_safetensors", action="store_true", help="Use SafeTensors for conversion") parser.add_argument( "--save_directory", type=str, default="./tmp/stable-audio-1.0", help="Directory to save a pipeline to. 
Will be created if it doesn't exist.", ) parser.add_argument( "--repo_id", type=str, default="stable-audio-1.0", help="Hub organization to save the pipelines to", ) parser.add_argument("--push_to_hub", action="store_true", help="Push to hub") parser.add_argument("--variant", type=str, help="Set to bf16 to save bfloat16 weights") args = parser.parse_args() checkpoint_path = ( os.path.join(args.model_folder_path, "model.safetensors") if args.use_safetensors else os.path.join(args.model_folder_path, "model.ckpt") ) config_path = os.path.join(args.model_folder_path, "model_config.json") device = "cpu" if args.variant == "bf16": dtype = torch.bfloat16 else: dtype = torch.float32 with open(config_path) as f_in: config_dict = json.load(f_in) conditioning_dict = { conditioning["id"]: conditioning["config"] for conditioning in config_dict["model"]["conditioning"]["configs"] } t5_model_config = conditioning_dict["prompt"] # T5 Text encoder text_encoder = T5EncoderModel.from_pretrained(t5_model_config["t5_model_name"]) tokenizer = AutoTokenizer.from_pretrained( t5_model_config["t5_model_name"], truncation=True, model_max_length=t5_model_config["max_length"] ) # scheduler scheduler = CosineDPMSolverMultistepScheduler( sigma_min=0.3, sigma_max=500, solver_order=2, prediction_type="v_prediction", sigma_data=1.0, sigma_schedule="exponential", ) ctx = init_empty_weights if is_accelerate_available() else nullcontext if args.use_safetensors: orig_state_dict = load_file(checkpoint_path, device=device) else: orig_state_dict = torch.load(checkpoint_path, map_location=device) model_config = config_dict["model"]["diffusion"]["config"] model_state_dict, projection_model_state_dict, autoencoder_state_dict = convert_stable_audio_state_dict_to_diffusers( orig_state_dict ) with ctx(): projection_model = StableAudioProjectionModel( text_encoder_dim=text_encoder.config.d_model, conditioning_dim=config_dict["model"]["conditioning"]["cond_dim"], min_value=conditioning_dict["seconds_start"][ "min_val" ], # assume `seconds_start` and `seconds_total` have the same min / max values. max_value=conditioning_dict["seconds_start"][ "max_val" ], # assume `seconds_start` and `seconds_total` have the same min / max values. 
) if is_accelerate_available(): load_model_dict_into_meta(projection_model, projection_model_state_dict) else: projection_model.load_state_dict(projection_model_state_dict) attention_head_dim = model_config["embed_dim"] // model_config["num_heads"] with ctx(): model = StableAudioDiTModel( sample_size=int(config_dict["sample_size"]) / int(config_dict["model"]["pretransform"]["config"]["downsampling_ratio"]), in_channels=model_config["io_channels"], num_layers=model_config["depth"], attention_head_dim=attention_head_dim, num_key_value_attention_heads=model_config["cond_token_dim"] // attention_head_dim, num_attention_heads=model_config["num_heads"], out_channels=model_config["io_channels"], cross_attention_dim=model_config["cond_token_dim"], time_proj_dim=256, global_states_input_dim=model_config["global_cond_dim"], cross_attention_input_dim=model_config["cond_token_dim"], ) if is_accelerate_available(): load_model_dict_into_meta(model, model_state_dict) else: model.load_state_dict(model_state_dict) autoencoder_config = config_dict["model"]["pretransform"]["config"] with ctx(): autoencoder = AutoencoderOobleck( encoder_hidden_size=autoencoder_config["encoder"]["config"]["channels"], downsampling_ratios=autoencoder_config["encoder"]["config"]["strides"], decoder_channels=autoencoder_config["decoder"]["config"]["channels"], decoder_input_channels=autoencoder_config["decoder"]["config"]["latent_dim"], audio_channels=autoencoder_config["io_channels"], channel_multiples=autoencoder_config["encoder"]["config"]["c_mults"], sampling_rate=config_dict["sample_rate"], ) if is_accelerate_available(): load_model_dict_into_meta(autoencoder, autoencoder_state_dict) else: autoencoder.load_state_dict(autoencoder_state_dict) # Prior pipeline pipeline = StableAudioPipeline( transformer=model, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=scheduler, vae=autoencoder, projection_model=projection_model, ) pipeline.to(dtype).save_pretrained( args.save_directory, repo_id=args.repo_id, push_to_hub=args.push_to_hub, variant=args.variant )
diffusers/scripts/convert_stable_audio.py/0
{ "file_path": "diffusers/scripts/convert_stable_audio.py", "repo_id": "diffusers", "token_count": 4812 }
131
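A hypothetical invocation of the Stable Audio converter above (the folder layout follows the script: model.safetensors or model.ckpt sitting next to model_config.json; all paths are placeholders):

import subprocess

subprocess.run(
    [
        "python", "convert_stable_audio.py",
        "--model_folder_path", "stable-audio-open-original",  # contains weights + model_config.json
        "--use_safetensors",
        "--save_directory", "./tmp/stable-audio-1.0",
        "--variant", "bf16",  # save bfloat16 weights; omit for float32
    ],
    check=True,
)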
import random import torch from huggingface_hub import HfApi from diffusers import UNet2DModel api = HfApi() results = {} # fmt: off results["google_ddpm_cifar10_32"] = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) results["google_ddpm_ema_bedroom_256"] = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) results["CompVis_ldm_celebahq_256"] = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) results["google_ncsnpp_ffhq_1024"] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) results["google_ncsnpp_bedroom_256"] = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) results["google_ncsnpp_celebahq_256"] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) results["google_ncsnpp_church_256"] = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) results["google_ncsnpp_ffhq_256"] = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) results["google_ddpm_cat_256"] = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) results["google_ddpm_celebahq_256"] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) results["google_ddpm_ema_celebahq_256"] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) 
results["google_ddpm_church_256"] = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) results["google_ddpm_bedroom_256"] = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) results["google_ddpm_ema_church_256"] = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) results["google_ddpm_ema_cat_256"] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on models = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.id == "CompVis/ldm-celebahq-256": local_checkpoint = "/home/patrick/google_checkpoints/" + mod.id.split("/")[-1] print(f"Started running {mod.id}!!!") if mod.id.startswith("CompVis"): model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet") else: model = UNet2DModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) time_step = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): logits = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.id.split("/")).split("-"))], atol=1e-3 ) print(f"{mod.id} has passed successfully!!!")
diffusers/scripts/generate_logits.py/0
{ "file_path": "diffusers/scripts/generate_logits.py", "repo_id": "diffusers", "token_count": 3530 }
132
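The script above is a regression check: with fixed seeds, the first 30 logits of channel 0 act as a fingerprint for each checkpoint. A sketch of how such a reference slice can be produced for a new model (the local path is a placeholder):

import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("/path/to/local_checkpoint")
torch.manual_seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
    logits = model(noise, time_step).sample
print(logits[0, 0, 0, :30])  # paste the printed values into the `results` table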
from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate( "text_encoder_load_state_dict in `models`", "0.27.0", "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", ) state_dict = {} for name, module in text_encoder_attn_modules(text_encoder): for k, v in module.q_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v for k, v in module.k_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v for k, v in module.v_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v for k, v in module.out_proj.lora_linear_layer.state_dict().items(): state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v return state_dict if is_transformers_available(): def text_encoder_attn_modules(text_encoder): deprecate( "text_encoder_attn_modules in `models`", "0.27.0", "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", ) from transformers import CLIPTextModel, CLIPTextModelWithProjection attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for i, layer in enumerate(text_encoder.text_model.encoder.layers): name = f"text_model.encoder.layers.{i}.self_attn" mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") return attn_modules _import_structure = {} if is_torch_available(): _import_structure["single_file_model"] = ["FromOriginalModelMixin"] _import_structure["unet"] = ["UNet2DConditionLoadersMixin"] _import_structure["utils"] = ["AttnProcsLayers"] if is_transformers_available(): _import_structure["single_file"] = ["FromSingleFileMixin"] _import_structure["lora_pipeline"] = [ "AmusedLoraLoaderMixin", "StableDiffusionLoraLoaderMixin", "SD3LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin", "LoraLoaderMixin", "FluxLoraLoaderMixin", ] _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"] _import_structure["ip_adapter"] = ["IPAdapterMixin"] _import_structure["peft"] = ["PeftAdapterMixin"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .single_file_model import FromOriginalModelMixin from .unet import UNet2DConditionLoadersMixin from .utils import AttnProcsLayers if is_transformers_available(): from .ip_adapter import IPAdapterMixin from .lora_pipeline import ( AmusedLoraLoaderMixin, FluxLoraLoaderMixin, LoraLoaderMixin, SD3LoraLoaderMixin, StableDiffusionLoraLoaderMixin, StableDiffusionXLLoraLoaderMixin, ) from .single_file import FromSingleFileMixin from .textual_inversion import TextualInversionLoaderMixin from .peft import PeftAdapterMixin else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diffusers/src/diffusers/loaders/__init__.py/0
{ "file_path": "diffusers/src/diffusers/loaders/__init__.py", "repo_id": "diffusers", "token_count": 1789 }
133
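A short illustration of what the `_LazyModule` indirection above buys (assuming `torch` and `transformers` are installed): importing the package is cheap, and the heavy submodule import only happens when a name listed in `_import_structure` is first accessed.

import diffusers.loaders as loaders  # fast: no submodule is imported yet

mixin = loaders.IPAdapterMixin  # this attribute access triggers `from .ip_adapter import IPAdapterMixin`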
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Callable, List, Optional, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import logging from .modeling_utils import ModelMixin logger = logging.get_logger(__name__) class MultiAdapter(ModelMixin): r""" MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to user-assigned weighting. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the model (such as downloading or saving, etc.) Parameters: adapters (`List[T2IAdapter]`, *optional*, defaults to None): A list of `T2IAdapter` model instances. """ def __init__(self, adapters: List["T2IAdapter"]): super(MultiAdapter, self).__init__() self.num_adapter = len(adapters) self.adapters = nn.ModuleList(adapters) if len(adapters) == 0: raise ValueError("Expecting at least one adapter") if len(adapters) == 1: raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`") # The outputs from each adapter are added together with a weight. # This means that the change in dimensions from downsampling must # be the same for all adapters. Inductively, it also means the # downscale_factor and total_downscale_factor must be the same for all # adapters. first_adapter_total_downscale_factor = adapters[0].total_downscale_factor first_adapter_downscale_factor = adapters[0].downscale_factor for idx in range(1, len(adapters)): if ( adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor or adapters[idx].downscale_factor != first_adapter_downscale_factor ): raise ValueError( f"Expecting all adapters to have the same downscaling behavior, but got:\n" f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n" f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n" f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n" f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}" ) self.total_downscale_factor = first_adapter_total_downscale_factor self.downscale_factor = first_adapter_downscale_factor def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]: r""" Args: xs (`torch.Tensor`): (batch, channel, height, width) input images for multiple adapter models concated along dimension 1, `channel` should equal to `num_adapter` * "number of channel of image". adapter_weights (`List[float]`, *optional*, defaults to None): List of floats representing the weight which will be multiply to each adapter's output before adding them together. 
""" if adapter_weights is None: adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter) else: adapter_weights = torch.tensor(adapter_weights) accume_state = None for x, w, adapter in zip(xs, adapter_weights, self.adapters): features = adapter(x) if accume_state is None: accume_state = features for i in range(len(accume_state)): accume_state[i] = w * accume_state[i] else: for i in range(len(features)): accume_state[i] += w * features[i] return accume_state def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = True, variant: Optional[str] = None, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `[`~models.adapter.MultiAdapter.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. """ idx = 0 model_path_to_save = save_directory for adapter in self.adapters: adapter.save_pretrained( model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, ) idx += 1 model_path_to_save = model_path_to_save + f"_{idx}" @classmethod def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_path (`os.PathLike`): A path to a *directory* containing model weights saved using [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype will be automatically derived from the model's weights. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. 
It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading by not initializing the weights and only loading the pre-trained weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, setting this argument to `True` will raise an error. variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is ignored when using `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. """ idx = 0 adapters = [] # load adapter and append to list until no adapter directory exists anymore # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained` # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ... model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs) adapters.append(adapter) idx += 1 model_path_to_load = pretrained_model_path + f"_{idx}" logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.") if len(adapters) == 0: raise ValueError( f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." ) return cls(adapters) class T2IAdapter(ModelMixin, ConfigMixin): r""" A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's architecture follows the original implementation of [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97) and [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235). This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the model (such as downloading or saving, etc.) Parameters: in_channels (`int`, *optional*, defaults to 3): Number of channels of Aapter's input(*control image*). Set this parameter to 1 if you're using gray scale image as *control image*. channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The number of channel of each downsample block's output hidden state. 
The `len(block_out_channels)` will also determine the number of downsample blocks in the Adapter. num_res_blocks (`int`, *optional*, defaults to 2): Number of ResNet blocks in each downsample block. downscale_factor (`int`, *optional*, defaults to 8): A factor that determines the total downscale factor of the Adapter. adapter_type (`str`, *optional*, defaults to `full_adapter`): The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`. """ @register_to_config def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 8, adapter_type: str = "full_adapter", ): super().__init__() if adapter_type == "full_adapter": self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "full_adapter_xl": self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "light_adapter": self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) else: raise ValueError( f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or " "'full_adapter_xl' or 'light_adapter'." ) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This function processes the input tensor `x` through the adapter model and returns a list of feature tensors, each representing information extracted at a different scale from the input. The length of the list is determined by the number of downsample blocks in the Adapter, as specified by the `channels` and `num_res_blocks` parameters during initialization. """ return self.adapter(x) @property def total_downscale_factor(self): return self.adapter.total_downscale_factor @property def downscale_factor(self): """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are not evenly divisible by the downscale_factor then an exception will be raised. """ return self.adapter.unshuffle.downscale_factor # full adapter class FullAdapter(nn.Module): r""" See [`T2IAdapter`] for more information. """ def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 8, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = nn.ModuleList( [ AdapterBlock(channels[0], channels[0], num_res_blocks), *[ AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True) for i in range(1, len(channels)) ], ] ) self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method processes the input tensor `x` through the FullAdapter model and performs operations including pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each capturing information at a different stage of processing within the FullAdapter model. The number of feature tensors in the list is determined by the number of downsample blocks specified during initialization. """ x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class FullAdapterXL(nn.Module): r""" See [`T2IAdapter`] for more information. 
""" def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 16, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = [] # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32] for i in range(len(channels)): if i == 1: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks)) elif i == 2: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)) else: self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) self.body = nn.ModuleList(self.body) # XL has only one downsampling AdapterBlock. self.total_downscale_factor = downscale_factor * 2 def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method takes the tensor x as input and processes it through FullAdapterXL model. It consists of operations including unshuffling pixels, applying convolution layer and appending each block into list of feature tensors. """ x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class AdapterBlock(nn.Module): r""" An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and `FullAdapterXL` models. Parameters: in_channels (`int`): Number of channels of AdapterBlock's input. out_channels (`int`): Number of channels of AdapterBlock's output. num_res_blocks (`int`): Number of ResNet blocks in the AdapterBlock. down (`bool`, *optional*, defaults to `False`): Whether to perform downsampling on AdapterBlock's input. """ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): super().__init__() self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = None if in_channels != out_channels: self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.resnets = nn.Sequential( *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)], ) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes tensor x as input and performs operations downsampling and convolutional layers if the self.downsample and self.in_conv properties of AdapterBlock model are specified. Then it applies a series of residual blocks to the input tensor. """ if self.downsample is not None: x = self.downsample(x) if self.in_conv is not None: x = self.in_conv(x) x = self.resnets(x) return x class AdapterResnetBlock(nn.Module): r""" An `AdapterResnetBlock` is a helper model that implements a ResNet-like block. Parameters: channels (`int`): Number of channels of AdapterResnetBlock's input and output. """ def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes input tensor x and applies a convolutional layer, ReLU activation, and another convolutional layer on the input tensor. It returns addition with the input tensor. """ h = self.act(self.block1(x)) h = self.block2(h) return h + x # light adapter class LightAdapter(nn.Module): r""" See [`T2IAdapter`] for more information. 
""" def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280], num_res_blocks: int = 4, downscale_factor: int = 8, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.body = nn.ModuleList( [ LightAdapterBlock(in_channels, channels[0], num_res_blocks), *[ LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True) for i in range(len(channels) - 1) ], LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True), ] ) self.total_downscale_factor = downscale_factor * (2 ** len(channels)) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method takes the input tensor x and performs downscaling and appends it in list of feature tensors. Each feature tensor corresponds to a different level of processing within the LightAdapter. """ x = self.unshuffle(x) features = [] for block in self.body: x = block(x) features.append(x) return features class LightAdapterBlock(nn.Module): r""" A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the `LightAdapter` model. Parameters: in_channels (`int`): Number of channels of LightAdapterBlock's input. out_channels (`int`): Number of channels of LightAdapterBlock's output. num_res_blocks (`int`): Number of LightAdapterResnetBlocks in the LightAdapterBlock. down (`bool`, *optional*, defaults to `False`): Whether to perform downsampling on LightAdapterBlock's input. """ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): super().__init__() mid_channels = out_channels // 4 self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1) self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)]) self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes tensor x as input and performs downsampling if required. Then it applies in convolution layer, a sequence of residual blocks, and out convolutional layer. """ if self.downsample is not None: x = self.downsample(x) x = self.in_conv(x) x = self.resnets(x) x = self.out_conv(x) return x class LightAdapterResnetBlock(nn.Module): """ A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different architecture than `AdapterResnetBlock`. Parameters: channels (`int`): Number of channels of LightAdapterResnetBlock's input and output. """ def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This function takes input tensor x and processes it through one convolutional layer, ReLU activation, and another convolutional layer and adds it to input tensor. """ h = self.act(self.block1(x)) h = self.block2(h) return h + x
diffusers/src/diffusers/models/adapter.py/0
{ "file_path": "diffusers/src/diffusers/models/adapter.py", "repo_id": "diffusers", "token_count": 10101 }
134
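A minimal sketch of running a single adapter from the module above (shapes are illustrative): the control image's height and width must be divisible by `total_downscale_factor`, which for the defaults used here is 8 * 2**(4 - 1) = 64.

import torch
from diffusers import T2IAdapter

adapter = T2IAdapter(in_channels=3, channels=[320, 640, 1280, 1280], downscale_factor=8)
control = torch.randn(1, 3, 512, 512)  # stand-in for a depth/keypose control image
features = adapter(control)
print([tuple(f.shape) for f in features])
# [(1, 320, 64, 64), (1, 640, 32, 32), (1, 1280, 16, 16), (1, 1280, 8, 8)]

Several such adapters with identical downscaling behavior can then be wrapped in `MultiAdapter`, which weights and sums their per-scale feature maps.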
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..loaders import PeftAdapterMixin from ..models.attention_processor import AttentionProcessor from ..models.modeling_utils import ModelMixin from ..utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from .controlnet import BaseOutput, zero_module from .embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed from .modeling_outputs import Transformer2DModelOutput from .transformers.transformer_flux import FluxSingleTransformerBlock, FluxTransformerBlock logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class FluxControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] controlnet_single_block_samples: Tuple[torch.Tensor] class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: List[int] = [16, 56, 56], num_mode: int = None, ): super().__init__() self.out_channels = in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = ( CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings ) self.time_text_embed = text_time_guidance_cls( embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim ) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ FluxTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ FluxSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_single_layers) ] ) # controlnet_blocks self.controlnet_blocks = nn.ModuleList([]) for _ in range(len(self.transformer_blocks)): self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.controlnet_single_blocks = nn.ModuleList([]) for _ in range(len(self.single_transformer_blocks)): self.controlnet_single_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.union = num_mode is not None if self.union: self.controlnet_mode_embedder = 
        self.controlnet_x_embedder = zero_module(torch.nn.Linear(in_channels, self.inner_dim))

        self.gradient_checkpointing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path
                to the corresponding cross attention processor. This is strongly recommended when setting trainable
                attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
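    # Editor's note (hedged illustration, not part of the upstream file): the two methods
    # above walk `named_children()` recursively, building dotted paths for every submodule
    # that exposes `get_processor`/`set_processor`. On a toy tree (names hypothetical):
    #
    #     root = nn.Module()
    #     root.block = nn.Module()
    #     root.block.attn = SomeAttention()   # exposes .get_processor()
    #
    # walking from `root` produces the key "block.attn.processor", and `set_attn_processor`
    # consumes a dict keyed the same way (or broadcasts a single processor to all layers).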
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    @classmethod
    def from_transformer(
        cls,
        transformer,
        num_layers: int = 4,
        num_single_layers: int = 10,
        attention_head_dim: int = 128,
        num_attention_heads: int = 24,
        load_weights_from_transformer=True,
    ):
        config = transformer.config
        config["num_layers"] = num_layers
        config["num_single_layers"] = num_single_layers
        config["attention_head_dim"] = attention_head_dim
        config["num_attention_heads"] = num_attention_heads

        controlnet = cls(**config)

        if load_weights_from_transformer:
            controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict())
            controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict())
            controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict())
            controlnet.x_embedder.load_state_dict(transformer.x_embedder.state_dict())
            controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False)
            controlnet.single_transformer_blocks.load_state_dict(
                transformer.single_transformer_blocks.state_dict(), strict=False
            )

            controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder)

        return controlnet
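    # Editor's note (hedged usage sketch): `from_transformer` shrinks a pretrained Flux
    # transformer into a ControlNet by overriding the layer counts and copying matching
    # weights (`strict=False`, since the ControlNet keeps fewer blocks). The checkpoint id
    # below is illustrative, and access to it is assumed:
    #
    #     from diffusers import FluxTransformer2DModel
    #
    #     transformer = FluxTransformer2DModel.from_pretrained(
    #         "black-forest-labs/FLUX.1-dev", subfolder="transformer"
    #     )
    #     controlnet = FluxControlNetModel.from_transformer(
    #         transformer, num_layers=4, num_single_layers=10
    #     )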
    def forward(
        self,
        hidden_states: torch.Tensor,
        controlnet_cond: torch.Tensor,
        controlnet_mode: torch.Tensor = None,
        conditioning_scale: float = 1.0,
        encoder_hidden_states: torch.Tensor = None,
        pooled_projections: torch.Tensor = None,
        timestep: torch.LongTensor = None,
        img_ids: torch.Tensor = None,
        txt_ids: torch.Tensor = None,
        guidance: torch.Tensor = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
        """
        The [`FluxControlNetModel`] forward method.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
                Input `hidden_states`.
            controlnet_cond (`torch.Tensor`):
                The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
            controlnet_mode (`torch.Tensor`):
                The mode tensor of shape `(batch_size, 1)`.
            conditioning_scale (`float`, defaults to `1.0`):
                The scale factor for ControlNet outputs.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`):
                Embeddings projected from the embeddings of input conditions.
            timestep (`torch.LongTensor`):
                Used to indicate denoising step.
            block_controlnet_hidden_states (`list` of `torch.Tensor`):
                A list of tensors that if specified are added to the residuals of transformer blocks.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        if joint_attention_kwargs is not None:
            joint_attention_kwargs = joint_attention_kwargs.copy()
            lora_scale = joint_attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the lora layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
                )

        hidden_states = self.x_embedder(hidden_states)

        # add the ControlNet conditioning on top of the embedded latents
        hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond)

        timestep = timestep.to(hidden_states.dtype) * 1000
        if guidance is not None:
            guidance = guidance.to(hidden_states.dtype) * 1000
        else:
            guidance = None

        temb = (
            self.time_text_embed(timestep, pooled_projections)
            if guidance is None
            else self.time_text_embed(timestep, guidance, pooled_projections)
        )

        encoder_hidden_states = self.context_embedder(encoder_hidden_states)

        if self.union:
            # union mode
            if controlnet_mode is None:
                raise ValueError("`controlnet_mode` cannot be `None` when applying ControlNet-Union")
            # union mode emb
            controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode)
            encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1)
            txt_ids = torch.cat([txt_ids[:1], txt_ids], dim=0)

        if txt_ids.ndim == 3:
            logger.warning(
                "Passing `txt_ids` 3d torch.Tensor is deprecated. "
                "Please remove the batch dimension and pass it as a 2d torch Tensor"
            )
            txt_ids = txt_ids[0]
"Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) block_samples = () for index_block, block in enumerate(self.transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, encoder_hidden_states, temb, image_rotary_emb, **ckpt_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) block_samples = block_samples + (hidden_states,) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) single_block_samples = () for index_block, block in enumerate(self.single_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, temb, image_rotary_emb, **ckpt_kwargs, ) else: hidden_states = block( hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) single_block_samples = single_block_samples + (hidden_states[:, encoder_hidden_states.shape[1] :],) # controlnet block controlnet_block_samples = () for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks): block_sample = controlnet_block(block_sample) controlnet_block_samples = controlnet_block_samples + (block_sample,) controlnet_single_block_samples = () for single_block_sample, controlnet_block in zip(single_block_samples, self.controlnet_single_blocks): single_block_sample = controlnet_block(single_block_sample) controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,) # scaling controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples] controlnet_single_block_samples = [sample * conditioning_scale for sample in controlnet_single_block_samples] controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples controlnet_single_block_samples = ( None if len(controlnet_single_block_samples) == 0 else controlnet_single_block_samples ) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (controlnet_block_samples, controlnet_single_block_samples) return FluxControlNetOutput( controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples, ) class FluxMultiControlNetModel(ModelMixin): r""" `FluxMultiControlNetModel` wrapper class for Multi-FluxControlNetModel This module is a wrapper for multiple instances of the `FluxControlNetModel`. The `forward()` API is designed to be compatible with `FluxControlNetModel`. 
class FluxMultiControlNetModel(ModelMixin):
    r"""
    `FluxMultiControlNetModel` wrapper class for Multi-FluxControlNetModel

    This module is a wrapper for multiple instances of the `FluxControlNetModel`. The `forward()` API is designed to
    be compatible with `FluxControlNetModel`.

    Args:
        controlnets (`List[FluxControlNetModel]`):
            Provides additional conditioning to the unet during the denoising process. You must set multiple
            `FluxControlNetModel` as a list.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        controlnet_cond: List[torch.Tensor],
        controlnet_mode: List[torch.Tensor],
        conditioning_scale: List[float],
        encoder_hidden_states: torch.Tensor = None,
        pooled_projections: torch.Tensor = None,
        timestep: torch.LongTensor = None,
        img_ids: torch.Tensor = None,
        txt_ids: torch.Tensor = None,
        guidance: torch.Tensor = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[FluxControlNetOutput, Tuple]:
        # ControlNet-Union with multiple conditions
        # only load one ControlNet to save memory
        if len(self.nets) == 1 and self.nets[0].union:
            controlnet = self.nets[0]

            for i, (image, mode, scale) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale)):
                block_samples, single_block_samples = controlnet(
                    hidden_states=hidden_states,
                    controlnet_cond=image,
                    controlnet_mode=mode[:, None],
                    conditioning_scale=scale,
                    timestep=timestep,
                    guidance=guidance,
                    pooled_projections=pooled_projections,
                    encoder_hidden_states=encoder_hidden_states,
                    txt_ids=txt_ids,
                    img_ids=img_ids,
                    joint_attention_kwargs=joint_attention_kwargs,
                    return_dict=return_dict,
                )

                # merge samples
                if i == 0:
                    control_block_samples = block_samples
                    control_single_block_samples = single_block_samples
                else:
                    control_block_samples = [
                        control_block_sample + block_sample
                        for control_block_sample, block_sample in zip(control_block_samples, block_samples)
                    ]

                    control_single_block_samples = [
                        control_single_block_sample + block_sample
                        for control_single_block_sample, block_sample in zip(
                            control_single_block_samples, single_block_samples
                        )
                    ]

        # Regular Multi-ControlNets
        # load all ControlNets into memory
        else:
            for i, (image, mode, scale, controlnet) in enumerate(
                zip(controlnet_cond, controlnet_mode, conditioning_scale, self.nets)
            ):
                block_samples, single_block_samples = controlnet(
                    hidden_states=hidden_states,
                    controlnet_cond=image,
                    controlnet_mode=mode[:, None],
                    conditioning_scale=scale,
                    timestep=timestep,
                    guidance=guidance,
                    pooled_projections=pooled_projections,
                    encoder_hidden_states=encoder_hidden_states,
                    txt_ids=txt_ids,
                    img_ids=img_ids,
                    joint_attention_kwargs=joint_attention_kwargs,
                    return_dict=return_dict,
                )

                # merge samples
                if i == 0:
                    control_block_samples = block_samples
                    control_single_block_samples = single_block_samples
                else:
                    control_block_samples = [
                        control_block_sample + block_sample
                        for control_block_sample, block_sample in zip(control_block_samples, block_samples)
                    ]

                    control_single_block_samples = [
                        control_single_block_sample + block_sample
                        for control_single_block_sample, block_sample in zip(
                            control_single_block_samples, single_block_samples
                        )
                    ]

        return control_block_samples, control_single_block_samples
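# Editor's note (hedged illustration): the "merge samples" branches above reduce to an
# elementwise sum across per-controlnet residual lists; shown in isolation with dummy tensors.

def _demo_residual_merge():
    samples_a = [torch.ones(1, 4, 8)]
    samples_b = [torch.full((1, 4, 8), 2.0)]
    merged = [a + b for a, b in zip(samples_a, samples_b)]
    assert torch.allclose(merged[0], torch.full((1, 4, 8), 3.0))
    return merged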
diffusers/src/diffusers/models/controlnet_flux.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet_flux.py", "repo_id": "diffusers", "token_count": 10426 }
135
# Copyright 2024 The HuggingFace Team. All rights reserved.
# `TemporalConvLayer` Copyright 2024 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import partial
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..utils import deprecate
from .activations import get_activation
from .attention_processor import SpatialNorm
from .downsampling import (  # noqa
    Downsample1D,
    Downsample2D,
    FirDownsample2D,
    KDownsample2D,
    downsample_2d,
)
from .normalization import AdaGroupNorm
from .upsampling import (  # noqa
    FirUpsample2D,
    KUpsample2D,
    Upsample1D,
    Upsample2D,
    upfirdn2d_native,
    upsample_2d,
)


class ResnetBlockCondNorm2D(nn.Module):
    r"""
    A ResNet block that uses a normalization layer incorporating conditioning information.

    Parameters:
        in_channels (`int`): The number of channels in the input.
        out_channels (`int`, *optional*, default to be `None`):
            The number of output channels for the first conv2d layer. If None, same as `in_channels`.
        dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
        temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.
        groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
        groups_out (`int`, *optional*, default to None):
            The number of groups to use for the second normalization layer. if set to None, same as `groups`.
        eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
        non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use.
        time_embedding_norm (`str`, *optional*, default to `"ada_group"`):
            The normalization layer for time embedding `temb`. Currently only support "ada_group" or "spatial".
        kernel (`torch.Tensor`, optional, default to None): FIR filter, see
            [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`].
        output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output.
        use_in_shortcut (`bool`, *optional*, default to `True`):
            If `True`, add a 1x1 nn.conv2d layer for skip-connection.
        up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer.
        down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer.
        conv_shortcut_bias (`bool`, *optional*, default to `True`):
            If `True`, adds a learnable bias to the `conv_shortcut` output.
        conv_2d_out_channels (`int`, *optional*, default to `None`):
            the number of channels in the output. If None, same as `out_channels`.
""" def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, eps: float = 1e-6, non_linearity: str = "swish", time_embedding_norm: str = "ada_group", # ada_group, spatial output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm if groups_out is None: groups_out = groups if self.time_embedding_norm == "ada_group": # ada_group self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) elif self.time_embedding_norm == "spatial": self.norm1 = SpatialNorm(in_channels, temb_channels) else: raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.time_embedding_norm == "ada_group": # ada_group self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) elif self.time_embedding_norm == "spatial": # spatial self.norm2 = SpatialNorm(out_channels, temb_channels) else: raise ValueError(f" unsupported time_embedding_norm: {self.time_embedding_norm}") self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv2d( in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) hidden_states = input_tensor hidden_states = self.norm1(hidden_states, temb) hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: # upsample_nearest_nhwc fails with large batch sizes. 
        if self.upsample is not None:
            # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
            if hidden_states.shape[0] >= 64:
                input_tensor = input_tensor.contiguous()
                hidden_states = hidden_states.contiguous()
            input_tensor = self.upsample(input_tensor)
            hidden_states = self.upsample(hidden_states)

        elif self.downsample is not None:
            input_tensor = self.downsample(input_tensor)
            hidden_states = self.downsample(hidden_states)

        hidden_states = self.conv1(hidden_states)

        hidden_states = self.norm2(hidden_states, temb)

        hidden_states = self.nonlinearity(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        output_tensor = (input_tensor + hidden_states) / self.output_scale_factor

        return output_tensor
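# Editor's note (hedged usage sketch): minimal shapes for ResnetBlockCondNorm2D with the
# default "ada_group" conditioning; channel and group sizes below are illustrative
# assumptions. Call manually, e.g. from a REPL.

def _demo_resnet_block_cond_norm():
    block = ResnetBlockCondNorm2D(in_channels=32, out_channels=64, temb_channels=512, groups=8)
    x = torch.randn(2, 32, 16, 16)
    temb = torch.randn(2, 512)                 # conditioning consumed by AdaGroupNorm
    out = block(x, temb)
    assert out.shape == (2, 64, 16, 16)        # 1x1 conv shortcut handles 32 -> 64
    return out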
""" def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, pre_norm: bool = True, eps: float = 1e-6, non_linearity: str = "swish", skip_time_act: bool = False, time_embedding_norm: str = "default", # default, scale_shift, kernel: Optional[torch.Tensor] = None, output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() if time_embedding_norm == "ada_group": raise ValueError( "This class cannot be used with `time_embedding_norm==ada_group`, please use `ResnetBlockCondNorm2D` instead", ) if time_embedding_norm == "spatial": raise ValueError( "This class cannot be used with `time_embedding_norm==spatial`, please use `ResnetBlockCondNorm2D` instead", ) self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm self.skip_time_act = skip_time_act if groups_out is None: groups_out = groups self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels is not None: if self.time_embedding_norm == "default": self.time_emb_proj = nn.Linear(temb_channels, out_channels) elif self.time_embedding_norm == "scale_shift": self.time_emb_proj = nn.Linear(temb_channels, 2 * out_channels) else: raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ") else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel) elif kernel == "sde_vp": self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest") else: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: if kernel == "fir": fir_kernel = (1, 3, 3, 1) self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel) elif kernel == "sde_vp": self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) else: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op") self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv2d( in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. 
            deprecation_message = (
                "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise"
                " an error in the future. `scale` should directly be passed while calling the underlying pipeline"
                " component i.e., via `cross_attention_kwargs`."
            )
            deprecate("scale", "1.0.0", deprecation_message)

        hidden_states = input_tensor

        hidden_states = self.norm1(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)

        if self.upsample is not None:
            # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
            if hidden_states.shape[0] >= 64:
                input_tensor = input_tensor.contiguous()
                hidden_states = hidden_states.contiguous()
            input_tensor = self.upsample(input_tensor)
            hidden_states = self.upsample(hidden_states)
        elif self.downsample is not None:
            input_tensor = self.downsample(input_tensor)
            hidden_states = self.downsample(hidden_states)

        hidden_states = self.conv1(hidden_states)

        if self.time_emb_proj is not None:
            if not self.skip_time_act:
                temb = self.nonlinearity(temb)
            temb = self.time_emb_proj(temb)[:, :, None, None]

        if self.time_embedding_norm == "default":
            if temb is not None:
                hidden_states = hidden_states + temb
            hidden_states = self.norm2(hidden_states)
        elif self.time_embedding_norm == "scale_shift":
            if temb is None:
                raise ValueError(
                    f" `temb` should not be None when `time_embedding_norm` is {self.time_embedding_norm}"
                )
            time_scale, time_shift = torch.chunk(temb, 2, dim=1)
            hidden_states = self.norm2(hidden_states)
            hidden_states = hidden_states * (1 + time_scale) + time_shift
        else:
            hidden_states = self.norm2(hidden_states)

        hidden_states = self.nonlinearity(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        output_tensor = (input_tensor + hidden_states) / self.output_scale_factor

        return output_tensor


# unet_rl.py
def rearrange_dims(tensor: torch.Tensor) -> torch.Tensor:
    if len(tensor.shape) == 2:
        return tensor[:, :, None]
    if len(tensor.shape) == 3:
        return tensor[:, :, None, :]
    elif len(tensor.shape) == 4:
        return tensor[:, :, 0, :]
    else:
        raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.")


class Conv1dBlock(nn.Module):
    """
    Conv1d --> GroupNorm --> Mish

    Parameters:
        inp_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        kernel_size (`int` or `tuple`): Size of the convolving kernel.
        n_groups (`int`, default `8`): Number of groups to separate the channels into.
        activation (`str`, defaults to `mish`): Name of the activation function.
    """

    def __init__(
        self,
        inp_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        n_groups: int = 8,
        activation: str = "mish",
    ):
        super().__init__()

        self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2)
        self.group_norm = nn.GroupNorm(n_groups, out_channels)
        self.mish = get_activation(activation)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        intermediate_repr = self.conv1d(inputs)
        intermediate_repr = rearrange_dims(intermediate_repr)
        intermediate_repr = self.group_norm(intermediate_repr)
        intermediate_repr = rearrange_dims(intermediate_repr)
        output = self.mish(intermediate_repr)
        return output
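# Editor's note (hedged illustration): the `rearrange_dims` round-trip that `Conv1dBlock`
# performs around GroupNorm, plus a minimal forward pass; sizes below are illustrative.

def _demo_conv1d_block():
    x = torch.randn(2, 8, 16)                  # (batch, channels, horizon)
    x4 = rearrange_dims(x)                     # -> (2, 8, 1, 16)
    x3 = rearrange_dims(x4)                    # -> (2, 8, 16), back to conv1d layout
    assert x3.shape == x.shape

    block = Conv1dBlock(inp_channels=8, out_channels=16, kernel_size=5, n_groups=4)
    out = block(x)
    assert out.shape == (2, 16, 16)            # padding=kernel_size//2 keeps the horizon
    return out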
""" def __init__( self, inp_channels: int, out_channels: int, embed_dim: int, kernel_size: Union[int, Tuple[int, int]] = 5, activation: str = "mish", ): super().__init__() self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) self.time_emb_act = get_activation(activation) self.time_emb = nn.Linear(embed_dim, out_channels) self.residual_conv = ( nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() ) def forward(self, inputs: torch.Tensor, t: torch.Tensor) -> torch.Tensor: """ Args: inputs : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] """ t = self.time_emb_act(t) t = self.time_emb(t) out = self.conv_in(inputs) + rearrange_dims(t) out = self.conv_out(out) return out + self.residual_conv(inputs) class TemporalConvLayer(nn.Module): """ Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from: https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016 Parameters: in_dim (`int`): Number of input channels. out_dim (`int`): Number of output channels. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. """ def __init__( self, in_dim: int, out_dim: Optional[int] = None, dropout: float = 0.0, norm_num_groups: int = 32, ): super().__init__() out_dim = out_dim or in_dim self.in_dim = in_dim self.out_dim = out_dim # conv layers self.conv1 = nn.Sequential( nn.GroupNorm(norm_num_groups, in_dim), nn.SiLU(), nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv2 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv3 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) self.conv4 = nn.Sequential( nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)), ) # zero out the last layer params,so the conv block is identity nn.init.zeros_(self.conv4[-1].weight) nn.init.zeros_(self.conv4[-1].bias) def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor: hidden_states = ( hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4) ) identity = hidden_states hidden_states = self.conv1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.conv3(hidden_states) hidden_states = self.conv4(hidden_states) hidden_states = identity + hidden_states hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape( (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:] ) return hidden_states class TemporalResnetBlock(nn.Module): r""" A Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, temb_channels: int = 512, eps: float = 1e-6, ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels kernel_size = (3, 1, 1) padding = [k // 2 for k in kernel_size] self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv3d( in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding, ) if temb_channels is not None: self.time_emb_proj = nn.Linear(temb_channels, out_channels) else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(0.0) self.conv2 = nn.Conv3d( out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding, ) self.nonlinearity = get_activation("silu") self.use_in_shortcut = self.in_channels != out_channels self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv3d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, ) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.time_emb_proj is not None: temb = self.nonlinearity(temb) temb = self.time_emb_proj(temb)[:, :, :, None, None] temb = temb.permute(0, 2, 1, 3, 4) hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = input_tensor + hidden_states return output_tensor # VideoResBlock class SpatioTemporalResBlock(nn.Module): r""" A SpatioTemporal Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the spatial resenet. temporal_eps (`float`, *optional*, defaults to `eps`): The epsilon to use for the temporal resnet. merge_factor (`float`, *optional*, defaults to `0.5`): The merge factor to use for the temporal mixing. merge_strategy (`str`, *optional*, defaults to `learned_with_images`): The merge strategy to use for the temporal mixing. switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): If `True`, switch the spatial and temporal mixing. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, temb_channels: int = 512, eps: float = 1e-6, temporal_eps: Optional[float] = None, merge_factor: float = 0.5, merge_strategy="learned_with_images", switch_spatial_to_temporal_mix: bool = False, ): super().__init__() self.spatial_res_block = ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=eps, ) self.temporal_res_block = TemporalResnetBlock( in_channels=out_channels if out_channels is not None else in_channels, out_channels=out_channels if out_channels is not None else in_channels, temb_channels=temb_channels, eps=temporal_eps if temporal_eps is not None else eps, ) self.time_mixer = AlphaBlender( alpha=merge_factor, merge_strategy=merge_strategy, switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix, ) def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, image_only_indicator: Optional[torch.Tensor] = None, ): num_frames = image_only_indicator.shape[-1] hidden_states = self.spatial_res_block(hidden_states, temb) batch_frames, channels, height, width = hidden_states.shape batch_size = batch_frames // num_frames hidden_states_mix = ( hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) ) hidden_states = ( hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) ) if temb is not None: temb = temb.reshape(batch_size, num_frames, -1) hidden_states = self.temporal_res_block(hidden_states, temb) hidden_states = self.time_mixer( x_spatial=hidden_states_mix, x_temporal=hidden_states, image_only_indicator=image_only_indicator, ) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return hidden_states class AlphaBlender(nn.Module): r""" A module to blend spatial and temporal features. Parameters: alpha (`float`): The initial value of the blending factor. merge_strategy (`str`, *optional*, defaults to `learned_with_images`): The merge strategy to use for the temporal mixing. switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`): If `True`, switch the spatial and temporal mixing. 
""" strategies = ["learned", "fixed", "learned_with_images"] def __init__( self, alpha: float, merge_strategy: str = "learned_with_images", switch_spatial_to_temporal_mix: bool = False, ): super().__init__() self.merge_strategy = merge_strategy self.switch_spatial_to_temporal_mix = switch_spatial_to_temporal_mix # For TemporalVAE if merge_strategy not in self.strategies: raise ValueError(f"merge_strategy needs to be in {self.strategies}") if self.merge_strategy == "fixed": self.register_buffer("mix_factor", torch.Tensor([alpha])) elif self.merge_strategy == "learned" or self.merge_strategy == "learned_with_images": self.register_parameter("mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))) else: raise ValueError(f"Unknown merge strategy {self.merge_strategy}") def get_alpha(self, image_only_indicator: torch.Tensor, ndims: int) -> torch.Tensor: if self.merge_strategy == "fixed": alpha = self.mix_factor elif self.merge_strategy == "learned": alpha = torch.sigmoid(self.mix_factor) elif self.merge_strategy == "learned_with_images": if image_only_indicator is None: raise ValueError("Please provide image_only_indicator to use learned_with_images merge strategy") alpha = torch.where( image_only_indicator.bool(), torch.ones(1, 1, device=image_only_indicator.device), torch.sigmoid(self.mix_factor)[..., None], ) # (batch, channel, frames, height, width) if ndims == 5: alpha = alpha[:, None, :, None, None] # (batch*frames, height*width, channels) elif ndims == 3: alpha = alpha.reshape(-1)[:, None, None] else: raise ValueError(f"Unexpected ndims {ndims}. Dimensions should be 3 or 5") else: raise NotImplementedError return alpha def forward( self, x_spatial: torch.Tensor, x_temporal: torch.Tensor, image_only_indicator: Optional[torch.Tensor] = None, ) -> torch.Tensor: alpha = self.get_alpha(image_only_indicator, x_spatial.ndim) alpha = alpha.to(x_spatial.dtype) if self.switch_spatial_to_temporal_mix: alpha = 1.0 - alpha x = alpha * x_spatial + (1.0 - alpha) * x_temporal return x
diffusers/src/diffusers/models/resnet.py/0
{ "file_path": "diffusers/src/diffusers/models/resnet.py", "repo_id": "diffusers", "token_count": 14442 }
136