Dataset schema: text (string, length 7 to 1.24M), id (string, length 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 519).
- title: Unit 0. Welcome to the RLHF Handbook! sections: - local: chapter0/introduction title: What is this about?
alignment-handbook/chapters/en/_toctree.yml/0
{ "file_path": "alignment-handbook/chapters/en/_toctree.yml", "repo_id": "alignment-handbook", "token_count": 38 }
13
# Model arguments model_name_or_path: teknium/OpenHermes-2.5-Mistral-7B torch_dtype: null # Data training arguments dataset_mixer: HuggingFaceH4/orca_dpo_pairs: 1.0 dataset_splits: - train_prefs - test_prefs preprocessing_num_workers: 12 # Training arguments with sensible defaults bf16: true beta: 0.01 loss_type: sigmoid do_eval: true do_train: true eval_strategy: steps eval_steps: 100 gradient_accumulation_steps: 2 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: HuggingFaceH4/openhermes-2.5-mistral-7b-dpo hub_model_revision: v1.0 learning_rate: 5.0e-7 logging_steps: 10 lr_scheduler_type: cosine max_prompt_length: 512 num_train_epochs: 1 optim: adamw_torch output_dir: data/openhermes-2.5-mistral-7b-dpo-v1.0 per_device_train_batch_size: 8 per_device_eval_batch_size: 8 save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/pref_align_scan/dpo/config_openhermes.yaml/0
{ "file_path": "alignment-handbook/recipes/pref_align_scan/dpo/config_openhermes.yaml", "repo_id": "alignment-handbook", "token_count": 376 }
14
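As background for the `beta` and `loss_type: sigmoid` fields in this and the following DPO recipes, the objective they configure is the standard DPO loss:

$$\mathcal{L}_{\mathrm{DPO}}(\theta) = -\,\mathbb{E}_{(x,\,y_w,\,y_l)}\!\left[\log \sigma\!\left(\beta \log \frac{\pi_\theta(y_w \mid x)}{\pi_{\mathrm{ref}}(y_w \mid x)} \;-\; \beta \log \frac{\pi_\theta(y_l \mid x)}{\pi_{\mathrm{ref}}(y_l \mid x)}\right)\right]$$

where $y_w$ and $y_l$ are the chosen and rejected completions and $\pi_{\mathrm{ref}}$ is the frozen SFT reference model; a smaller `beta` (0.01 here versus 0.05 and 0.1 in the recipes below) lets the policy drift further from that reference.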
# Model arguments model_name_or_path: HuggingFaceH4/zephyr-7b-gemma-sft-v0.1 torch_dtype: bfloat16 # Data training arguments # For definitions, see: src/h4/training/config.py dataset_mixer: argilla/dpo-mix-7k: 1.0 dataset_splits: - train - test preprocessing_num_workers: 12 # DPOTrainer arguments bf16: true beta: 0.05 do_eval: true eval_strategy: steps eval_steps: 100 gradient_accumulation_steps: 8 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: zephyr-7b-gemma-dpo learning_rate: 5.0e-7 log_level: info logging_steps: 10 lr_scheduler_type: cosine max_length: 1024 max_prompt_length: 512 num_train_epochs: 2 optim: adamw_torch output_dir: data/zephyr-7b-gemma-dpo per_device_train_batch_size: 2 per_device_eval_batch_size: 4 push_to_hub: true report_to: - tensorboard - wandb save_strategy: "no" seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/zephyr-7b-gemma/dpo/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-gemma/dpo/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 365 }
15
# Model arguments model_name_or_path: alignment-handbook/zephyr-7b-sft-full # Data training arguments # For definitions, see: src/h4/training/config.py dataset_mixer: HuggingFaceH4/ultrafeedback_binarized: 1.0 dataset_splits: - train_prefs - test_prefs preprocessing_num_workers: 12 # DPOTrainer arguments bf16: true beta: 0.1 do_eval: true eval_strategy: steps eval_steps: 100 gradient_accumulation_steps: 1 gradient_checkpointing: true hub_model_id: zephyr-7b-dpo-full learning_rate: 5.0e-7 log_level: info logging_steps: 10 lr_scheduler_type: linear max_length: 1024 max_prompt_length: 512 num_train_epochs: 3 optim: rmsprop output_dir: data/zephyr-7b-dpo-full per_device_train_batch_size: 8 per_device_eval_batch_size: 4 push_to_hub: true save_strategy: "no" save_total_limit: null seed: 42 warmup_ratio: 0.1
alignment-handbook/tests/fixtures/config_dpo_full.yaml/0
{ "file_path": "alignment-handbook/tests/fixtures/config_dpo_full.yaml", "repo_id": "alignment-handbook", "token_count": 328 }
16
# Writing a custom kernel
candle/candle-book/src/cuda/writing.md/0
{ "file_path": "candle/candle-book/src/cuda/writing.md", "repo_id": "candle", "token_count": 6 }
17
# Training Training starts with data. We're going to use the huggingface hub and start with the Hello world dataset of machine learning, MNIST. Let's start with downloading `MNIST` from [huggingface](https://huggingface.co/datasets/mnist). This requires [`hf-hub`](https://github.com/huggingface/hf-hub). ```bash cargo add hf-hub ``` This is going to be very hands-on for now. ```rust,ignore {{#include ../../../candle-examples/src/lib.rs:book_training_1}} ``` This uses the standardized `parquet` files from the `refs/convert/parquet` branch on every dataset. Our handles are now [`parquet::file::serialized_reader::SerializedFileReader`]. We can inspect the content of the files with: ```rust,ignore {{#include ../../../candle-examples/src/lib.rs:book_training_2}} ``` You should see something like: ```bash Column id 1, name label, value 6 Column id 0, name image, value {bytes: [137, ....] Column id 1, name label, value 8 Column id 0, name image, value {bytes: [137, ....] ``` So each row contains 2 columns (image, label) with image being saved as bytes. Let's put them into a useful struct.
candle/candle-book/src/training/training.md/0
{ "file_path": "candle/candle-book/src/training/training.md", "repo_id": "candle", "token_count": 361 }
18
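The page above ends with "Let's put them into a useful struct" but the struct itself is not part of this excerpt. Below is only a hedged sketch of what such a struct could look like; the name `MnistBatch` and the `from_raw` helper are illustrative rather than the book's code, and it relies on `Tensor::from_vec`, `to_dtype`, and scalar division as used elsewhere in this corpus.

```rust
use candle::{DType, Device, Result, Tensor};

/// Hypothetical container for decoded MNIST rows: flattened images plus labels.
struct MnistBatch {
    images: Tensor, // shape (n, 784), f32 scaled to [0, 1]
    labels: Tensor, // shape (n,), u32 class ids
}

impl MnistBatch {
    /// `images` holds n concatenated 28x28 grayscale images as raw bytes,
    /// `labels` the matching n class ids.
    fn from_raw(images: Vec<u8>, labels: Vec<u32>, dev: &Device) -> Result<Self> {
        let n = labels.len();
        let images = Tensor::from_vec(images, (n, 28 * 28), dev)?;
        let images = (images.to_dtype(DType::F32)? / 255.0)?;
        let labels = Tensor::from_vec(labels, n, dev)?;
        Ok(Self { images, labels })
    }
}
```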
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { // This requires the code to be run with MTL_CAPTURE_ENABLED=1 let device = Device::new_metal(0)?; let metal_device = match &device { Device::Metal(m) => m, _ => anyhow::bail!("unexpected device"), }; metal_device.capture("/tmp/candle.gputrace")?; // This first synchronize ensures that a new command buffer gets created after setting up the // capture scope. device.synchronize()?; let x = Tensor::randn(0f32, 1.0, (128, 128), &device)?; let x1 = x.add(&x)?; println!("{x1:?}"); // This second synchronize ensures that the command buffer gets committed before the end of the // capture scope. device.synchronize()?; Ok(()) }
candle/candle-core/examples/metal_basics.rs/0
{ "file_path": "candle/candle-core/examples/metal_basics.rs", "repo_id": "candle", "token_count": 347 }
19
use crate::{DType, Layout}; /// cudarc related errors #[derive(thiserror::Error, Debug)] pub enum CudaError { #[error(transparent)] Cuda(#[from] cudarc::driver::DriverError), #[error(transparent)] Compiler(#[from] cudarc::nvrtc::CompileError), #[error(transparent)] Cublas(#[from] cudarc::cublas::result::CublasError), #[error(transparent)] Curand(#[from] cudarc::curand::result::CurandError), #[error("missing kernel '{module_name}'")] MissingKernel { module_name: String }, #[error("unsupported dtype {dtype:?} for {op}")] UnsupportedDtype { dtype: DType, op: &'static str }, #[error("internal error '{0}'")] InternalError(&'static str), #[error("matmul is only supported for contiguous tensors lstride: {lhs_stride:?} rstride: {rhs_stride:?} mnk: {mnk:?}")] MatMulNonContiguous { lhs_stride: Layout, rhs_stride: Layout, mnk: (usize, usize, usize), }, #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, #[error("{cuda} when loading {module_name}")] Load { cuda: cudarc::driver::DriverError, module_name: String, }, } impl From<CudaError> for crate::Error { fn from(val: CudaError) -> Self { crate::Error::Cuda(Box::new(val)).bt() } } pub trait WrapErr<O> { fn w(self) -> std::result::Result<O, crate::Error>; } impl<O, E: Into<CudaError>> WrapErr<O> for std::result::Result<O, E> { fn w(self) -> std::result::Result<O, crate::Error> { self.map_err(|e| crate::Error::Cuda(Box::new(e.into())).bt()) } }
candle/candle-core/src/cuda_backend/error.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/error.rs", "repo_id": "candle", "token_count": 750 }
20
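A hedged sketch of how the `WrapErr` adapter above is used: any cudarc result whose error type converts into `CudaError` (here NVRTC's `CompileError`, covered by the `Compiler(#[from] ...)` variant) becomes a crate-level error via `.w()`. The wrapper function is hypothetical and assumes cudarc's `compile_ptx` entry point.

```rust
use candle::cuda_backend::WrapErr;

/// Compile CUDA source to PTX, mapping CompileError -> CudaError -> candle::Error.
fn compile_kernel(src: &str) -> candle::Result<cudarc::nvrtc::Ptx> {
    cudarc::nvrtc::compile_ptx(src).w()
}
```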
//! Numpy support for tensors. //! //! The spec for the npy format can be found in //! [npy-format](https://docs.scipy.org/doc/numpy-1.14.2/neps/npy-format.html). //! The functions from this module can be used to read tensors from npy/npz files //! or write tensors to these files. A npy file contains a single tensor (unnamed) //! whereas a npz file can contain multiple named tensors. npz files are also compressed. //! //! These two formats are easy to use in Python using the numpy library. //! //! ```python //! import numpy as np //! x = np.arange(10) //! //! # Write a npy file. //! np.save("test.npy", x) //! //! # Read a value from the npy file. //! x = np.load("test.npy") //! //! # Write multiple values to a npz file. //! values = { "x": x, "x_plus_one": x + 1 } //! np.savez("test.npz", **values) //! //! # Load multiple values from a npz file. //! values = np.loadz("test.npz") //! ``` use crate::{DType, Device, Error, Result, Shape, Tensor}; use byteorder::{LittleEndian, ReadBytesExt}; use half::{bf16, f16, slice::HalfFloatSliceExt}; use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, Read, Write}; use std::path::Path; const NPY_MAGIC_STRING: &[u8] = b"\x93NUMPY"; const NPY_SUFFIX: &str = ".npy"; fn read_header<R: Read>(reader: &mut R) -> Result<String> { let mut magic_string = vec![0u8; NPY_MAGIC_STRING.len()]; reader.read_exact(&mut magic_string)?; if magic_string != NPY_MAGIC_STRING { return Err(Error::Npy("magic string mismatch".to_string())); } let mut version = [0u8; 2]; reader.read_exact(&mut version)?; let header_len_len = match version[0] { 1 => 2, 2 => 4, otherwise => return Err(Error::Npy(format!("unsupported version {otherwise}"))), }; let mut header_len = vec![0u8; header_len_len]; reader.read_exact(&mut header_len)?; let header_len = header_len .iter() .rev() .fold(0_usize, |acc, &v| 256 * acc + v as usize); let mut header = vec![0u8; header_len]; reader.read_exact(&mut header)?; Ok(String::from_utf8_lossy(&header).to_string()) } #[derive(Debug, PartialEq)] struct Header { descr: DType, fortran_order: bool, shape: Vec<usize>, } impl Header { fn shape(&self) -> Shape { Shape::from(self.shape.as_slice()) } fn to_string(&self) -> Result<String> { let fortran_order = if self.fortran_order { "True" } else { "False" }; let mut shape = self .shape .iter() .map(|x| x.to_string()) .collect::<Vec<_>>() .join(","); let descr = match self.descr { DType::BF16 => Err(Error::Npy("bf16 is not supported".into()))?, DType::F16 => "f2", DType::F32 => "f4", DType::F64 => "f8", DType::I64 => "i8", DType::U32 => "u4", DType::U8 => "u1", }; if !shape.is_empty() { shape.push(',') } Ok(format!( "{{'descr': '<{descr}', 'fortran_order': {fortran_order}, 'shape': ({shape}), }}" )) } // Hacky parser for the npy header, a typical example would be: // {'descr': '<f8', 'fortran_order': False, 'shape': (128,), } fn parse(header: &str) -> Result<Header> { let header = header.trim_matches(|c: char| c == '{' || c == '}' || c == ',' || c.is_whitespace()); let mut parts: Vec<String> = vec![]; let mut start_index = 0usize; let mut cnt_parenthesis = 0i64; for (index, c) in header.chars().enumerate() { match c { '(' => cnt_parenthesis += 1, ')' => cnt_parenthesis -= 1, ',' => { if cnt_parenthesis == 0 { parts.push(header[start_index..index].to_owned()); start_index = index + 1; } } _ => {} } } parts.push(header[start_index..].to_owned()); let mut part_map: HashMap<String, String> = HashMap::new(); for part in parts.iter() { let part = part.trim(); if !part.is_empty() { match 
part.split(':').collect::<Vec<_>>().as_slice() { [key, value] => { let key = key.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let value = value.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let _ = part_map.insert(key.to_owned(), value.to_owned()); } _ => return Err(Error::Npy(format!("unable to parse header {header}"))), } } } let fortran_order = match part_map.get("fortran_order") { None => false, Some(fortran_order) => match fortran_order.as_ref() { "False" => false, "True" => true, _ => return Err(Error::Npy(format!("unknown fortran_order {fortran_order}"))), }, }; let descr = match part_map.get("descr") { None => return Err(Error::Npy("no descr in header".to_string())), Some(descr) => { if descr.is_empty() { return Err(Error::Npy("empty descr".to_string())); } if descr.starts_with('>') { return Err(Error::Npy(format!("little-endian descr {descr}"))); } // the only supported types in tensor are: // float64, float32, float16, // complex64, complex128, // int64, int32, int16, int8, // uint8, and bool. match descr.trim_matches(|c: char| c == '=' || c == '<' || c == '|') { "e" | "f2" => DType::F16, "f" | "f4" => DType::F32, "d" | "f8" => DType::F64, // "i" | "i4" => DType::S32, "q" | "i8" => DType::I64, // "h" | "i2" => DType::S16, // "b" | "i1" => DType::S8, "B" | "u1" => DType::U8, "I" | "u4" => DType::U32, "?" | "b1" => DType::U8, // "F" | "F4" => DType::C64, // "D" | "F8" => DType::C128, descr => return Err(Error::Npy(format!("unrecognized descr {descr}"))), } } }; let shape = match part_map.get("shape") { None => return Err(Error::Npy("no shape in header".to_string())), Some(shape) => { let shape = shape.trim_matches(|c: char| c == '(' || c == ')' || c == ','); if shape.is_empty() { vec![] } else { shape .split(',') .map(|v| v.trim().parse::<usize>()) .collect::<std::result::Result<Vec<_>, _>>()? } } }; Ok(Header { descr, fortran_order, shape, }) } } impl Tensor { // TODO: Add the possibility to read directly to a device? pub(crate) fn from_reader<R: std::io::Read>( shape: Shape, dtype: DType, reader: &mut R, ) -> Result<Self> { let elem_count = shape.elem_count(); match dtype { DType::BF16 => { let mut data_t = vec![bf16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F16 => { let mut data_t = vec![f16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F32 => { let mut data_t = vec![0f32; elem_count]; reader.read_f32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F64 => { let mut data_t = vec![0f64; elem_count]; reader.read_f64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U8 => { let mut data_t = vec![0u8; elem_count]; reader.read_exact(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U32 => { let mut data_t = vec![0u32; elem_count]; reader.read_u32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::I64 => { let mut data_t = vec![0i64; elem_count]; reader.read_i64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } } } /// Reads a npy file and return the stored multi-dimensional array as a tensor. 
pub fn read_npy<T: AsRef<Path>>(path: T) -> Result<Self> { let mut reader = File::open(path.as_ref())?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } Self::from_reader(header.shape(), header.descr, &mut reader) } /// Reads a npz file and returns the stored multi-dimensional arrays together with their names. pub fn read_npz<T: AsRef<Path>>(path: T) -> Result<Vec<(String, Self)>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for i in 0..zip.len() { let mut reader = zip.by_index(i)?; let name = { let name = reader.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push((name, s)) } Ok(result) } /// Reads a npz file and returns the stored multi-dimensional arrays for some specified names. pub fn read_npz_by_name<T: AsRef<Path>>(path: T, names: &[&str]) -> Result<Vec<Self>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for name in names.iter() { let mut reader = match zip.by_name(&format!("{name}{NPY_SUFFIX}")) { Ok(reader) => reader, Err(_) => Err(Error::Npy(format!( "no array for {name} in {:?}", path.as_ref() )))?, }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push(s) } Ok(result) } fn write<T: Write>(&self, f: &mut T) -> Result<()> { f.write_all(NPY_MAGIC_STRING)?; f.write_all(&[1u8, 0u8])?; let header = Header { descr: self.dtype(), fortran_order: false, shape: self.dims().to_vec(), }; let mut header = header.to_string()?; let pad = 16 - (NPY_MAGIC_STRING.len() + 5 + header.len()) % 16; for _ in 0..pad % 16 { header.push(' ') } header.push('\n'); f.write_all(&[(header.len() % 256) as u8, (header.len() / 256) as u8])?; f.write_all(header.as_bytes())?; self.write_bytes(f) } /// Writes a multi-dimensional array in the npy format. pub fn write_npy<T: AsRef<Path>>(&self, path: T) -> Result<()> { let mut f = File::create(path.as_ref())?; self.write(&mut f) } /// Writes multiple multi-dimensional arrays using the npz format. pub fn write_npz<S: AsRef<str>, T: AsRef<Tensor>, P: AsRef<Path>>( ts: &[(S, T)], path: P, ) -> Result<()> { let mut zip = zip::ZipWriter::new(File::create(path.as_ref())?); let options: zip::write::FileOptions<()> = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); for (name, tensor) in ts.iter() { zip.start_file(format!("{}.npy", name.as_ref()), options)?; tensor.as_ref().write(&mut zip)? } Ok(()) } } /// Lazy tensor loader. pub struct NpzTensors { index_per_name: HashMap<String, usize>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. 
} impl NpzTensors { pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> { let path = path.as_ref().to_owned(); let zip_reader = BufReader::new(File::open(&path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut index_per_name = HashMap::new(); for i in 0..zip.len() { let file = zip.by_index(i)?; let name = { let name = file.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; index_per_name.insert(name, i); } Ok(Self { index_per_name, path, }) } pub fn names(&self) -> Vec<&String> { self.index_per_name.keys().collect() } /// This only returns the shape and dtype for a named tensor. Compared to `get`, this avoids /// reading the whole tensor data. pub fn get_shape_and_dtype(&self, name: &str) -> Result<(Shape, DType)> { let index = match self.index_per_name.get(name) { None => crate::bail!("cannot find tensor {name}"), Some(index) => *index, }; let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; Ok((header.shape(), header.descr)) } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { let index = match self.index_per_name.get(name) { None => return Ok(None), Some(index) => *index, }; // We hope that the file has not changed since first reading it. let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let tensor = Tensor::from_reader(header.shape(), header.descr, &mut reader)?; Ok(Some(tensor)) } } #[cfg(test)] mod tests { use super::Header; #[test] fn parse() { let h = "{'descr': '<f8', 'fortran_order': False, 'shape': (128,), }"; assert_eq!( Header::parse(h).unwrap(), Header { descr: crate::DType::F64, fortran_order: false, shape: vec![128] } ); let h = "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128), }"; let h = Header::parse(h).unwrap(); assert_eq!( h, Header { descr: crate::DType::F32, fortran_order: true, shape: vec![256, 1, 128] } ); assert_eq!( h.to_string().unwrap(), "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128,), }" ); let h = Header { descr: crate::DType::U32, fortran_order: false, shape: vec![], }; assert_eq!( h.to_string().unwrap(), "{'descr': '<u4', 'fortran_order': False, 'shape': (), }" ); } }
candle/candle-core/src/npy.rs/0
{ "file_path": "candle/candle-core/src/npy.rs", "repo_id": "candle", "token_count": 8727 }
21
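A short usage sketch for the public helpers defined above; the file names are placeholders and `test.npz` is assumed to already exist.

```rust
use candle::{Device, Result, Tensor};

fn npy_roundtrip() -> Result<()> {
    // A .npy file stores a single unnamed tensor: write one, then read it back.
    let x = Tensor::arange(0f32, 10f32, &Device::Cpu)?;
    x.write_npy("test.npy")?;
    let y = Tensor::read_npy("test.npy")?;
    assert_eq!(y.to_vec1::<f32>()?, x.to_vec1::<f32>()?);

    // A .npz archive stores several named tensors; read_npz yields (name, tensor) pairs.
    for (name, t) in Tensor::read_npz("test.npz")? {
        println!("{name}: {:?} {:?}", t.shape(), t.dtype());
    }
    Ok(())
}
```

For large archives, the `NpzTensors` loader above reads only the requested entry (`get`) or just its header (`get_shape_and_dtype`) instead of decoding every tensor up front.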
use crate::{Result, Tensor, WithDType}; pub enum TensorScalar { Tensor(Tensor), Scalar(Tensor), } pub trait TensorOrScalar { fn to_tensor_scalar(self) -> Result<TensorScalar>; } impl TensorOrScalar for &Tensor { fn to_tensor_scalar(self) -> Result<TensorScalar> { Ok(TensorScalar::Tensor(self.clone())) } } impl<T: WithDType> TensorOrScalar for T { fn to_tensor_scalar(self) -> Result<TensorScalar> { let scalar = Tensor::new(self, &crate::Device::Cpu)?; Ok(TensorScalar::Scalar(scalar)) } }
candle/candle-core/src/scalar.rs/0
{ "file_path": "candle/candle-core/src/scalar.rs", "repo_id": "candle", "token_count": 261 }
22
use anyhow::Result; use candle_core::{Device, IndexOp, Tensor}; #[test] fn integer_index() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(1)?; assert_eq!(result.dims(), &[3]); assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]); let result = tensor.i((.., 2))?; assert_eq!(result.dims(), &[2]); assert_eq!(result.to_vec1::<u32>()?, &[2, 5]); Ok(()) } #[test] fn range_index() -> Result<()> { let dev = Device::Cpu; // RangeFull let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Range let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?; let result = tensor.i(1..3)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeFrom let result = tensor.i(2..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]); // RangeTo let result = tensor.i(..2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // RangeInclusive let result = tensor.i(1..=2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeTo let result = tensor.i(..1)?; assert_eq!(result.dims(), &[1, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]); // RangeToInclusive let result = tensor.i(..=1)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Empty range let result = tensor.i(1..1)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); // Similar to PyTorch, allow empty ranges when the computed length is negative. #[allow(clippy::reversed_empty_ranges)] let result = tensor.i(1..0)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); Ok(()) } #[test] fn index_3d() -> Result<()> { let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?; assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0); assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12); assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4); assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7); assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]); assert_eq!( tensor.i((0..2, .., 0))?.to_vec2::<u32>()?, &[[0, 4, 8], [12, 16, 20]] ); assert_eq!( tensor.i((..2, .., 3))?.to_vec2::<u32>()?, &[[3, 7, 11], [15, 19, 23]] ); assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]); Ok(()) } #[test] fn slice_assign() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?; let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?; let out = tensor.slice_assign(&[1..4, 3..5], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [5, 6, 7, 0, 1], [10, 11, 12, 2, 3], [15, 16, 17, 4, 5] ] ); let out = tensor.slice_assign(&[0..3, 0..2], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [2, 3, 7, 8, 9], [4, 5, 12, 13, 14], [15, 16, 17, 18, 19] ] ); Ok(()) }
candle/candle-core/tests/indexing_tests.rs/0
{ "file_path": "candle/candle-core/tests/indexing_tests.rs", "repo_id": "candle", "token_count": 1994 }
23
use candle::{Result, Tensor}; pub struct Batcher<I> { inner: I, batch_size: usize, return_last_incomplete_batch: bool, } impl<I> Batcher<I> { fn new(inner: I) -> Self { Self { inner, batch_size: 16, return_last_incomplete_batch: false, } } pub fn batch_size(mut self, batch_size: usize) -> Self { self.batch_size = batch_size; self } pub fn return_last_incomplete_batch(mut self, r: bool) -> Self { self.return_last_incomplete_batch = r; self } } pub struct Iter1<I: Iterator<Item = Tensor>> { inner: I, } pub struct Iter2<I: Iterator<Item = (Tensor, Tensor)>> { inner: I, } impl<I: Iterator<Item = Tensor>> Batcher<Iter1<I>> { pub fn new1(inner: I) -> Self { Self::new(Iter1 { inner }) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Batcher<Iter2<I>> { pub fn new2(inner: I) -> Self { Self::new(Iter2 { inner }) } } pub struct IterResult1<I: Iterator<Item = Result<Tensor>>> { inner: I, } pub struct IterResult2<I: Iterator<Item = Result<(Tensor, Tensor)>>> { inner: I, } impl<I: Iterator<Item = Result<Tensor>>> Batcher<IterResult1<I>> { pub fn new_r1(inner: I) -> Self { Self::new(IterResult1 { inner }) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Batcher<IterResult2<I>> { pub fn new_r2(inner: I) -> Self { Self::new(IterResult2 { inner }) } } impl<I: Iterator<Item = Tensor>> Iterator for Batcher<Iter1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch { break; } return None; } } } Some(Tensor::stack(&items, 0)) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Iterator for Batcher<Iter2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { match self.inner.inner.next() { Some((x, y)) => { xs.push(x); ys.push(y) } None => { if self.return_last_incomplete_batch { break; } return None; } } } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } } impl<I: Iterator<Item = Result<Tensor>>> Iterator for Batcher<IterResult1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. 
match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch { break; } return None; } } } let items = items.into_iter().collect::<Result<Vec<Tensor>>>(); Some(items.and_then(|items| Tensor::stack(&items, 0))) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Iterator for Batcher<IterResult2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); let mut errs = vec![]; for _i in 0..self.batch_size { match self.inner.inner.next() { Some(Ok((x, y))) => { xs.push(x); ys.push(y) } Some(Err(err)) => errs.push(err), None => { if self.return_last_incomplete_batch { break; } return None; } } } if !errs.is_empty() { return Some(Err(errs.swap_remove(0))); } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } }
candle/candle-datasets/src/batcher.rs/0
{ "file_path": "candle/candle-datasets/src/batcher.rs", "repo_id": "candle", "token_count": 2660 }
24
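A hedged usage sketch of the batcher above, assuming it is exposed as `candle_datasets::batcher::Batcher`; the synthetic data and batch size are purely illustrative.

```rust
use candle::{Device, Result, Tensor};
use candle_datasets::batcher::Batcher;

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // 100 toy (x, y) samples, each a 1-element tensor, yielded as Results.
    let samples = (0..100u32).map(|i| -> Result<(Tensor, Tensor)> {
        let x = Tensor::new(&[i as f32], &dev)?;
        let y = Tensor::new(&[(2 * i) as f32], &dev)?;
        Ok((x, y))
    });
    // new_r2 handles iterators of Result pairs; each yielded batch is a stacked pair.
    for batch in Batcher::new_r2(samples).batch_size(32) {
        let (xs, ys) = batch?;
        println!("{:?} {:?}", xs.shape(), ys.shape()); // [32, 1] each
    }
    // With return_last_incomplete_batch left at false, the final 4 samples are dropped.
    Ok(())
}
```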
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::models::bert::{BertModel, Config, HiddenAct, DTYPE}; use anyhow::{Error as E, Result}; use candle::Tensor; use candle_nn::VarBuilder; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::{PaddingParams, Tokenizer}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, /// When set, compute embeddings for this prompt. #[arg(long)] prompt: Option<String>, /// Use the pytorch weights rather than the safetensors ones #[arg(long)] use_pth: bool, /// The number of times to run the prompt. #[arg(long, default_value = "1")] n: usize, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// Use tanh based approximation for Gelu instead of erf implementation. #[arg(long, default_value = "false")] approximate_gelu: bool, } impl Args { fn build_model_and_tokenizer(&self) -> Result<(BertModel, Tokenizer)> { let device = candle_examples::device(self.cpu)?; let default_model = "sentence-transformers/all-MiniLM-L6-v2".to_string(); let default_revision = "refs/pr/21".to_string(); let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id, RepoType::Model, revision); let (config_filename, tokenizer_filename, weights_filename) = { let api = Api::new()?; let api = api.repo(repo); let config = api.get("config.json")?; let tokenizer = api.get("tokenizer.json")?; let weights = if self.use_pth { api.get("pytorch_model.bin")? } else { api.get("model.safetensors")? }; (config, tokenizer, weights) }; let config = std::fs::read_to_string(config_filename)?; let mut config: Config = serde_json::from_str(&config)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let vb = if self.use_pth { VarBuilder::from_pth(&weights_filename, DTYPE, &device)? } else { unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? } }; if self.approximate_gelu { config.hidden_act = HiddenAct::GeluApproximate; } let model = BertModel::load(vb, &config)?; Ok((model, tokenizer)) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { println!("tracing..."); let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let start = std::time::Instant::now(); let (model, mut tokenizer) = args.build_model_and_tokenizer()?; let device = &model.device; if let Some(prompt) = args.prompt { let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let token_type_ids = token_ids.zeros_like()?; println!("Loaded and encoded {:?}", start.elapsed()); for idx in 0..args.n { let start = std::time::Instant::now(); let ys = model.forward(&token_ids, &token_type_ids, None)?; if idx == 0 { println!("{ys}"); } println!("Took {:?}", start.elapsed()); } } else { let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); if let Some(pp) = tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; tokenizer.with_padding(Some(pp)); } let tokens = tokenizer .encode_batch(sentences.to_vec(), true) .map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Ok(Tensor::new(tokens.as_slice(), device)?) }) .collect::<Result<Vec<_>>>()?; let attention_mask = tokens .iter() .map(|tokens| { let tokens = tokens.get_attention_mask().to_vec(); Ok(Tensor::new(tokens.as_slice(), device)?) }) .collect::<Result<Vec<_>>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; let attention_mask = Tensor::stack(&attention_mask, 0)?; let token_type_ids = token_ids.zeros_like()?; println!("running inference on batch {:?}", token_ids.shape()); let embeddings = model.forward(&token_ids, &token_type_ids, Some(&attention_mask))?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); let mut similarities = vec![]; for i in 0..n_sentences { let e_i = embeddings.get(i)?; for j in (i + 1)..n_sentences { let e_j = embeddings.get(j)?; let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
candle/candle-examples/examples/bert/main.rs/0
{ "file_path": "candle/candle-examples/examples/bert/main.rs", "repo_id": "candle", "token_count": 3718 }
25
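The score printed at the end of this example, `sum_ij / (sum_i2 * sum_j2).sqrt()`, is the usual cosine similarity between the two mean-pooled embeddings:

$$\mathrm{score}(e_i, e_j) = \frac{e_i \cdot e_j}{\lVert e_i \rVert \, \lVert e_j \rVert} = \frac{\sum_k e_{ik}\, e_{jk}}{\sqrt{\sum_k e_{ik}^2}\, \sqrt{\sum_k e_{jk}^2}}$$

When `normalize_embeddings` is left at its default of true, the denominators are already 1 up to numerical error, so the dot product alone would give the same ranking.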
// This example illustrates how to implement custom operations. These operations can provide their // own forward pass (CPU and GPU versions) as well as their backward pass. // // In this example we add the RMS normalization operation and implement it for f32. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[rustfmt::skip] #[cfg(feature = "cuda")] mod cuda_kernels; use clap::Parser; use candle::{CpuStorage, CustomOp1, Layout, Result, Shape, Tensor}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } struct LayerNorm { eps: f32, } impl CustomOp1 for LayerNorm { fn name(&self) -> &'static str { "layer-norm" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { let (dim1, dim2) = layout.shape().dims2()?; let slice = storage.as_slice::<f32>()?; let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &slice[o1..o2], }; let mut dst = Vec::with_capacity(dim1 * dim2); for idx1 in 0..dim1 { let src = &src[idx1 * dim2..(idx1 + 1) * dim2]; let variance = src.iter().map(|x| x * x).sum::<f32>(); let s_variance = 1f32 / (variance / dim2 as f32 + self.eps).sqrt(); dst.extend(src.iter().map(|x| x * s_variance)) } let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, layout.shape().clone())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::{LaunchAsync, LaunchConfig}; use candle::cuda_backend::WrapErr; let (d1, d2) = layout.shape().dims2()?; let d1 = d1 as u32; let d2 = d2 as u32; let dev = storage.device().clone(); let slice = storage.as_cuda_slice::<f32>()?; let slice = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => slice.slice(o1..o2), }; let elem_count = layout.shape().elem_count(); let dst = unsafe { dev.alloc::<f32>(elem_count) }.w()?; let func = dev.get_or_load_func("rms_f32", cuda_kernels::LAYERNORM_KERNELS)?; let params = (&dst, &slice, self.eps, d1, d2); let cfg = LaunchConfig { grid_dim: (d1, 1, 1), block_dim: (d2, 1, 1), shared_mem_bytes: 0, }; unsafe { func.launch(cfg, params) }.w()?; let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev); Ok((dst, layout.shape().clone())) } } fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let t = Tensor::arange(0f32, 14f32, &device)?.reshape((2, 7))?; println!("{t}"); let t = t.apply_op1(LayerNorm { eps: 1e-5 })?; println!("{t}"); Ok(()) }
candle/candle-examples/examples/custom-ops/main.rs/0
{ "file_path": "candle/candle-examples/examples/custom-ops/main.rs", "repo_id": "candle", "token_count": 1475 }
26
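As the opening comment notes, the op implements RMS normalization even though the struct is named `LayerNorm`. For a row $x$ of length $d$, `cpu_fwd` computes

$$y_k = \frac{x_k}{\sqrt{\tfrac{1}{d}\sum_{j=1}^{d} x_j^{2} + \varepsilon}}, \qquad \varepsilon = 10^{-5} \text{ in } \texttt{main},$$

i.e. each element is scaled by the reciprocal root-mean-square of its row, with no mean subtraction and no learned scale or bias in this example.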
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::encodec::{Config, Model}; use clap::{Parser, ValueEnum}; use hf_hub::api::sync::Api; mod audio_io; #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Action { AudioToAudio, AudioToCode, CodeToAudio, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// The action to be performed, specifies the format for the input and output data. action: Action, /// The input file, either an audio file or some encodec tokens stored as safetensors. in_file: String, /// The output file, either a wave audio file or some encodec tokens stored as safetensors. out_file: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .model("facebook/encodec_24khz".to_string()) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let config = Config::default(); let model = Model::new(&config, vb)?; let codes = match args.action { Action::CodeToAudio => { let codes = candle::safetensors::load(args.in_file, &device)?; codes.get("codes").expect("no codes in input file").clone() } Action::AudioToCode | Action::AudioToAudio => { let pcm = if args.in_file == "-" { println!(">>>> RECORDING AUDIO, PRESS ENTER ONCE DONE <<<<"); let (stream, input_audio) = audio_io::setup_input_stream()?; let mut pcms = vec![]; let stdin = std::thread::spawn(|| { let mut s = String::new(); std::io::stdin().read_line(&mut s) }); while !stdin.is_finished() { let input = input_audio.lock().unwrap().take_all(); if input.is_empty() { std::thread::sleep(std::time::Duration::from_millis(100)); continue; } pcms.push(input) } drop(stream); pcms.concat() } else { let (pcm, sample_rate) = audio_io::pcm_decode(args.in_file)?; if sample_rate != 24_000 { println!("WARNING: encodec uses a 24khz sample rate, input uses {sample_rate}, resampling..."); audio_io::resample(&pcm, sample_rate as usize, 24_000)? } else { pcm } }; let pcm_len = pcm.len(); let pcm = Tensor::from_vec(pcm, (1, 1, pcm_len), &device)?; println!("input pcm shape: {:?}", pcm.shape()); model.encode(&pcm)? } }; println!("codes shape: {:?}", codes.shape()); match args.action { Action::AudioToCode => { codes.save_safetensors("codes", &args.out_file)?; } Action::AudioToAudio | Action::CodeToAudio => { let pcm = model.decode(&codes)?; println!("output pcm shape: {:?}", pcm.shape()); let pcm = pcm.i(0)?.i(0)?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; if args.out_file == "-" { let (stream, ad) = audio_io::setup_output_stream()?; { let mut ad = ad.lock().unwrap(); ad.push_samples(&pcm)?; } loop { let ad = ad.lock().unwrap(); if ad.is_empty() { break; } // That's very weird, calling thread::sleep here triggers the stream to stop // playing (the callback doesn't seem to be called anymore). 
// std::thread::sleep(std::time::Duration::from_millis(100)); } drop(stream) } else { let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?; } } } Ok(()) }
candle/candle-examples/examples/encodec/main.rs/0
{ "file_path": "candle/candle-examples/examples/encodec/main.rs", "repo_id": "candle", "token_count": 2395 }
27
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::qwen2::{Config, Model}; use candle::{DType, Tensor}; use candle_nn::VarBuilder; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::{ utils::padding::{PaddingDirection, PaddingParams, PaddingStrategy}, Tokenizer, }; // gte-Qwen1.5-7B-instruct use EOS token as padding token const EOS_TOKEN: &str = "<|endoftext|>"; const EOS_TOKEN_ID: u32 = 151643; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long, default_value = "Alibaba-NLP/gte-Qwen1.5-7B-instruct")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] local_repo: Option<String>, } #[derive(Debug)] struct ConfigFiles { pub config: std::path::PathBuf, pub tokenizer: std::path::PathBuf, pub weights: Vec<std::path::PathBuf>, } // Loading the model from the HuggingFace Hub. Network access is required. fn load_from_hub(model_id: &str, revision: &str) -> Result<ConfigFiles> { let api = Api::new()?; let repo = api.repo(Repo::with_revision( model_id.to_string(), RepoType::Model, revision.to_string(), )); Ok(ConfigFiles { config: repo.get("config.json")?, tokenizer: repo.get("tokenizer.json")?, weights: candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }) } // Loading the model from a local directory. fn load_from_local(local_path: &str) -> Result<ConfigFiles> { let local_path = std::path::PathBuf::from(local_path); let weight_path = local_path.join("model.safetensors.index.json"); let json: serde_json::Value = serde_json::from_str(&std::fs::read_to_string(weight_path)?)?; let weight_map = match json.get("weight_map") { Some(serde_json::Value::Object(map)) => map, Some(_) => panic!("`weight map` is not a map"), None => panic!("`weight map` not found"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { safetensors_files.insert( value .as_str() .expect("Weight files should be parsed as strings"), ); } let safetensors_paths = safetensors_files .iter() .map(|v| local_path.join(v)) .collect::<Vec<_>>(); Ok(ConfigFiles { config: local_path.join("config.json"), tokenizer: local_path.join("tokenizer.json"), weights: safetensors_paths, }) } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; // Fetch the model. Do this offline if local path provided. println!("Fetching model files..."); let start = std::time::Instant::now(); let config_files = match args.local_repo { Some(local_path) => load_from_local(&local_path)?, None => load_from_hub(&args.model_id, &args.revision)?, }; println!("Model file retrieved in {:?}", start.elapsed()); // Inputs will be padded to the longest sequence in the batch. 
let padding = PaddingParams { strategy: PaddingStrategy::BatchLongest, direction: PaddingDirection::Left, pad_to_multiple_of: None, pad_id: EOS_TOKEN_ID, pad_type_id: 0, pad_token: String::from(EOS_TOKEN), }; // Tokenizer setup let mut tokenizer = Tokenizer::from_file(config_files.tokenizer).map_err(E::msg)?; tokenizer.with_padding(Some(padding)); // Model initialization let device = candle_examples::device(args.cpu)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let config: Config = serde_json::from_slice(&std::fs::read(config_files.config)?)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&config_files.weights, dtype, &device)? }; let mut model = Model::new(&config, vb)?; println!("Model loaded in {:?}", start.elapsed()); // Encode the queries and the targets let instruct = "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: "; let documents = vec![ format!("{instruct}how much protein should a female eat{EOS_TOKEN}"), format!("{instruct}summit define{EOS_TOKEN}"), format!("As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.{EOS_TOKEN}"), format!("Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.{EOS_TOKEN}"), ]; let encoded = tokenizer.encode_batch(documents, true).map_err(E::msg)?; let tokens: Vec<&[u32]> = encoded.iter().map(|x| x.get_ids()).collect(); let tokens = Tensor::new(tokens, &device)?; let mask: Vec<&[u32]> = encoded.iter().map(|x| x.get_attention_mask()).collect(); let mask = Tensor::new(mask, &device)?; // Inference let start_gen = std::time::Instant::now(); let logits = model.forward(&tokens, 0, Some(&mask))?; // Extract the last hidden states as embeddings since inputs are padded left. let (_, seq_len, _) = logits.dims3()?; let embd = logits .narrow(1, seq_len - 1, 1)? .squeeze(1)? .to_dtype(DType::F32)?; // Calculate the relativity scores. Note the embeddings should be normalized. let norm = embd.broadcast_div(&embd.sqr()?.sum_keepdim(1)?.sqrt()?)?; let scores = norm.narrow(0, 0, 2)?.matmul(&norm.narrow(0, 2, 2)?.t()?)?; // Print the results println!("Embedding done in {:?}", start_gen.elapsed()); println!("Scores: {:?}", scores.to_vec2::<f32>()?); Ok(()) }
candle/candle-examples/examples/gte-qwen/main.rs/0
{ "file_path": "candle/candle-examples/examples/gte-qwen/main.rs", "repo_id": "candle", "token_count": 2613 }
28
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::{Parser, ValueEnum}; mod model; use model::{Config, Model}; use candle::{DType, Device, Module, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let input = Tensor::new(tokens.as_slice(), &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Mamba130m, Mamba370m, Mamba790m, Mamba1_4b, Mamba2_8b, Mamba2_8bSlimPj, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Mamba130m => "state-spaces/mamba-130m", Self::Mamba370m => "state-spaces/mamba-370m", Self::Mamba790m => "state-spaces/mamba-790m", Self::Mamba1_4b => "state-spaces/mamba-1.4b", Self::Mamba2_8b => "state-spaces/mamba-2.8b", Self::Mamba2_8bSlimPj => "state-spaces/mamba-2.8b-slimpj'", } } fn revision(&self) -> &'static str { match self { Self::Mamba130m | Self::Mamba370m | Self::Mamba790m | Self::Mamba1_4b | Self::Mamba2_8bSlimPj => "refs/pr/1", Self::Mamba2_8b => "refs/pr/4", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. 
#[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "mamba130m")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => api .model("EleutherAI/gpt-neox-20b".to_string()) .get("tokenizer.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { vec![repo.get("model.safetensors")?] } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; let model = Model::new(&config, vb.pp("backbone"))?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/mamba-minimal/main.rs/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/main.rs", "repo_id": "candle", "token_count": 4087 }
29
# candle-mobilenetv4 [MobileNetV4 - Universal Models for the Mobile Ecosystem](https://arxiv.org/abs/2404.10518) This candle implementation uses pre-trained MobileNetV4 models from timm for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example mobilenetv4 --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which medium loaded image Tensor[dims 3, 256, 256; f32] model built unicycle, monocycle : 20.18% mountain bike, all-terrain bike, off-roader: 19.77% bicycle-built-for-two, tandem bicycle, tandem: 15.91% crash helmet : 1.15% tricycle, trike, velocipede: 0.67% ```
candle/candle-examples/examples/mobilenetv4/README.md/0
{ "file_path": "candle/candle-examples/examples/mobilenetv4/README.md", "repo_id": "candle", "token_count": 248 }
30
# candle-phi: 1.3b and 2.7b LLM with state of the art performance for <10b models. [Phi-1.5](https://huggingface.co/microsoft/phi-1_5), [Phi-2](https://huggingface.co/microsoft/phi-2), and [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) are language models using only 1.3, 2.7, and 3.8 billion parameters but with state of the art performance compared to models with up to 10 billion parameters. The candle implementation provides both the standard version as well as a quantized variant. ## Running some examples For the v2 version. ```bash $ cargo run --example phi --release -- --model 2 \ --prompt "A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom?" A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom? Solution: The potential energy of the skier is converted into kinetic energy as it slides down the slope. The formula for potential energy is mgh, where m is mass, g is acceleration due to gravity (9.8 m/s^2), and h is height. Since there's no friction, all the potential energy is converted into kinetic energy at the bottom of the slope. The formula for kinetic energy is 1/2mv^2, where v is velocity. We can equate these two formulas: mgh = 1/2mv^2 Solving for v, we get: v = sqrt(2gh) Substituting the given values, we get: v = sqrt(2*9.8*40) = 28 m/s Therefore, the skier speed at the bottom of the slope is 28 m/s. ``` For the v1.5 version. ```bash $ cargo run --example phi --release -- --prompt "def print_prime(n): " def print_prime(n): print("Printing prime numbers") for i in range(2, n+1): if is_prime(i): print(i) def is_prime(n): if n <= 1: return False for i in range(2, int(math.sqrt(n))+1): if n % i == 0: return False return True $ cargo run --example phi --release -- \ --prompt "Explain how to find the median of an array and write the corresponding python function.\nAnswer:" \ --quantized --sample-len 200 Explain how to find the median of an array and write the corresponding python function. Answer: The median is the middle value in an array. If the array has an even number of elements, the median is the average of the two middle values. def median(arr): arr.sort() n = len(arr) if n % 2 == 0: return (arr[n//2 - 1] + arr[n//2]) / 2 else: return arr[n//2] ``` This also supports the [Puffin Phi v2 model](https://huggingface.co/teknium/Puffin-Phi-v2) for human interaction. ``` $ cargo run --example phi --release -- \ --prompt "USER: What would you do on a sunny day in Paris?\nASSISTANT:" \ --sample-len 200 --model puffin-phi-v2 --quantized USER: What would you do on a sunny day in Paris? ASSISTANT: On a sunny day in Paris, you could visit the Musée du Louvre to admire the famous painting "Mona Lisa" by Leonardo da Vinci. You might also want to stroll along the Champs-Élysées and enjoy the beautiful architecture of the buildings around you. Don't forget to stop by a café for a cup of coffee and to soak up the sun!" ```
candle/candle-examples/examples/phi/README.md/0
{ "file_path": "candle/candle-examples/examples/phi/README.md", "repo_id": "candle", "token_count": 1048 }
31
use std::collections::VecDeque; use std::fmt::Display; use candle::{DType, Device, Error, Module, Result, Tensor, Var}; use candle_nn::{ func, linear, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, Sequential, VarBuilder, VarMap, }; use rand::{distributions::Uniform, thread_rng, Rng}; use super::gym_env::GymEnv; pub struct OuNoise { mu: f64, theta: f64, sigma: f64, state: Tensor, } impl OuNoise { pub fn new(mu: f64, theta: f64, sigma: f64, size_action: usize) -> Result<Self> { Ok(Self { mu, theta, sigma, state: Tensor::ones(size_action, DType::F32, &Device::Cpu)?, }) } pub fn sample(&mut self) -> Result<Tensor> { let rand = Tensor::randn_like(&self.state, 0.0, 1.0)?; let dx = ((self.theta * (self.mu - &self.state)?)? + (self.sigma * rand)?)?; self.state = (&self.state + dx)?; Ok(self.state.clone()) } } #[derive(Clone)] struct Transition { state: Tensor, action: Tensor, reward: Tensor, next_state: Tensor, terminated: bool, truncated: bool, } impl Transition { fn new( state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) -> Self { Self { state: state.clone(), action: action.clone(), reward: reward.clone(), next_state: next_state.clone(), terminated, truncated, } } } pub struct ReplayBuffer { buffer: VecDeque<Transition>, capacity: usize, size: usize, } impl ReplayBuffer { pub fn new(capacity: usize) -> Self { Self { buffer: VecDeque::with_capacity(capacity), capacity, size: 0, } } pub fn push( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { if self.size == self.capacity { self.buffer.pop_front(); } else { self.size += 1; } self.buffer.push_back(Transition::new( state, action, reward, next_state, terminated, truncated, )); } #[allow(clippy::type_complexity)] pub fn random_batch( &self, batch_size: usize, ) -> Result<Option<(Tensor, Tensor, Tensor, Tensor, Vec<bool>, Vec<bool>)>> { if self.size < batch_size { Ok(None) } else { let transitions: Vec<&Transition> = thread_rng() .sample_iter(Uniform::from(0..self.size)) .take(batch_size) .map(|i| self.buffer.get(i).unwrap()) .collect(); let states: Vec<Tensor> = transitions .iter() .map(|t| t.state.unsqueeze(0)) .collect::<Result<_>>()?; let actions: Vec<Tensor> = transitions .iter() .map(|t| t.action.unsqueeze(0)) .collect::<Result<_>>()?; let rewards: Vec<Tensor> = transitions .iter() .map(|t| t.reward.unsqueeze(0)) .collect::<Result<_>>()?; let next_states: Vec<Tensor> = transitions .iter() .map(|t| t.next_state.unsqueeze(0)) .collect::<Result<_>>()?; let terminateds: Vec<bool> = transitions.iter().map(|t| t.terminated).collect(); let truncateds: Vec<bool> = transitions.iter().map(|t| t.truncated).collect(); Ok(Some(( Tensor::cat(&states, 0)?, Tensor::cat(&actions, 0)?, Tensor::cat(&rewards, 0)?, Tensor::cat(&next_states, 0)?, terminateds, truncateds, ))) } } } fn track( varmap: &mut VarMap, vb: &VarBuilder, target_prefix: &str, network_prefix: &str, dims: &[(usize, usize)], tau: f64, ) -> Result<()> { for (i, &(in_dim, out_dim)) in dims.iter().enumerate() { let target_w = vb.get((out_dim, in_dim), &format!("{target_prefix}-fc{i}.weight"))?; let network_w = vb.get((out_dim, in_dim), &format!("{network_prefix}-fc{i}.weight"))?; varmap.set_one( format!("{target_prefix}-fc{i}.weight"), ((tau * network_w)? 
+ ((1.0 - tau) * target_w)?)?, )?; let target_b = vb.get(out_dim, &format!("{target_prefix}-fc{i}.bias"))?; let network_b = vb.get(out_dim, &format!("{network_prefix}-fc{i}.bias"))?; varmap.set_one( format!("{target_prefix}-fc{i}.bias"), ((tau * network_b)? + ((1.0 - tau) * target_b)?)?, )?; } Ok(()) } struct Actor<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Actor<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims = vec![(size_state, 400), (400, 300), (300, size_action)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) .add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?) .add(func(|xs| xs.tanh())); Ok::<Sequential, Error>(seq) }; let network = make_network("actor")?; let target_network = make_network("target-actor")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0); Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor) -> Result<Tensor> { self.network.forward(state) } fn target_forward(&self, state: &Tensor) -> Result<Tensor> { self.target_network.forward(state) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-actor", "actor", &self.dims, tau, ) } } struct Critic<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Critic<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims: Vec<(usize, usize)> = vec![(size_state + size_action, 400), (400, 300), (300, 1)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) 
.add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?); Ok::<Sequential, Error>(seq) }; let network = make_network("critic")?; let target_network = make_network("target-critic")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0); Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.network.forward(&xs) } fn target_forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.target_network.forward(&xs) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-critic", "critic", &self.dims, tau, ) } } #[allow(clippy::upper_case_acronyms)] pub struct DDPG<'a> { actor: Actor<'a>, actor_optim: AdamW, critic: Critic<'a>, critic_optim: AdamW, gamma: f64, tau: f64, replay_buffer: ReplayBuffer, ou_noise: OuNoise, size_state: usize, size_action: usize, pub train: bool, } impl DDPG<'_> { #[allow(clippy::too_many_arguments)] pub fn new( device: &Device, size_state: usize, size_action: usize, train: bool, actor_lr: f64, critic_lr: f64, gamma: f64, tau: f64, buffer_capacity: usize, ou_noise: OuNoise, ) -> Result<Self> { let filter_by_prefix = |varmap: &VarMap, prefix: &str| { varmap .data() .lock() .unwrap() .iter() .filter_map(|(name, var)| name.starts_with(prefix).then_some(var.clone())) .collect::<Vec<Var>>() }; let actor = Actor::new(device, DType::F32, size_state, size_action)?; let actor_optim = AdamW::new( filter_by_prefix(&actor.varmap, "actor"), ParamsAdamW { lr: actor_lr, ..Default::default() }, )?; let critic = Critic::new(device, DType::F32, size_state, size_action)?; let critic_optim = AdamW::new( filter_by_prefix(&critic.varmap, "critic"), ParamsAdamW { lr: critic_lr, ..Default::default() }, )?; Ok(Self { actor, actor_optim, critic, critic_optim, gamma, tau, replay_buffer: ReplayBuffer::new(buffer_capacity), ou_noise, size_state, size_action, train, }) } pub fn remember( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { self.replay_buffer .push(state, action, reward, next_state, terminated, truncated) } pub fn actions(&mut self, state: &Tensor) -> Result<f32> { let actions = self .actor .forward(&state.detach().unsqueeze(0)?)? .squeeze(0)?; let actions = if self.train { (actions + self.ou_noise.sample()?)? } else { actions }; actions.squeeze(0)?.to_scalar::<f32>() } pub fn train(&mut self, batch_size: usize) -> Result<()> { let (states, actions, rewards, next_states, _, _) = match self.replay_buffer.random_batch(batch_size)? { Some(v) => v, _ => return Ok(()), }; let q_target = self .critic .target_forward(&next_states, &self.actor.target_forward(&next_states)?)?; let q_target = (rewards + (self.gamma * q_target)?.detach())?; let q = self.critic.forward(&states, &actions)?; let diff = (q_target - q)?; let critic_loss = diff.sqr()?.mean_all()?; self.critic_optim.backward_step(&critic_loss)?; let actor_loss = self .critic .forward(&states, &self.actor.forward(&states)?)? .mean_all()? .neg()?; self.actor_optim.backward_step(&actor_loss)?; self.critic.track(self.tau)?; self.actor.track(self.tau)?; Ok(()) } } // The impact of the q value of the next state on the current state's q value. const GAMMA: f64 = 0.99; // The weight for updating the target networks. 
const TAU: f64 = 0.005; // The capacity of the replay buffer used for sampling training data. const REPLAY_BUFFER_CAPACITY: usize = 100_000; // The training batch size for each training iteration. const TRAINING_BATCH_SIZE: usize = 100; // The total number of episodes. const MAX_EPISODES: usize = 100; // The maximum length of an episode. const EPISODE_LENGTH: usize = 200; // The number of training iterations after one episode finishes. const TRAINING_ITERATIONS: usize = 200; // Ornstein-Uhlenbeck process parameters. const MU: f64 = 0.0; const THETA: f64 = 0.15; const SIGMA: f64 = 0.1; const ACTOR_LEARNING_RATE: f64 = 1e-4; const CRITIC_LEARNING_RATE: f64 = 1e-3; pub fn run() -> Result<()> { let env = GymEnv::new("Pendulum-v1")?; println!("action space: {}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let size_state = env.observation_space().iter().product::<usize>(); let size_action = env.action_space(); let mut agent = DDPG::new( &Device::Cpu, size_state, size_action, true, ACTOR_LEARNING_RATE, CRITIC_LEARNING_RATE, GAMMA, TAU, REPLAY_BUFFER_CAPACITY, OuNoise::new(MU, THETA, SIGMA, size_action)?, )?; let mut rng = rand::thread_rng(); for episode in 0..MAX_EPISODES { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; agent.remember( &state, &Tensor::new(vec![action], &Device::Cpu)?, &Tensor::new(vec![step.reward as f32], &Device::Cpu)?, &step.state, step.terminated, step.truncated, ); if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); for _ in 0..TRAINING_ITERATIONS { agent.train(TRAINING_BATCH_SIZE)?; } } println!("Testing..."); agent.train = false; for episode in 0..10 { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/ddpg.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/ddpg.rs", "repo_id": "candle", "token_count": 8524 }
32
[ { "index": 1, "color": "#787878", "label": "wall" }, { "index": 2, "color": "#B47878", "label": "building;edifice" }, { "index": 3, "color": "#06E6E6", "label": "sky" }, { "index": 4, "color": "#503232", "label": "floor;flooring" }, { "index": 5, "color": "#04C803", "label": "tree" }, { "index": 6, "color": "#787850", "label": "ceiling" }, { "index": 7, "color": "#8C8C8C", "label": "road;route" }, { "index": 8, "color": "#CC05FF", "label": "bed" }, { "index": 9, "color": "#E6E6E6", "label": "windowpane;window" }, { "index": 10, "color": "#04FA07", "label": "grass" }, { "index": 11, "color": "#E005FF", "label": "cabinet" }, { "index": 12, "color": "#EBFF07", "label": "sidewalk;pavement" }, { "index": 13, "color": "#96053D", "label": "person;individual;someone;somebody;mortal;soul" }, { "index": 14, "color": "#787846", "label": "earth;ground" }, { "index": 15, "color": "#08FF33", "label": "door;double;door" }, { "index": 16, "color": "#FF0652", "label": "table" }, { "index": 17, "color": "#8FFF8C", "label": "mountain;mount" }, { "index": 18, "color": "#CCFF04", "label": "plant;flora;plant;life" }, { "index": 19, "color": "#FF3307", "label": "curtain;drape;drapery;mantle;pall" }, { "index": 20, "color": "#CC4603", "label": "chair" }, { "index": 21, "color": "#0066C8", "label": "car;auto;automobile;machine;motorcar" }, { "index": 22, "color": "#3DE6FA", "label": "water" }, { "index": 23, "color": "#FF0633", "label": "painting;picture" }, { "index": 24, "color": "#0B66FF", "label": "sofa;couch;lounge" }, { "index": 25, "color": "#FF0747", "label": "shelf" }, { "index": 26, "color": "#FF09E0", "label": "house" }, { "index": 27, "color": "#0907E6", "label": "sea" }, { "index": 28, "color": "#DCDCDC", "label": "mirror" }, { "index": 29, "color": "#FF095C", "label": "rug;carpet;carpeting" }, { "index": 30, "color": "#7009FF", "label": "field" }, { "index": 31, "color": "#08FFD6", "label": "armchair" }, { "index": 32, "color": "#07FFE0", "label": "seat" }, { "index": 33, "color": "#FFB806", "label": "fence;fencing" }, { "index": 34, "color": "#0AFF47", "label": "desk" }, { "index": 35, "color": "#FF290A", "label": "rock;stone" }, { "index": 36, "color": "#07FFFF", "label": "wardrobe;closet;press" }, { "index": 37, "color": "#E0FF08", "label": "lamp" }, { "index": 38, "color": "#6608FF", "label": "bathtub;bathing;tub;bath;tub" }, { "index": 39, "color": "#FF3D06", "label": "railing;rail" }, { "index": 40, "color": "#FFC207", "label": "cushion" }, { "index": 41, "color": "#FF7A08", "label": "base;pedestal;stand" }, { "index": 42, "color": "#00FF14", "label": "box" }, { "index": 43, "color": "#FF0829", "label": "column;pillar" }, { "index": 44, "color": "#FF0599", "label": "signboard;sign" }, { "index": 45, "color": "#0633FF", "label": "chest;of;drawers;chest;bureau;dresser" }, { "index": 46, "color": "#EB0CFF", "label": "counter" }, { "index": 47, "color": "#A09614", "label": "sand" }, { "index": 48, "color": "#00A3FF", "label": "sink" }, { "index": 49, "color": "#8C8C8C", "label": "skyscraper" }, { "index": 50, "color": "#FA0A0F", "label": "fireplace;hearth;open;fireplace" }, { "index": 51, "color": "#14FF00", "label": "refrigerator;icebox" }, { "index": 52, "color": "#1FFF00", "label": "grandstand;covered;stand" }, { "index": 53, "color": "#FF1F00", "label": "path" }, { "index": 54, "color": "#FFE000", "label": "stairs;steps" }, { "index": 55, "color": "#99FF00", "label": "runway" }, { "index": 56, "color": "#0000FF", "label": "case;display;case;showcase;vitrine" }, { "index": 57, "color": 
"#FF4700", "label": "pool;table;billiard;table;snooker;table" }, { "index": 58, "color": "#00EBFF", "label": "pillow" }, { "index": 59, "color": "#00ADFF", "label": "screen;door;screen" }, { "index": 60, "color": "#1F00FF", "label": "stairway;staircase" }, { "index": 61, "color": "#0BC8C8", "label": "river" }, { "index": 62, "color": "#FF5200", "label": "bridge;span" }, { "index": 63, "color": "#00FFF5", "label": "bookcase" }, { "index": 64, "color": "#003DFF", "label": "blind;screen" }, { "index": 65, "color": "#00FF70", "label": "coffee;table;cocktail;table" }, { "index": 66, "color": "#00FF85", "label": "toilet;can;commode;crapper;pot;potty;stool;throne" }, { "index": 67, "color": "#FF0000", "label": "flower" }, { "index": 68, "color": "#FFA300", "label": "book" }, { "index": 69, "color": "#FF6600", "label": "hill" }, { "index": 70, "color": "#C2FF00", "label": "bench" }, { "index": 71, "color": "#008FFF", "label": "countertop" }, { "index": 72, "color": "#33FF00", "label": "stove;kitchen;stove;range;kitchen;range;cooking;stove" }, { "index": 73, "color": "#0052FF", "label": "palm;palm;tree" }, { "index": 74, "color": "#00FF29", "label": "kitchen;island" }, { "index": 75, "color": "#00FFAD", "label": "computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system" }, { "index": 76, "color": "#0A00FF", "label": "swivel;chair" }, { "index": 77, "color": "#ADFF00", "label": "boat" }, { "index": 78, "color": "#00FF99", "label": "bar" }, { "index": 79, "color": "#FF5C00", "label": "arcade;machine" }, { "index": 80, "color": "#FF00FF", "label": "hovel;hut;hutch;shack;shanty" }, { "index": 81, "color": "#FF00F5", "label": "bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle" }, { "index": 82, "color": "#FF0066", "label": "towel" }, { "index": 83, "color": "#FFAD00", "label": "light;light;source" }, { "index": 84, "color": "#FF0014", "label": "truck;motortruck" }, { "index": 85, "color": "#FFB8B8", "label": "tower" }, { "index": 86, "color": "#001FFF", "label": "chandelier;pendant;pendent" }, { "index": 87, "color": "#00FF3D", "label": "awning;sunshade;sunblind" }, { "index": 88, "color": "#0047FF", "label": "streetlight;street;lamp" }, { "index": 89, "color": "#FF00CC", "label": "booth;cubicle;stall;kiosk" }, { "index": 90, "color": "#00FFC2", "label": "television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box" }, { "index": 91, "color": "#00FF52", "label": "airplane;aeroplane;plane" }, { "index": 92, "color": "#000AFF", "label": "dirt;track" }, { "index": 93, "color": "#0070FF", "label": "apparel;wearing;apparel;dress;clothes" }, { "index": 94, "color": "#3300FF", "label": "pole" }, { "index": 95, "color": "#00C2FF", "label": "land;ground;soil" }, { "index": 96, "color": "#007AFF", "label": "bannister;banister;balustrade;balusters;handrail" }, { "index": 97, "color": "#00FFA3", "label": "escalator;moving;staircase;moving;stairway" }, { "index": 98, "color": "#FF9900", "label": "ottoman;pouf;pouffe;puff;hassock" }, { "index": 99, "color": "#00FF0A", "label": "bottle" }, { "index": 100, "color": "#FF7000", "label": "buffet;counter;sideboard" }, { "index": 101, "color": "#8FFF00", "label": "poster;posting;placard;notice;bill;card" }, { "index": 102, "color": "#5200FF", "label": "stage" }, { "index": 103, "color": "#A3FF00", "label": "van" }, { "index": 104, "color": "#FFEB00", "label": "ship" }, { "index": 105, "color": "#08B8AA", "label": "fountain" }, { "index": 106, 
"color": "#8500FF", "label": "conveyer;belt;conveyor;belt;conveyer;conveyor;transporter" }, { "index": 107, "color": "#00FF5C", "label": "canopy" }, { "index": 108, "color": "#B800FF", "label": "washer;automatic;washer;washing;machine" }, { "index": 109, "color": "#FF001F", "label": "plaything;toy" }, { "index": 110, "color": "#00B8FF", "label": "swimming;pool;swimming;bath;natatorium" }, { "index": 111, "color": "#00D6FF", "label": "stool" }, { "index": 112, "color": "#FF0070", "label": "barrel;cask" }, { "index": 113, "color": "#5CFF00", "label": "basket;handbasket" }, { "index": 114, "color": "#00E0FF", "label": "waterfall;falls" }, { "index": 115, "color": "#70E0FF", "label": "tent;collapsible;shelter" }, { "index": 116, "color": "#46B8A0", "label": "bag" }, { "index": 117, "color": "#A300FF", "label": "minibike;motorbike" }, { "index": 118, "color": "#9900FF", "label": "cradle" }, { "index": 119, "color": "#47FF00", "label": "oven" }, { "index": 120, "color": "#FF00A3", "label": "ball" }, { "index": 121, "color": "#FFCC00", "label": "food;solid;food" }, { "index": 122, "color": "#FF008F", "label": "step;stair" }, { "index": 123, "color": "#00FFEB", "label": "tank;storage;tank" }, { "index": 124, "color": "#85FF00", "label": "trade;name;brand;name;brand;marque" }, { "index": 125, "color": "#FF00EB", "label": "microwave;microwave;oven" }, { "index": 126, "color": "#F500FF", "label": "pot;flowerpot" }, { "index": 127, "color": "#FF007A", "label": "animal;animate;being;beast;brute;creature;fauna" }, { "index": 128, "color": "#FFF500", "label": "bicycle;bike;wheel;cycle" }, { "index": 129, "color": "#0ABED4", "label": "lake" }, { "index": 130, "color": "#D6FF00", "label": "dishwasher;dish;washer;dishwashing;machine" }, { "index": 131, "color": "#00CCFF", "label": "screen;silver;screen;projection;screen" }, { "index": 132, "color": "#1400FF", "label": "blanket;cover" }, { "index": 133, "color": "#FFFF00", "label": "sculpture" }, { "index": 134, "color": "#0099FF", "label": "hood;exhaust;hood" }, { "index": 135, "color": "#0029FF", "label": "sconce" }, { "index": 136, "color": "#00FFCC", "label": "vase" }, { "index": 137, "color": "#2900FF", "label": "traffic;light;traffic;signal;stoplight" }, { "index": 138, "color": "#29FF00", "label": "tray" }, { "index": 139, "color": "#AD00FF", "label": "ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin" }, { "index": 140, "color": "#00F5FF", "label": "fan" }, { "index": 141, "color": "#4700FF", "label": "pier;wharf;wharfage;dock" }, { "index": 142, "color": "#7A00FF", "label": "crt;screen" }, { "index": 143, "color": "#00FFB8", "label": "plate" }, { "index": 144, "color": "#005CFF", "label": "monitor;monitoring;device" }, { "index": 145, "color": "#B8FF00", "label": "bulletin;board;notice;board" }, { "index": 146, "color": "#0085FF", "label": "shower" }, { "index": 147, "color": "#FFD600", "label": "radiator" }, { "index": 148, "color": "#19C2C2", "label": "glass;drinking;glass" }, { "index": 149, "color": "#66FF00", "label": "clock" }, { "index": 150, "color": "#5C00FF", "label": "flag" } ]
candle/candle-examples/examples/segformer/assets/labels.json/0
{ "file_path": "candle/candle-examples/examples/segformer/assets/labels.json", "repo_id": "candle", "token_count": 6397 }
33
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::io::Write; use std::path::PathBuf; use candle_transformers::models::t5; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; const DTYPE: DType = DType::F32; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { T5Base, T5Small, T5Large, T5_3B, Mt5Base, Mt5Small, Mt5Large, } #[derive(Parser, Debug, Clone)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model repository to use on the HuggingFace hub. #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] model_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, /// Enable decoding. #[arg(long)] decode: bool, // Enable/disable decoding. #[arg(long, default_value = "false")] disable_cache: bool, /// Use this prompt, otherwise compute sentence similarities. #[arg(long)] prompt: Option<String>, /// If set along with --decode, will use this prompt to initialize the decoder. #[arg(long)] decoder_prompt: Option<String>, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The model to be used. 
#[arg(long, default_value = "t5-small")] which: Which, } struct T5ModelBuilder { device: Device, config: t5::Config, weights_filename: Vec<PathBuf>, } impl T5ModelBuilder { pub fn load(args: &Args) -> Result<(Self, Tokenizer)> { let device = candle_examples::device(args.cpu)?; let (default_model, default_revision) = match args.which { Which::T5Base => ("t5-base", "main"), Which::T5Small => ("t5-small", "refs/pr/15"), Which::T5Large => ("t5-large", "main"), Which::T5_3B => ("t5-3b", "main"), Which::Mt5Base => ("google/mt5-base", "refs/pr/5"), Which::Mt5Small => ("google/mt5-small", "refs/pr/6"), Which::Mt5Large => ("google/mt5-large", "refs/pr/2"), }; let default_model = default_model.to_string(); let default_revision = default_revision.to_string(); let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id.clone(), RepoType::Model, revision); let api = Api::new()?; let repo = api.repo(repo); let config_filename = match &args.config_file { None => repo.get("config.json")?, Some(f) => f.into(), }; let tokenizer_filename = match &args.tokenizer_file { None => match args.which { Which::Mt5Base => api .model("lmz/mt5-tokenizers".into()) .get("mt5-base.tokenizer.json")?, Which::Mt5Small => api .model("lmz/mt5-tokenizers".into()) .get("mt5-small.tokenizer.json")?, Which::Mt5Large => api .model("lmz/mt5-tokenizers".into()) .get("mt5-large.tokenizer.json")?, _ => repo.get("tokenizer.json")?, }, Some(f) => f.into(), }; let weights_filename = match &args.model_file { Some(f) => f.split(',').map(|v| v.into()).collect::<Vec<_>>(), None => { if model_id == "google/flan-t5-xxl" || model_id == "google/flan-ul2" { candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? } else { vec![repo.get("model.safetensors")?] } } }; let config = std::fs::read_to_string(config_filename)?; let mut config: t5::Config = serde_json::from_str(&config)?; config.use_cache = !args.disable_cache; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; Ok(( Self { device, config, weights_filename, }, tokenizer, )) } pub fn build_encoder(&self) -> Result<t5::T5EncoderModel> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? }; Ok(t5::T5EncoderModel::load(vb, &self.config)?) } pub fn build_conditional_generation(&self) -> Result<t5::T5ForConditionalGeneration> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? }; Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?; let device = &builder.device; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; match args.prompt { Some(prompt) => { let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; if !args.decode { let mut model = builder.build_encoder()?; let start = std::time::Instant::now(); let ys = model.forward(&input_token_ids)?; println!("{ys}"); println!("Took {:?}", start.elapsed()); } else { let mut model = builder.build_conditional_generation()?; let mut output_token_ids = [builder .config .decoder_start_token_id .unwrap_or(builder.config.pad_token_id) as u32] .to_vec(); if let Some(decoder_prompt) = &args.decoder_prompt { print!("{decoder_prompt}"); output_token_ids.extend( tokenizer .encode(decoder_prompt.to_string(), false) .map_err(E::msg)? .get_ids() .to_vec(), ); } let temperature = if args.temperature <= 0. { None } else { Some(args.temperature) }; let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p); let encoder_output = model.encode(&input_token_ids)?; let start = std::time::Instant::now(); for index in 0.. { if output_token_ids.len() > 512 { break; } let decoder_token_ids = if index == 0 || !builder.config.use_cache { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == builder.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); print!("{text}"); std::io::stdout().flush()?; } } let dt = start.elapsed(); println!( "\n{} tokens generated ({:.2} token/s)\n", output_token_ids.len(), output_token_ids.len() as f64 / dt.as_secs_f64(), ); } } None => { let mut model = builder.build_encoder()?; let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = tokenizer .encode(sentence, true) .map_err(E::msg)? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], model.device())?.unsqueeze(0)?; let embeddings = model.forward(&token_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? 
} else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); all_embeddings.push(embeddings) } let mut similarities = vec![]; for (i, e_i) in all_embeddings.iter().enumerate() { for (j, e_j) in all_embeddings .iter() .enumerate() .take(n_sentences) .skip(i + 1) { let sum_ij = (e_i * e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (e_i * e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (e_j * e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
candle/candle-examples/examples/t5/main.rs/0
{ "file_path": "candle/candle-examples/examples/t5/main.rs", "repo_id": "candle", "token_count": 6911 }
34
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; mod model; use model::{Multiples, YoloV8, YoloV8Pose}; use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use candle_transformers::object_detection::{non_maximum_suppression, Bbox, KeyPoint}; use clap::{Parser, ValueEnum}; use image::DynamicImage; // Keypoints as reported by ChatGPT :) // Nose // Left Eye // Right Eye // Left Ear // Right Ear // Left Shoulder // Right Shoulder // Left Elbow // Right Elbow // Left Wrist // Right Wrist // Left Hip // Right Hip // Left Knee // Right Knee // Left Ankle // Right Ankle const KP_CONNECTIONS: [(usize, usize); 16] = [ (0, 1), (0, 2), (1, 3), (2, 4), (5, 6), (5, 11), (6, 12), (11, 12), (5, 7), (6, 8), (7, 9), (8, 10), (11, 13), (12, 14), (13, 15), (14, 16), ]; // Model architecture from https://github.com/ultralytics/ultralytics/issues/189 // https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py pub fn report_detect( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage> { let pred = pred.to_device(&Device::Cpu)?; let (pred_size, npreds) = pred.dims2()?; let nclasses = pred_size - 4; // The bounding boxes grouped by (maximum) class index. let mut bboxes: Vec<Vec<Bbox<Vec<KeyPoint>>>> = (0..nclasses).map(|_| vec![]).collect(); // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = *pred[4..].iter().max_by(|x, y| x.total_cmp(y)).unwrap(); if confidence > confidence_threshold { let mut class_index = 0; for i in 0..nclasses { if pred[4 + i] > pred[4 + class_index] { class_index = i } } if pred[class_index + 4] > 0. { let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, data: vec![], }; bboxes[class_index].push(bbox) } } } non_maximum_suppression(&mut bboxes, nms_threshold); // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height(), img.width()); let w_ratio = initial_w as f32 / w as f32; let h_ratio = initial_h as f32 / h as f32; let mut img = img.to_rgb8(); let font = Vec::from(include_bytes!("roboto-mono-stripped.ttf") as &[u8]); let font = ab_glyph::FontRef::try_from_slice(&font).map_err(candle::Error::wrap)?; for (class_index, bboxes_for_class) in bboxes.iter().enumerate() { for b in bboxes_for_class.iter() { println!( "{}: {:?}", candle_examples::coco_classes::NAMES[class_index], b ); let xmin = (b.xmin * w_ratio) as i32; let ymin = (b.ymin * h_ratio) as i32; let dx = (b.xmax - b.xmin) * w_ratio; let dy = (b.ymax - b.ymin) * h_ratio; if dx >= 0. && dy >= 0. { imageproc::drawing::draw_hollow_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, dy as u32), image::Rgb([255, 0, 0]), ); } if legend_size > 0 { imageproc::drawing::draw_filled_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, legend_size), image::Rgb([170, 0, 0]), ); let legend = format!( "{} {:.0}%", candle_examples::coco_classes::NAMES[class_index], 100. 
* b.confidence ); imageproc::drawing::draw_text_mut( &mut img, image::Rgb([255, 255, 255]), xmin, ymin, ab_glyph::PxScale { x: legend_size as f32 - 1., y: legend_size as f32 - 1., }, &font, &legend, ) } } } Ok(DynamicImage::ImageRgb8(img)) } pub fn report_pose( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, ) -> Result<DynamicImage> { let pred = pred.to_device(&Device::Cpu)?; let (pred_size, npreds) = pred.dims2()?; if pred_size != 17 * 3 + 4 + 1 { candle::bail!("unexpected pred-size {pred_size}"); } let mut bboxes = vec![]; // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = pred[4]; if confidence > confidence_threshold { let keypoints = (0..17) .map(|i| KeyPoint { x: pred[3 * i + 5], y: pred[3 * i + 6], mask: pred[3 * i + 7], }) .collect::<Vec<_>>(); let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, data: keypoints, }; bboxes.push(bbox) } } let mut bboxes = vec![bboxes]; non_maximum_suppression(&mut bboxes, nms_threshold); let bboxes = &bboxes[0]; // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height(), img.width()); let w_ratio = initial_w as f32 / w as f32; let h_ratio = initial_h as f32 / h as f32; let mut img = img.to_rgb8(); for b in bboxes.iter() { println!("{b:?}"); let xmin = (b.xmin * w_ratio) as i32; let ymin = (b.ymin * h_ratio) as i32; let dx = (b.xmax - b.xmin) * w_ratio; let dy = (b.ymax - b.ymin) * h_ratio; if dx >= 0. && dy >= 0. { imageproc::drawing::draw_hollow_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, dy as u32), image::Rgb([255, 0, 0]), ); } for kp in b.data.iter() { if kp.mask < 0.6 { continue; } let x = (kp.x * w_ratio) as i32; let y = (kp.y * h_ratio) as i32; imageproc::drawing::draw_filled_circle_mut( &mut img, (x, y), 2, image::Rgb([0, 255, 0]), ); } for &(idx1, idx2) in KP_CONNECTIONS.iter() { let kp1 = &b.data[idx1]; let kp2 = &b.data[idx2]; if kp1.mask < 0.6 || kp2.mask < 0.6 { continue; } imageproc::drawing::draw_line_segment_mut( &mut img, (kp1.x * w_ratio, kp1.y * h_ratio), (kp2.x * w_ratio, kp2.y * h_ratio), image::Rgb([255, 255, 0]), ); } } Ok(DynamicImage::ImageRgb8(img)) } #[derive(Clone, Copy, ValueEnum, Debug)] enum Which { N, S, M, L, X, } #[derive(Clone, Copy, ValueEnum, Debug)] enum YoloTask { Detect, Pose, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Model weights, in safetensors format. #[arg(long)] model: Option<String>, /// Which model variant to use. #[arg(long, value_enum, default_value_t = Which::S)] which: Which, images: Vec<String>, /// Threshold for the model confidence level. #[arg(long, default_value_t = 0.25)] confidence_threshold: f32, /// Threshold for non-maximum suppression. #[arg(long, default_value_t = 0.45)] nms_threshold: f32, /// The task to be run. #[arg(long, default_value = "detect")] task: YoloTask, /// The size for the legend, 0 means no legend. 
#[arg(long, default_value_t = 14)] legend_size: u32, } impl Args { fn model(&self) -> anyhow::Result<std::path::PathBuf> { let path = match &self.model { Some(model) => std::path::PathBuf::from(model), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-yolo-v8".to_string()); let size = match self.which { Which::N => "n", Which::S => "s", Which::M => "m", Which::L => "l", Which::X => "x", }; let task = match self.task { YoloTask::Pose => "-pose", YoloTask::Detect => "", }; api.get(&format!("yolov8{size}{task}.safetensors"))? } }; Ok(path) } } pub trait Task: Module + Sized { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self>; fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage>; } impl Task for YoloV8 { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self> { YoloV8::load(vb, multiples, /* num_classes=*/ 80) } fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage> { report_detect( pred, img, w, h, confidence_threshold, nms_threshold, legend_size, ) } } impl Task for YoloV8Pose { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self> { YoloV8Pose::load(vb, multiples, /* num_classes=*/ 1, (17, 3)) } fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, _legend_size: u32, ) -> Result<DynamicImage> { report_pose(pred, img, w, h, confidence_threshold, nms_threshold) } } pub fn run<T: Task>(args: Args) -> anyhow::Result<()> { let device = candle_examples::device(args.cpu)?; // Create the model and load the weights from the file. let multiples = match args.which { Which::N => Multiples::n(), Which::S => Multiples::s(), Which::M => Multiples::m(), Which::L => Multiples::l(), Which::X => Multiples::x(), }; let model = args.model()?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let model = T::load(vb, multiples)?; println!("model loaded"); for image_name in args.images.iter() { println!("processing {image_name}"); let mut image_name = std::path::PathBuf::from(image_name); let original_image = image::ImageReader::open(&image_name)? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. (w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &device, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = model.forward(&image_t)?.squeeze(0)?; println!("generated predictions {predictions:?}"); let image_t = T::report( &predictions, original_image, width, height, args.confidence_threshold, args.nms_threshold, args.legend_size, )?; image_name.set_extension("pp.jpg"); println!("writing {image_name:?}"); image_t.save(image_name)? 
} Ok(()) } pub fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; match args.task { YoloTask::Detect => run::<YoloV8>(args)?, YoloTask::Pose => run::<YoloV8Pose>(args)?, } Ok(()) }
candle/candle-examples/examples/yolo-v8/main.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v8/main.rs", "repo_id": "candle", "token_count": 7410 }
35
#pragma once /* Stripped-down stand-ins for PyTorch's c10 CUDA error-checking macros: EXPR is evaluated and its error code stored but deliberately not acted on, presumably so the vendored flash-attention kernels can build without a c10/torch dependency. */ #define C10_CUDA_CHECK(EXPR) \ do { \ const cudaError_t __err = EXPR; \ } while (0) #define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())
candle/candle-flash-attn/kernels/error.h/0
{ "file_path": "candle/candle-flash-attn/kernels/error.h", "repo_id": "candle", "token_count": 216 }
36
mod ffi; use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::DevicePtr; use candle::cuda_backend::WrapErr; use candle::{CpuStorage, DType, Layout, Result, Shape, Tensor}; use half::{bf16, f16}; pub struct FlashAttn { pub softmax_scale: f32, pub alibi_slopes: Option<Tensor>, pub window_size_left: Option<usize>, pub window_size_right: Option<usize>, } fn round_multiple(x: usize, m: usize) -> usize { (x + m - 1) / m * m } impl FlashAttn { fn cuda_fwd_t< T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr, >( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, is_bf16: bool, ) -> Result<(candle::CudaStorage, Shape)> { // https://github.com/Dao-AILab/flash-attention/blob/b252072409e69c25f2b9d473cc534e49b24decd2/csrc/flash_attn/flash_api.cpp#L187 let dev = q.device(); let out_shape = q_l.shape().clone(); let out_l = Layout::contiguous(&out_shape); let q = q.as_cuda_slice::<T>()?; let k = k.as_cuda_slice::<T>()?; let v = v.as_cuda_slice::<T>()?; let q = q.slice(q_l.start_offset()..); let k = k.slice(k_l.start_offset()..); let v = v.slice(v_l.start_offset()..); let q_stride = q_l.stride(); let k_stride = k_l.stride(); let v_stride = v_l.stride(); let o_stride = out_l.stride(); let q_rank = q_stride.len(); let k_rank = k_stride.len(); let v_rank = v_stride.len(); let o_rank = o_stride.len(); if q_rank != 4 || k_rank != 4 || v_rank != 4 { candle::bail!( "flash-attn expects input tensors of rank 4 (q: {q_rank}, k: {k_rank}, v: {v_rank}" ) } if q_stride[q_rank - 1] != 1 { candle::bail!("the last dim of q must be contiguous {q_stride:?}") } if k_stride[k_rank - 1] != 1 { candle::bail!("the last dim of k must be contiguous {k_stride:?}") } if v_stride[v_rank - 1] != 1 { candle::bail!("the last dim of v must be contiguous {v_stride:?}") } let (b_sz, seqlen_q, num_heads, head_size_og) = q_l.shape().dims4()?; let (_b_sz, seqlen_k, num_heads_k, _head_size_og) = k_l.shape().dims4()?; let expected_kv = (b_sz, seqlen_k, num_heads_k, head_size_og); if expected_kv != k_l.shape().dims4()? { candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape()) } if expected_kv != v_l.shape().dims4()? { candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape()) } if head_size_og > 256 { candle::bail!("only supports head dimension at most 256 (got {head_size_og})") } if head_size_og % 8 != 0 { // TODO: Handle head sizes that are not a multiple of 8 via some padding. candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})") } if num_heads % num_heads_k != 0 { candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}") } let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes { if alibi_slopes.dtype() != DType::F32 { candle::bail!( "DType mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes.dtype(), DType::F32 ); } let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout(); if num_heads != alibi_slopes_layout.shape().dims1()? 
{ candle::bail!( "shape mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes_layout.shape(), (num_heads) ); } let alibi_slopes = match &*alibi_slopes { candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?, _ => candle::bail!("alibi_slopes must be a cuda tensor"), }; let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..); *alibi_slopes.device_ptr() as *const core::ffi::c_void } else { std::ptr::null() }; // if window_size_left > self.max_seqlen_k or None => -1 let mut window_size_left = self .window_size_left .filter(|v| v <= &seqlen_k) .map(|v| v as i32) .unwrap_or(-1); // if window_size_right > self.max_seqlen_k or None => -1 let mut window_size_right = self .window_size_right .filter(|v| v <= &seqlen_k) .map(|v| v as i32) .unwrap_or(-1); let head_size = round_multiple(head_size_og, 8); let head_size_rounded = round_multiple(head_size, 32); let seqlen_q_rounded = round_multiple(seqlen_q, 128); let seqlen_k_rounded = round_multiple(seqlen_k, 128); let elem_count = out_shape.elem_count(); let dst = unsafe { dev.alloc::<T>(elem_count) }.w()?; let softmax_lse = dev .alloc_zeros::<f32>(b_sz * 128 * num_heads * seqlen_q) .w()?; let is_bf16 = if is_bf16 { 1 } else { 0 }; // Causal is the special case where window_size_right == 0 and window_size_left < 0. // Local is the more general case where window_size_right >= 0 or window_size_left >= 0. let is_causal = if window_size_left < 0 && window_size_right == 0 { 1 } else { 0 }; if window_size_left < 0 && window_size_right >= 0 { window_size_left = seqlen_k as i32; } if window_size_left >= 0 && window_size_right < 0 { window_size_right = seqlen_k as i32; } unsafe { let q_ptr = *q.device_ptr() as *const core::ffi::c_void; let k_ptr = *k.device_ptr() as *const core::ffi::c_void; let v_ptr = *v.device_ptr() as *const core::ffi::c_void; let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void; let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void; ffi::run_mha( q_ptr, k_ptr, v_ptr, dst_ptr, softmax_lse_ptr, /* alibi_slopes_ptr */ alibi_slopes_ptr, /* cu_seqlens_q_ptr */ std::ptr::null(), /* cu_seqlens_k_ptr */ std::ptr::null(), /* q_batch_stride */ q_stride[0] as u32, /* k_batch_stride */ k_stride[0] as u32, /* v_batch_stride */ v_stride[0] as u32, /* o_batch_stride */ o_stride[0] as u32, /* alibi_slopes_batch_stride */ 0, /* q_row_stride */ q_stride[q_rank - 3] as u32, /* k_row_stride */ k_stride[k_rank - 3] as u32, /* v_row_stride */ v_stride[v_rank - 3] as u32, /* o_row_stride */ o_stride[o_rank - 3] as u32, /* q_head_stride */ q_stride[q_rank - 2] as u32, /* k_head_stride */ k_stride[k_rank - 2] as u32, /* v_head_stride */ v_stride[v_rank - 2] as u32, /* o_head_stride */ o_stride[o_rank - 2] as u32, /* b */ b_sz as u32, /* h */ num_heads as u32, /* h_k */ num_heads_k as u32, /* d */ head_size as u32, /* d_rounded */ head_size_rounded as u32, /* softmax_scale*/ self.softmax_scale, /* seqlen_q */ seqlen_q as u32, /* seqlen_k */ seqlen_k as u32, /* seqlen_q_rounded */ seqlen_q_rounded as u32, /* seqlen_k_rounded */ seqlen_k_rounded as u32, /* is_bf16 */ is_bf16, /* is_causal */ is_causal, /* window_size_left */ window_size_left, /* window_size_right */ window_size_right, ) } let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone()); Ok((dst, out_shape)) } } impl candle::CustomOp3 for FlashAttn { fn name(&self) -> &'static str { "flash-attn" } fn cpu_fwd( &self, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, ) -> Result<(CpuStorage, Shape)> { candle::bail!("no 
cpu support for flash-attn") } fn cuda_fwd( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { match q.dtype() { candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false), candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true), dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"), } } } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttn { softmax_scale, alibi_slopes: None, window_size_left, window_size_right, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn_windowed( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttn { softmax_scale, alibi_slopes: None, window_size_left, window_size_right, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. 
pub fn flash_attn_alibi( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttn { softmax_scale, alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn_alibi_windowed( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttn { softmax_scale, alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, }; q.apply_op3(k, v, op) } struct FlashAttnVarLen { pub softmax_scale: f32, pub max_seqlen_q: usize, pub max_seqlen_k: usize, pub seqlens_q: Tensor, pub seqlens_k: Tensor, pub alibi_slopes: Option<Tensor>, pub window_size_left: Option<usize>, pub window_size_right: Option<usize>, } impl FlashAttnVarLen { fn cuda_fwd_t< T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr, >( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, is_bf16: bool, ) -> Result<(candle::CudaStorage, Shape)> { // https://github.com/Dao-AILab/flash-attention/blob/184b992dcb2a0890adaa19eb9b541c3e4f9d2a08/csrc/flash_attn/flash_api.cpp#L327 let dev = q.device(); let out_shape = q_l.shape().clone(); let out_l = Layout::contiguous(&out_shape); let (seqlens_q, seqlens_q_layout) = self.seqlens_q.storage_and_layout(); let seqlens_q = match &*seqlens_q { candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32! _ => candle::bail!("seqlens_q must be a cuda tensor"), }; let seqlens_q = match seqlens_q_layout.contiguous_offsets() { Some((o1, o2)) => seqlens_q.slice(o1..o2), None => candle::bail!("seqlens_q has to be contiguous"), }; let (seqlens_k, seqlens_k_layout) = self.seqlens_k.storage_and_layout(); let seqlens_k = match &*seqlens_k { candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32! 
_ => candle::bail!("seqlens_k must be a cuda tensor"), }; let seqlens_k = match seqlens_k_layout.contiguous_offsets() { Some((o1, o2)) => seqlens_k.slice(o1..o2), None => candle::bail!("seqlens_k has to be contiguous"), }; let q = q.as_cuda_slice::<f16>()?; let k = k.as_cuda_slice::<f16>()?; let v = v.as_cuda_slice::<f16>()?; let q = q.slice(q_l.start_offset()..); let k = k.slice(k_l.start_offset()..); let v = v.slice(v_l.start_offset()..); let q_stride = q_l.stride(); let k_stride = k_l.stride(); let v_stride = v_l.stride(); let o_stride = out_l.stride(); let q_rank = q_stride.len(); let k_rank = k_stride.len(); let v_rank = v_stride.len(); let o_rank = o_stride.len(); if q_rank != 3 || k_rank != 3 || v_rank != 3 { candle::bail!( "flash-attn-varlen expects input tensors of rank 3 (q: {q_rank}, k: {k_rank}, v: {v_rank}" ) } if q_stride[q_rank - 1] != 1 { candle::bail!("the last dim of q must be contiguous {q_stride:?}") } if k_stride[k_rank - 1] != 1 { candle::bail!("the last dim of k must be contiguous {k_stride:?}") } if v_stride[v_rank - 1] != 1 { candle::bail!("the last dim of v must be contiguous {v_stride:?}") } let (_total_q, num_heads, head_size_og) = q_l.shape().dims3()?; let (total_k, num_heads_k, _head_size_og) = k_l.shape().dims3()?; let expected_kv = (total_k, num_heads_k, head_size_og); if expected_kv != k_l.shape().dims3()? { candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape()) } if expected_kv != v_l.shape().dims3()? { candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape()) } if head_size_og > 256 { candle::bail!("only supports head dimension at most 256 (got {head_size_og})") } if head_size_og % 8 != 0 { // TODO: Handle head sizes that are not a multiple of 8 via some padding. candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})") } if num_heads % num_heads_k != 0 { candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}") } let nseqlens_q = seqlens_q_layout.shape().dims1()?; if nseqlens_q < 2 { candle::bail!("seqlens_q should have a len >= 2 {nseqlens_q}") } let nseqlens_k = seqlens_k_layout.shape().dims1()?; if nseqlens_k != nseqlens_q { candle::bail!("seqlens_q and seqlens_k should have the same number of elements {nseqlens_q} <> {nseqlens_k}") } let batch_size = nseqlens_q - 1; let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes { if alibi_slopes.dtype() != DType::F32 { candle::bail!( "DType mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes.dtype(), DType::F32 ); } let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout(); if num_heads != alibi_slopes_layout.shape().dims1()? 
{ candle::bail!( "shape mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes_layout.shape(), (num_heads) ); } let alibi_slopes = match &*alibi_slopes { candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?, _ => candle::bail!("alibi_slopes must be a cuda tensor"), }; let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..); *alibi_slopes.device_ptr() as *const core::ffi::c_void } else { std::ptr::null() }; // if window_size_left > self.max_seqlen_k or None => -1 let mut window_size_left = self .window_size_left .filter(|v| v <= &self.max_seqlen_k) .map(|v| v as i32) .unwrap_or(-1); // if window_size_right > self.max_seqlen_k or None => -1 let mut window_size_right = self .window_size_right .filter(|v| v <= &self.max_seqlen_k) .map(|v| v as i32) .unwrap_or(-1); let head_size = round_multiple(head_size_og, 8); let head_size_rounded = round_multiple(head_size, 32); let seqlen_q_rounded = round_multiple(self.max_seqlen_q, 128); let seqlen_k_rounded = round_multiple(self.max_seqlen_k, 128); let elem_count = out_shape.elem_count(); let dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?; let softmax_lse = dev .alloc_zeros::<f32>(batch_size * num_heads * self.max_seqlen_q) .w()?; let is_bf16 = if is_bf16 { 1 } else { 0 }; // Causal is the special case where window_size_right == 0 and window_size_left < 0. // Local is the more general case where window_size_right >= 0 or window_size_left >= 0. let is_causal = if window_size_left < 0 && window_size_right == 0 { 1 } else { 0 }; if window_size_left < 0 && window_size_right >= 0 { window_size_left = self.max_seqlen_k as i32; } if window_size_left >= 0 && window_size_right < 0 { window_size_right = self.max_seqlen_k as i32; } unsafe { let q_ptr = *q.device_ptr() as *const core::ffi::c_void; let k_ptr = *k.device_ptr() as *const core::ffi::c_void; let v_ptr = *v.device_ptr() as *const core::ffi::c_void; let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void; let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void; let seqlens_q_ptr = *seqlens_q.device_ptr() as *const core::ffi::c_int; let seqlens_k_ptr = *seqlens_k.device_ptr() as *const core::ffi::c_int; ffi::run_mha( q_ptr, k_ptr, v_ptr, dst_ptr, softmax_lse_ptr, /* alibi_slopes_ptr */ alibi_slopes_ptr, /* cu_seqlens_q_ptr */ seqlens_q_ptr, /* cu_seqlens_k_ptr */ seqlens_k_ptr, /* q_batch_stride */ 0, /* k_batch_stride */ 0, /* v_batch_stride */ 0, /* o_batch_stride */ 0, /* alibi_slopes_batch_stride */ 0, /* q_row_stride */ q_stride[q_rank - 3] as u32, /* k_row_stride */ k_stride[k_rank - 3] as u32, /* v_row_stride */ v_stride[v_rank - 3] as u32, /* o_row_stride */ o_stride[o_rank - 3] as u32, /* q_head_stride */ q_stride[q_rank - 2] as u32, /* k_head_stride */ k_stride[k_rank - 2] as u32, /* v_head_stride */ v_stride[v_rank - 2] as u32, /* o_head_stride */ o_stride[o_rank - 2] as u32, /* b */ batch_size as u32, /* h */ num_heads as u32, /* h_k */ num_heads_k as u32, /* d */ head_size as u32, /* d_rounded */ head_size_rounded as u32, /* softmax_scale*/ self.softmax_scale, /* seqlen_q */ self.max_seqlen_q as u32, /* seqlen_k */ self.max_seqlen_k as u32, /* seqlen_q_rounded */ seqlen_q_rounded as u32, /* seqlen_k_rounded */ seqlen_k_rounded as u32, /* is_bf16 */ is_bf16, /* is_causal */ is_causal, /* window_size_left */ window_size_left, /* window_size_right */ window_size_right, ) } let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone()); Ok((dst, out_shape)) } } impl candle::CustomOp3 for FlashAttnVarLen { fn name(&self) -> &'static str { 
"flash-attn-varlen" } fn cpu_fwd( &self, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, ) -> Result<(CpuStorage, Shape)> { candle::bail!("no cpu support for flash-attn") } fn cuda_fwd( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { match q.dtype() { candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false), candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true), dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"), } } } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. pub fn flash_attn_varlen( q: &Tensor, k: &Tensor, v: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: None, window_size_left, window_size_right, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. 
/// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` pub fn flash_attn_varlen_windowed( q: &Tensor, k: &Tensor, v: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: None, window_size_left, window_size_right, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. pub fn flash_attn_varlen_alibi( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. 
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` pub fn flash_attn_varlen_alibi_windowed( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, }; q.apply_op3(k, v, op) }
candle/candle-flash-attn/src/lib.rs/0
{ "file_path": "candle/candle-flash-attn/src/lib.rs", "repo_id": "candle", "token_count": 15978 }
37
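The file above exposes `flash_attn_varlen` for packed, variable-length batches. A minimal usage sketch, not taken from the crate itself: it assumes a CUDA device and f16 tensors (both required by the flash-attn custom op), packs two sequences of lengths 3 and 5 along the first axis, and passes the cumulative lengths `[0, 3, 8]` for both q and k/v.

use candle::{DType, Device, Result, Tensor};

// Illustrative only: `device` has to be a CUDA device for the custom op to run.
fn varlen_example(device: &Device) -> Result<Tensor> {
    let (num_heads, head_size) = (8usize, 64usize);
    // total_q = total_kv = 3 + 5 = 8 packed rows.
    let q = Tensor::randn(0f32, 1., (8, num_heads, head_size), device)?.to_dtype(DType::F16)?;
    let k = q.clone();
    let v = q.clone();
    // Cumulative sequence lengths: batch_size + 1 entries, starting at 0.
    let seqlens = Tensor::new(&[0u32, 3, 8], device)?;
    candle_flash_attn::flash_attn_varlen(
        &q,
        &k,
        &v,
        &seqlens,
        &seqlens,
        /* max_seqlen_q */ 5,
        /* max_seqlen_k */ 5,
        /* softmax_scale */ 1. / (head_size as f32).sqrt(),
        /* causal */ true,
    )
}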
#include "cuda_utils.cuh" #include <cmath> #include <stdint.h> #define WARP_SIZE 32 const int BLOCK_SIZE = 1024; // TODO: Maybe add some fast_sum_f16_f32 variant that not only accumulate in f32 // but also expect a f32 output so that this can be used for normalization e.g. // in softmax. // Fast reduce sum kernel, this assumes that the dimensions to loop over are at // the end, each block is responsible for populating one value in the output // array. There are at most 1024 threads per block. template <typename T> __device__ void fast_sum(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = 0; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] += src[strided_i]; idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] += shr[tid + s]; } if (tid == 0) dst[dst_id] = shr[0]; } static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32); a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32); } return a; } static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x += __shfl_xor_sync(0xffffffff, x, mask, 32); } return x; } // LayerNorm implementation adapted from ggml, accumulation is made using f32. 
// https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L477 template <typename T> __device__ void layernorm(const T * x, T * dst, const T * alpha, const T * beta, const int ncols, const float eps) { const int row = blockIdx.x*blockDim.y + threadIdx.y; const int tid = threadIdx.x; const int block_size = blockDim.x; float2 mean_var = make_float2(0.f, 0.f); for (int col = tid; col < ncols; col += block_size) { const float xi = x[row*ncols + col]; mean_var.x += xi; mean_var.y += xi * xi; } // sum up partial sums mean_var = warp_reduce_sum(mean_var); if (block_size > WARP_SIZE) { __shared__ float2 s_sum[32]; int warp_id = threadIdx.x / WARP_SIZE; int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = mean_var; } __syncthreads(); mean_var = s_sum[lane_id]; mean_var = warp_reduce_sum(mean_var); } const float mean = mean_var.x / ncols; const float var = mean_var.y / ncols - mean * mean; const float inv_std = rsqrtf(var + eps); if (alpha == nullptr && beta == nullptr) { for (int col = tid; col < ncols; col += block_size) { float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs); } } else if (alpha == nullptr && beta != nullptr) { for (int col = tid; col < ncols; col += block_size) { float b = static_cast<float>(beta[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs + b); } } else if (alpha != nullptr && beta == nullptr) { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs * a); } } else { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); float b = static_cast<float>(beta[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs * a + b); } } } // RmsNorm implementation adapted from ggml, accumulation is made using f32. // https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L523 template <typename T> __device__ void rmsnorm(const T * x, T * dst, const T * alpha, const int ncols, const float eps) { const int row = blockIdx.x*blockDim.y + threadIdx.y; const int tid = threadIdx.x; const int block_size = blockDim.x; float tmp = 0.0f; // partial sum for thread in warp for (int col = tid; col < ncols; col += block_size) { const float xi = static_cast<float>(x[row*ncols + col]); tmp += xi * xi; } // sum up partial sums tmp = warp_reduce_sum(tmp); if (block_size > WARP_SIZE) { __shared__ float s_sum[32]; int warp_id = threadIdx.x / WARP_SIZE; int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = s_sum[lane_id]; tmp = warp_reduce_sum(tmp); } const float mean = tmp / ncols; const float scale = rsqrtf(mean + eps); if (alpha == nullptr) { for (int col = tid; col < ncols; col += block_size) { dst[row*ncols + col] = static_cast<T>(scale * static_cast<float>(x[row*ncols + col])); } } else { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); dst[row*ncols + col] = static_cast<T>(scale * static_cast<float>(x[row*ncols + col]) * a); } } } // Softmax implementation adapted from ggml. 
// https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L4159 template <typename T, typename ACC> __device__ void softmax(const T * x, T * dst, const int ncols) { const int row = blockDim.x*blockIdx.x + threadIdx.x; const int block_size = blockDim.y; const int tid = threadIdx.y; T max_val = -INFINITY; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; max_val = maxg(max_val, x[i]); } // find the max value in the block #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { max_val = maxg(max_val, __shfl_xor_sync(0xffffffff, max_val, mask, 32)); } ACC tmp = 0.; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; const T val = expg(x[i] - max_val); tmp += static_cast<ACC>(val); dst[i] = val; } // sum up partial sums #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } const ACC inv_tmp = 1. / tmp; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; dst[i] *= inv_tmp; } } template <typename T> __device__ void ropei(const T * src, const T * cos, const T * sin, T * dst, const uint32_t bh, const uint32_t td) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= bh * td) return; uint32_t rope_idx = idx % (td / 2); T c = cos[rope_idx]; T s = sin[rope_idx]; dst[2 * idx] = src[2 * idx] * c - src[2 * idx + 1] * s; dst[2 * idx + 1] = src[2 * idx] * s + src[2 * idx + 1] * c; } template <typename T> __device__ void rope(const T * src, const T * cos, const T * sin, T * dst, const uint32_t bh, const uint32_t td, const uint32_t d) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= bh * td) return; uint32_t i_bh = idx / (td / 2); uint32_t i_td = idx - (td / 2) * i_bh; uint32_t i_t = i_td / (d / 2); uint32_t i_d = i_td - (d / 2) * i_t; uint32_t i1 = i_bh * td + i_t * d + i_d; uint32_t i2 = i1 + d / 2; uint32_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } template <typename T> __device__ void rope_thd( const T * src, const T * cos, const T * sin, T * dst, const uint32_t b, const uint32_t t, const uint32_t h, const uint32_t d ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= b * t * h * d) return; uint32_t i_bth = idx / (d / 2); uint32_t i_d = idx - (d / 2) * i_bth; uint32_t i_t = (i_bth / h) % t; uint32_t i1 = i_bth * d + i_d; uint32_t i2 = i1 + d / 2; uint32_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } template <typename T> __device__ void fast_max(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = -INFINITY; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. 
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] = maxg(shr[tid], src[strided_i]); idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] = maxg(shr[tid], shr[tid + s]); } if (tid == 0) dst[dst_id] = shr[0]; } template <typename T> __device__ void fast_min(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = INFINITY; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] = ming(shr[tid], src[strided_i]); idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] = ming(shr[tid], shr[tid + s]); } if (tid == 0) dst[dst_id] = shr[0]; } template <typename T> __device__ void fast_argmin(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, uint32_t *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; __shared__ uint32_t shr_index[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; // Not sure how that works on uint32_t and uint8_t but it seems to do ok. shr[tid] = INFINITY; shr_index[tid] = 0xFFFFFFFF; bool not_set = true; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (not_set || src[strided_i] < shr[tid]) { shr[tid] = src[strided_i]; // Assume that the reduction takes place over the last dimension which is contiguous. 
shr_index[tid] = idx % dims[num_dims - 1]; not_set = false; } idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s && shr[tid + s] < shr[tid]) { shr[tid] = shr[tid + s]; shr_index[tid] = shr_index[tid + s]; } } if (tid == 0) dst[dst_id] = shr_index[0]; } template <typename T> __device__ void fast_argmax(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, uint32_t *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; __shared__ uint32_t shr_index[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = -INFINITY; shr_index[tid] = 0xFFFFFFFF; bool not_set = true; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (not_set || src[strided_i] > shr[tid]) { shr[tid] = src[strided_i]; // Assume that the reduction takes place over the last dimension which is contiguous. shr_index[tid] = idx % dims[num_dims - 1]; not_set = false; } idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s && shr[tid + s] > shr[tid]) { shr[tid] = shr[tid + s]; shr_index[tid] = shr_index[tid + s]; } } if (tid == 0) dst[dst_id] = shr_index[0]; } #define FAST_OP(TYPENAME, MIN_NAME, MAX_NAME, ARGMIN_NAME, ARGMAX_NAME, SUM_NAME) \ extern "C" __global__ void ARGMIN_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ uint32_t *dst) { \ fast_argmin(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void ARGMAX_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ uint32_t *dst) { \ fast_argmax(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void MIN_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_min(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void MAX_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_max(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void SUM_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_sum(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } #define SUM_OP(TYPENAME, FN_NAME) \ 
extern "C" __global__ void FN_NAME( \ const size_t numel, const size_t num_dims, const size_t num_sum_dims, \ const size_t *info, const TYPENAME *inp, TYPENAME *out) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ const size_t *sum_dims_l = info + 2 * num_dims; \ const size_t *sum_dims_s = info + 2 * num_dims + num_sum_dims; \ if (is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; \ i += blockDim.x * gridDim.x) { \ size_t dst_index = i; \ for (unsigned int nd = 0; nd < num_sum_dims; ++nd) { \ size_t stride = sum_dims_s[nd]; \ size_t pre = dst_index / stride; \ size_t post = dst_index % stride; \ dst_index = (pre / sum_dims_l[nd]) * stride + post; \ } \ atomicAdd(out + dst_index, inp[i]); \ } \ } else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; \ i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ size_t dst_index = i; \ for (unsigned int nd = 0; nd < num_sum_dims; ++nd) { \ size_t stride = sum_dims_s[nd]; \ size_t pre = dst_index / stride; \ size_t post = dst_index % stride; \ dst_index = (pre / sum_dims_l[nd]) * stride + post; \ } \ atomicAdd(out + dst_index, inp[strided_i]); \ } \ } \ } #define SOFTMAX_OP(TYPENAME, ACC_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, \ const int n_cols) { \ softmax<TYPENAME, ACC_TYPENAME>(src, dst, n_cols); \ } \ #define RMSNORM_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, const TYPENAME *alpha, \ const int n_cols, const float eps) { \ rmsnorm<TYPENAME>(src, dst, alpha, n_cols, eps); \ } \ #define LAYERNORM_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, const TYPENAME *alpha, \ const TYPENAME *beta, const int n_cols, const float eps) { \ layernorm<TYPENAME>(src, dst, alpha, beta, n_cols, eps); \ } \ #define ROPE_OP(TYPENAME, FN_NAME, FN_NAME_I, FN_NAME_THD) \ extern "C" __global__ void FN_NAME_I( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t bh, \ const uint32_t td) { \ ropei<TYPENAME>(src, cos, sin, dst, bh, td); \ } \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t bh, \ const uint32_t td, \ const uint32_t d) { \ rope<TYPENAME>(src, cos, sin, dst, bh, td, d); \ } \ extern "C" __global__ void FN_NAME_THD( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t b, \ const uint32_t t, \ const uint32_t h, \ const uint32_t d) { \ rope_thd<TYPENAME>(src, cos, sin, dst, b, t, h, d); \ } \ #if __CUDA_ARCH__ >= 800 SOFTMAX_OP(__nv_bfloat16, float, softmax_bf16) RMSNORM_OP(__nv_bfloat16, rmsnorm_bf16) LAYERNORM_OP(__nv_bfloat16, layernorm_bf16) ROPE_OP(__nv_bfloat16, rope_bf16, rope_i_bf16, rope_thd_bf16) SUM_OP(__nv_bfloat16, sum_bf16) FAST_OP(__nv_bfloat16, fast_min_bf16, fast_max_bf16, fast_argmin_bf16, fast_argmax_bf16, fast_sum_bf16) #endif #if __CUDA_ARCH__ >= 530 SOFTMAX_OP(__half, float, softmax_f16) RMSNORM_OP(__half, rmsnorm_f16) LAYERNORM_OP(__half, layernorm_f16) ROPE_OP(__half, rope_f16, rope_i_f16, rope_thd_f16) SUM_OP(__half, sum_f16) FAST_OP(__half, fast_min_f16, fast_max_f16, fast_argmin_f16, fast_argmax_f16, fast_sum_f16) #endif SUM_OP(float, sum_f32) SUM_OP(double, sum_f64) SUM_OP(uint32_t, sum_u32) SOFTMAX_OP(float, float, 
softmax_f32) SOFTMAX_OP(double, double, softmax_f64) RMSNORM_OP(float, rmsnorm_f32) RMSNORM_OP(double, rmsnorm_f64) LAYERNORM_OP(float, layernorm_f32) LAYERNORM_OP(double, layernorm_f64) ROPE_OP(float, rope_f32, rope_i_f32, rope_thd_f32) ROPE_OP(double, rope_f64, rope_i_f64, rope_thd_f64) FAST_OP(float, fast_min_f32, fast_max_f32, fast_argmin_f32, fast_argmax_f32, fast_sum_f32) FAST_OP(double, fast_min_f64, fast_max_f64, fast_argmin_f64, fast_argmax_f64, fast_sum_f64) FAST_OP(uint32_t, fast_min_u32, fast_max_u32, fast_argmin_u32, fast_argmax_u32, fast_sum_u32) FAST_OP(int64_t, fast_min_i64, fast_max_i64, fast_argmin_i64, fast_argmax_i64, fast_sum_i64) FAST_OP(uint8_t, fast_min_u8, fast_max_u8, fast_argmin_u8, fast_argmax_u8, fast_sum_u8)
candle/candle-kernels/src/reduce.cu/0
{ "file_path": "candle/candle-kernels/src/reduce.cu", "repo_id": "candle", "token_count": 12841 }
38
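All the `fast_*` kernels above share the same two-phase shape: each thread first folds a strided slice of the input into shared memory, then the block combines the partial results pairwise in log2(block_size) steps. A sequential Rust sketch of that second phase, for illustration only:

// Mirrors the `for (int s = blockDim.x / 2; s > 0; s >>= 1)` loop in `fast_sum`: on the GPU,
// every thread with tid < s performs one addition per step, separated by __syncthreads()
// barriers; here the inner loop plays the role of the thread grid.
fn tree_reduce_sum(mut shr: Vec<f32>) -> f32 {
    assert!(shr.len().is_power_of_two());
    let mut s = shr.len() / 2;
    while s > 0 {
        for tid in 0..s {
            shr[tid] += shr[tid + s];
        }
        s /= 2;
    }
    // e.g. tree_reduce_sum(vec![1., 2., 3., 4.]) returns 10.0
    shr[0]
}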
// Imported from https://github.com/ggerganov/llama.cpp/blob/master/ggml-metal.metal #include <metal_stdlib> using namespace metal; #define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } #define SORT_ASC 1 #define SORT_DESC 0 template<int order, typename T> METAL_FUNC void argsort( device const T * x, device uint32_t * dst, constant int64_t & ncols, constant int64_t & ncols_pad, threadgroup uint32_t * shared_values [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]) { int col = tpitg[0]; int row = tgpig[1]; if (col >= ncols_pad) return; device const T * x_row = x + row * ncols; threadgroup uint32_t * dst_row = shared_values; // initialize indices dst_row[col] = col; threadgroup_barrier(mem_flags::mem_threadgroup); for (int k = 2; k <= ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && (order == SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj]); } } } threadgroup_barrier(mem_flags::mem_threadgroup); } } // copy the result to dst without the padding if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } #define ARGSORT(T, RUST_T) \ kernel void asort_asc_##RUST_T( \ device const T * x, \ device uint32_t * dst, \ constant int64_t & ncols, \ constant int64_t & ncols_pad, \ threadgroup uint32_t * shared_values [[threadgroup(0)]], \ uint3 tgpig[[threadgroup_position_in_grid]], \ uint3 tpitg[[thread_position_in_threadgroup]] \ ) { \ argsort<SORT_ASC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg); \ } \ kernel void asort_desc_##RUST_T( \ device const T * x, \ device uint32_t * dst, \ constant int64_t & ncols, \ constant int64_t & ncols_pad, \ threadgroup uint32_t * shared_values [[threadgroup(0)]], \ uint3 tgpig[[threadgroup_position_in_grid]], \ uint3 tpitg[[thread_position_in_threadgroup]] \ ) { \ argsort<SORT_DESC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg); \ } \ ARGSORT(float, f32) ARGSORT(half, f16) ARGSORT(uint8_t, u8) ARGSORT(uint32_t, u32) #if __METAL_VERSION__ >= 220 ARGSORT(int64_t, i64) #endif #if defined(__HAVE_BFLOAT__) ARGSORT(bfloat, bf16) #endif
candle/candle-metal-kernels/src/sort.metal/0
{ "file_path": "candle/candle-metal-kernels/src/sort.metal", "repo_id": "candle", "token_count": 1748 }
39
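The `argsort` kernel above is a bitonic sorting network over indices: each row is padded to the next power of two and out-of-range indices compare as +inf so that the padding sinks to the end. A sequential Rust sketch of the ascending variant, illustrative rather than part of the kernels crate:

fn bitonic_argsort_asc(x: &[f32]) -> Vec<u32> {
    let ncols = x.len();
    let ncols_pad = ncols.next_power_of_two();
    let mut idx: Vec<u32> = (0..ncols_pad as u32).collect();
    let mut k = 2;
    while k <= ncols_pad {
        let mut j = k / 2;
        while j > 0 {
            // On the GPU each `col` is handled by one thread, with threadgroup barriers
            // between successive values of j.
            for col in 0..ncols_pad {
                let ixj = col ^ j;
                if ixj > col {
                    let (a, b) = (idx[col] as usize, idx[ixj] as usize);
                    // Padded entries behave like +inf, matching the `>= ncols` checks above.
                    let av = if a < ncols { x[a] } else { f32::INFINITY };
                    let bv = if b < ncols { x[b] } else { f32::INFINITY };
                    let ascending = (col & k) == 0;
                    if (ascending && av > bv) || (!ascending && av < bv) {
                        idx.swap(col, ixj);
                    }
                }
            }
            j /= 2;
        }
        k *= 2;
    }
    // Drop the padding, keeping only the indices of the real columns.
    idx.truncate(ncols);
    idx
}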
/// This example contains some simple benchmarks so that it's easy to run them in perf etc. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::quantized::GgmlType; use candle::{CpuStorage, Device, Layout, Module, Result, Shape, Tensor, D}; use clap::{Parser, Subcommand}; const CHECK_CONV2D: bool = false; trait Benchmark { type PreProcessData; type RunResult; fn preprocess() -> Result<Self::PreProcessData>; fn run_one(_: &Self::PreProcessData) -> Result<Self::RunResult>; const ITERS: usize; } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl candle::CustomOp1 for Im2Col { fn name(&self) -> &'static str { "im2col" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { let &Self { h_k, w_k, stride, dilation, padding, } = self; let (b, c, h, w) = layout.shape().dims4()?; let (h_out, w_out) = self.hw_out(h, w); let slice = storage.as_slice::<f32>()?; let src = &slice[layout.start_offset()..]; let mut dst = vec![0f32; b * h_out * w_out * c * h_k * w_k]; let (src_s0, src_s1, src_s2, src_s3) = { let s = layout.stride(); (s[0], s[1], s[2], s[3]) }; // TODO: provide specialized kernels for the common use cases. // - h_k = w_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * h_out * w_out * c * h_k * w_k; for h_idx in 0..h_out { let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k; for w_idx in 0..w_out { let dst_idx = dst_idx + w_idx * c * h_k * w_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * h_k * w_k; let src_idx = c_idx * src_s1 + src_idx; for h_k_idx in 0..h_k { let src_h = h_idx * stride + h_k_idx * dilation; if padding != 0 && (src_h < padding || src_h >= h + padding) { continue; } let src_h = src_h - padding; let src_idx = src_idx + src_h * src_s2; let dst_idx = dst_idx + h_k_idx * w_k; for w_k_idx in 0..w_k { let src_w = w_idx * stride + w_k_idx * dilation; if padding != 0 && (src_w < padding || src_w >= w + padding) { continue; } let src_w = src_w - padding; let src_idx = src_idx + src_w * src_s3; let dst_idx = dst_idx + w_k_idx; dst[dst_idx] = src[src_idx] } } } } } } let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b * h_out * w_out, c * h_k * w_k).into())) } } // Conv1d example as used in whisper. struct Conv1d; impl Benchmark for Conv1d { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (1, 384, 3000), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (384, 384, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.conv1d(&d.1, 0, 1, 1, 1) } const ITERS: usize = 5; } // Conv2d example as used in stable-diffusion. 
struct Conv2d; impl Benchmark for Conv2d { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.conv2d(&d.1, 0, 1, 1, 1) } const ITERS: usize = 5; } // Conv2d example as used in stable-diffusion, im2col implementation. struct Conv2dIm2Col; impl Benchmark for Conv2dIm2Col { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { // d.0.conv2d(&d.1, 0, 1, 1, 1) let (b, _, h, w) = d.0.dims4()?; let (_, _, h_k, w_k) = d.1.dims4()?; let op = Im2Col { h_k, w_k, stride: 1, dilation: 1, padding: 0, }; let (h_out, w_out) = op.hw_out(h, w); let col = d.0.apply_op1_no_bwd(&op)?; let res = col.matmul(&d.1.flatten_from(1)?.t()?)?; let res = res .reshape((b, h_out, w_out, ()))? .permute((0, 3, 1, 2))? .contiguous()?; if CHECK_CONV2D { let res2 = d.0.conv2d(&d.1, op.padding, op.stride, op.dilation, 1); let diff = (&res - res2)?.sqr()?.mean_all()?; println!("{diff}"); } Ok(res) } const ITERS: usize = 5; } struct MatMul; impl Benchmark for MatMul { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.matmul(&d.1) } const ITERS: usize = 100; } struct MatVec; impl Benchmark for MatVec { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1024 * 4, 1024 * 4), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1024 * 4, 1), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.matmul(&d.1) } const ITERS: usize = 100; } // This benchmark is similar to: // https://github.com/ggerganov/llama.cpp/blob/master/examples/benchmark/benchmark-matmult.cpp struct QMatMul; impl Benchmark for QMatMul { type PreProcessData = (candle::quantized::QMatMul, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let zeros = vec![candle::quantized::k_quants::BlockQ4_0::zeros(); 4096 * 11008 / 32]; let mm = candle::quantized::QTensor::new( candle::quantized::QStorage::Cpu(Box::new(zeros)), (4096, 11008), )?; let mm = candle::quantized::QMatMul::from_qtensor(mm)?; let arg = Tensor::randn(0f32, 1., (128, 11008), &Device::Cpu)?; Ok((mm, arg)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.forward(&d.1) } const ITERS: usize = 100; } struct Cat; impl Benchmark for Cat { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1, 32, 2000, 128), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1, 32, 1, 128), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { Tensor::cat(&[&d.0, &d.1], 2) } const ITERS: usize = 1000; } struct Softmax; impl Benchmark for Softmax { type PreProcessData = 
Tensor; type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { // Typical whisper tiny size. let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?; Ok(x) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { candle_nn::ops::softmax(d, D::Minus1) } const ITERS: usize = 100; } struct SoftmaxLastDim; impl Benchmark for SoftmaxLastDim { type PreProcessData = Tensor; type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { // Typical whisper tiny size. let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?; Ok(x) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { candle_nn::ops::softmax_last_dim(d) } const ITERS: usize = 100; } fn run<B: Benchmark>(iters: Option<usize>) -> Result<()> { use std::hint::black_box; let iters = iters.unwrap_or(B::ITERS); let d = B::preprocess()?; let start = std::time::Instant::now(); for _iter in 0..iters { let _res = black_box(B::run_one(black_box(&d))?); } println!("{:?}", start.elapsed() / iters as u32); Ok(()) } #[derive(Subcommand, Debug, Clone)] enum Task { Conv1d, Conv2d, Conv2dIm2Col, Matmul, Matvec, Qmatmul, Softmax, SoftmaxLastDim, Cat, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// The benchmark to be run. #[command(subcommand)] task: Task, #[arg(long)] iters: Option<usize>, } fn main() -> Result<()> { let args = Args::parse(); match args.task { Task::Conv1d => run::<Conv1d>(args.iters)?, Task::Conv2d => run::<Conv2d>(args.iters)?, Task::Conv2dIm2Col => run::<Conv2dIm2Col>(args.iters)?, Task::Matmul => run::<MatMul>(args.iters)?, Task::Matvec => run::<MatVec>(args.iters)?, Task::Softmax => run::<Softmax>(args.iters)?, Task::SoftmaxLastDim => run::<SoftmaxLastDim>(args.iters)?, Task::Qmatmul => run::<QMatMul>(args.iters)?, Task::Cat => run::<Cat>(args.iters)?, } Ok(()) }
candle/candle-nn/examples/cpu_benchmarks.rs/0
{ "file_path": "candle/candle-nn/examples/cpu_benchmarks.rs", "repo_id": "candle", "token_count": 5543 }
40
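The harness above makes adding a benchmark a matter of implementing the `Benchmark` trait and wiring a new `Task` variant into `main`. A hypothetical sketch reusing that trait (the `Gelu` benchmark and its shapes are invented for illustration):

use candle::{Device, Result, Tensor};

struct Gelu;
impl Benchmark for Gelu {
    type PreProcessData = Tensor;
    type RunResult = Tensor;

    fn preprocess() -> Result<Self::PreProcessData> {
        Tensor::randn(0f32, 1., (32, 1024, 1024), &Device::Cpu)
    }

    fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> {
        // Element-wise op, so this mostly measures memory bandwidth.
        d.gelu()
    }

    const ITERS: usize = 20;
}
// With a matching `Task::Gelu` arm in `main`, `run::<Gelu>(args.iters)?` times it like the others.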
//! Recurrent Neural Networks use candle::{DType, Device, IndexOp, Result, Tensor}; /// Trait for Recurrent Neural Networks. #[allow(clippy::upper_case_acronyms)] pub trait RNN { type State: Clone; /// A zero state from which the recurrent network is usually initialized. fn zero_state(&self, batch_dim: usize) -> Result<Self::State>; /// Applies a single step of the recurrent network. /// /// The input should have dimensions [batch_size, features]. fn step(&self, input: &Tensor, state: &Self::State) -> Result<Self::State>; /// Applies multiple steps of the recurrent network. /// /// The input should have dimensions [batch_size, seq_len, features]. /// The initial state is the result of applying zero_state. fn seq(&self, input: &Tensor) -> Result<Vec<Self::State>> { let batch_dim = input.dim(0)?; let state = self.zero_state(batch_dim)?; self.seq_init(input, &state) } /// Applies multiple steps of the recurrent network. /// /// The input should have dimensions [batch_size, seq_len, features]. fn seq_init(&self, input: &Tensor, init_state: &Self::State) -> Result<Vec<Self::State>> { let (_b_size, seq_len, _features) = input.dims3()?; let mut output = Vec::with_capacity(seq_len); for seq_index in 0..seq_len { let input = input.i((.., seq_index, ..))?.contiguous()?; let state = if seq_index == 0 { self.step(&input, init_state)? } else { self.step(&input, &output[seq_index - 1])? }; output.push(state); } Ok(output) } /// Converts a sequence of state to a tensor. fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor>; } /// The state for a LSTM network, this contains two tensors. #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] pub struct LSTMState { pub h: Tensor, pub c: Tensor, } impl LSTMState { pub fn new(h: Tensor, c: Tensor) -> Self { LSTMState { h, c } } /// The hidden state vector, which is also the output of the LSTM. pub fn h(&self) -> &Tensor { &self.h } /// The cell state vector. pub fn c(&self) -> &Tensor { &self.c } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy)] pub struct LSTMConfig { pub w_ih_init: super::Init, pub w_hh_init: super::Init, pub b_ih_init: Option<super::Init>, pub b_hh_init: Option<super::Init>, pub layer_idx: usize, } impl Default for LSTMConfig { fn default() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: Some(super::Init::Const(0.)), b_hh_init: Some(super::Init::Const(0.)), layer_idx: 0, } } } impl LSTMConfig { pub fn default_no_bias() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: None, b_hh_init: None, layer_idx: 0, } } } /// A Long Short-Term Memory (LSTM) layer. /// /// <https://en.wikipedia.org/wiki/Long_short-term_memory> #[allow(clippy::upper_case_acronyms, unused)] #[derive(Clone, Debug)] pub struct LSTM { w_ih: Tensor, w_hh: Tensor, b_ih: Option<Tensor>, b_hh: Option<Tensor>, hidden_dim: usize, config: LSTMConfig, device: Device, dtype: DType, } /// Creates a LSTM layer. pub fn lstm( in_dim: usize, hidden_dim: usize, config: LSTMConfig, vb: crate::VarBuilder, ) -> Result<LSTM> { let layer_idx = config.layer_idx; let w_ih = vb.get_with_hints( (4 * hidden_dim, in_dim), &format!("weight_ih_l{layer_idx}"), // Only a single layer is supported. config.w_ih_init, )?; let w_hh = vb.get_with_hints( (4 * hidden_dim, hidden_dim), &format!("weight_hh_l{layer_idx}"), // Only a single layer is supported. 
config.w_hh_init, )?; let b_ih = match config.b_ih_init { Some(init) => { Some(vb.get_with_hints(4 * hidden_dim, &format!("bias_ih_l{layer_idx}"), init)?) } None => None, }; let b_hh = match config.b_hh_init { Some(init) => { Some(vb.get_with_hints(4 * hidden_dim, &format!("bias_hh_l{layer_idx}"), init)?) } None => None, }; Ok(LSTM { w_ih, w_hh, b_ih, b_hh, hidden_dim, config, device: vb.device().clone(), dtype: vb.dtype(), }) } impl RNN for LSTM { type State = LSTMState; fn zero_state(&self, batch_dim: usize) -> Result<Self::State> { let zeros = Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?; Ok(Self::State { h: zeros.clone(), c: zeros.clone(), }) } fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> { let w_ih = input.matmul(&self.w_ih.t()?)?; let w_hh = in_state.h.matmul(&self.w_hh.t()?)?; let w_ih = match &self.b_ih { None => w_ih, Some(b_ih) => w_ih.broadcast_add(b_ih)?, }; let w_hh = match &self.b_hh { None => w_hh, Some(b_hh) => w_hh.broadcast_add(b_hh)?, }; let chunks = (&w_ih + &w_hh)?.chunk(4, 1)?; let in_gate = crate::ops::sigmoid(&chunks[0])?; let forget_gate = crate::ops::sigmoid(&chunks[1])?; let cell_gate = chunks[2].tanh()?; let out_gate = crate::ops::sigmoid(&chunks[3])?; let next_c = ((forget_gate * &in_state.c)? + (in_gate * cell_gate)?)?; let next_h = (out_gate * next_c.tanh()?)?; Ok(LSTMState { c: next_c, h: next_h, }) } fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> { let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>(); Tensor::stack(&states, 1) } } /// The state for a GRU network, this contains a single tensor. #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] pub struct GRUState { pub h: Tensor, } impl GRUState { /// The hidden state vector, which is also the output of the LSTM. pub fn h(&self) -> &Tensor { &self.h } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy)] pub struct GRUConfig { pub w_ih_init: super::Init, pub w_hh_init: super::Init, pub b_ih_init: Option<super::Init>, pub b_hh_init: Option<super::Init>, } impl Default for GRUConfig { fn default() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: Some(super::Init::Const(0.)), b_hh_init: Some(super::Init::Const(0.)), } } } impl GRUConfig { pub fn default_no_bias() -> Self { Self { w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM, w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM, b_ih_init: None, b_hh_init: None, } } } /// A Gated Recurrent Unit (GRU) layer. /// /// <https://en.wikipedia.org/wiki/Gated_recurrent_unit> #[allow(clippy::upper_case_acronyms, unused)] #[derive(Clone, Debug)] pub struct GRU { w_ih: Tensor, w_hh: Tensor, b_ih: Option<Tensor>, b_hh: Option<Tensor>, hidden_dim: usize, config: GRUConfig, device: Device, dtype: DType, } /// Creates a GRU layer. pub fn gru( in_dim: usize, hidden_dim: usize, config: GRUConfig, vb: crate::VarBuilder, ) -> Result<GRU> { let w_ih = vb.get_with_hints( (3 * hidden_dim, in_dim), "weight_ih_l0", // Only a single layer is supported. config.w_ih_init, )?; let w_hh = vb.get_with_hints( (3 * hidden_dim, hidden_dim), "weight_hh_l0", // Only a single layer is supported. 
config.w_hh_init, )?; let b_ih = match config.b_ih_init { Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_ih_l0", init)?), None => None, }; let b_hh = match config.b_hh_init { Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_hh_l0", init)?), None => None, }; Ok(GRU { w_ih, w_hh, b_ih, b_hh, hidden_dim, config, device: vb.device().clone(), dtype: vb.dtype(), }) } impl RNN for GRU { type State = GRUState; fn zero_state(&self, batch_dim: usize) -> Result<Self::State> { let h = Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?; Ok(Self::State { h }) } fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> { let w_ih = input.matmul(&self.w_ih.t()?)?; let w_hh = in_state.h.matmul(&self.w_hh.t()?)?; let w_ih = match &self.b_ih { None => w_ih, Some(b_ih) => w_ih.broadcast_add(b_ih)?, }; let w_hh = match &self.b_hh { None => w_hh, Some(b_hh) => w_hh.broadcast_add(b_hh)?, }; let chunks_ih = w_ih.chunk(3, 1)?; let chunks_hh = w_hh.chunk(3, 1)?; let r_gate = crate::ops::sigmoid(&(&chunks_ih[0] + &chunks_hh[0])?)?; let z_gate = crate::ops::sigmoid(&(&chunks_ih[1] + &chunks_hh[1])?)?; let n_gate = (&chunks_ih[2] + (r_gate * &chunks_hh[2])?)?.tanh(); let next_h = ((&z_gate * &in_state.h)? - ((&z_gate - 1.)? * n_gate)?)?; Ok(GRUState { h: next_h }) } fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> { let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>(); Tensor::cat(&states, 1) } }
candle/candle-nn/src/rnn.rs/0
{ "file_path": "candle/candle-nn/src/rnn.rs", "repo_id": "candle", "token_count": 4925 }
41
use crate::onnx::attribute_proto::AttributeType; use crate::onnx::tensor_proto::DataType; use crate::onnx::{self, GraphProto}; use candle::{bail, DType, Device, Result, Tensor}; use std::{collections::HashMap, usize}; pub type Value = Tensor; pub fn dtype(dt: DataType) -> Option<DType> { match dt { DataType::Uint8 => Some(DType::U8), DataType::Uint32 => Some(DType::U32), DataType::Int64 => Some(DType::I64), DataType::Float16 => Some(DType::F16), DataType::Float => Some(DType::F32), DataType::Double => Some(DType::F64), DataType::Bool => Some(DType::U8), _ => None, } } trait Attr { const TYPE: AttributeType; fn get(attr: &onnx::AttributeProto) -> Result<&Self>; } trait AttrOwned: Sized { const TYPE: AttributeType; fn get(attr: &onnx::AttributeProto) -> Result<Self>; } impl Attr for i64 { const TYPE: AttributeType = AttributeType::Int; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.i) } } impl Attr for f32 { const TYPE: AttributeType = AttributeType::Float; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.f) } } impl Attr for [i64] { const TYPE: AttributeType = AttributeType::Ints; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(attr.ints.as_slice()) } } impl Attr for str { const TYPE: AttributeType = AttributeType::String; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { std::str::from_utf8(&attr.s).map_err(candle::Error::wrap) } } impl Attr for GraphProto { const TYPE: AttributeType = AttributeType::Graph; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { attr.g .as_ref() .ok_or_else(|| candle::Error::Msg("attribute does not contain graph".to_string())) } } impl AttrOwned for Vec<String> { const TYPE: AttributeType = AttributeType::Strings; fn get(attr: &onnx::AttributeProto) -> Result<Self> { let mut ret = vec![]; for bytes in attr.strings.iter() { let s = String::from_utf8(bytes.clone()).map_err(candle::Error::wrap)?; ret.push(s); } Ok(ret) } } impl AttrOwned for Tensor { const TYPE: AttributeType = AttributeType::Tensor; fn get(attr: &onnx::AttributeProto) -> Result<Self> { let tensor_proto = match &attr.t { Some(value) => value, None => bail!( "attribute {} was of type TENSOR, but no tensor was found", attr.name ), }; let data_type = match DataType::try_from(tensor_proto.data_type) { Ok(value) => value, Err(_) => bail!( "attribute {} of type TENSOR was an invalid data_type number {}", attr.name, tensor_proto.data_type ), }; let dtype = match dtype(data_type) { Some(value) => value, None => bail!( "attribute {} of type TENSOR has an unsupported data_type {}", attr.name, data_type.as_str_name() ), }; let mut dims = Vec::with_capacity(tensor_proto.dims.len()); for dim in &tensor_proto.dims { if dim < &0 { bail!( "attribute {} of type TENSOR has a negative dimension, which is unsupported", attr.name ) } dims.push(*dim as usize) } Tensor::from_raw_buffer(&tensor_proto.raw_data, dtype, &dims, &Device::Cpu) } } fn get_attr_<'a>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a onnx::AttributeProto> { match node.attribute.iter().find(|attr| attr.name == name) { None => { bail!( "cannot find the '{name}' attribute in '{}' for {}", node.op_type, node.name ) } Some(dt) => Ok(dt), } } fn get_attr<'a, T: Attr + ?Sized>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a T> { let attr = get_attr_(node, name)?; if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } T::get(attr) } fn get_attr_opt<'a, T: Attr + ?Sized>( node: &'a onnx::NodeProto, name: &str, ) 
-> Result<Option<&'a T>> { match node.attribute.iter().find(|attr| attr.name == name) { None => Ok(None), Some(attr) => { if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } let val = T::get(attr)?; Ok(Some(val)) } } } fn get_attr_opt_owned<T: AttrOwned>(node: &onnx::NodeProto, name: &str) -> Result<Option<T>> { match node.attribute.iter().find(|attr| attr.name == name) { None => Ok(None), Some(attr) => { if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } let val = T::get(attr)?; Ok(Some(val)) } } } pub fn get_tensor(t: &onnx::TensorProto, name: &str) -> Result<Tensor> { let dims: Vec<usize> = t.dims.iter().map(|&x| x as usize).collect(); match DataType::try_from(t.data_type) { Ok(DataType::Int32) => { if t.int32_data.is_empty() { let len = t.raw_data.len() / 4; let data: &[i32] = unsafe { std::slice::from_raw_parts(t.raw_data.as_ptr() as *const i32, len) }; let data = data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, len, &Device::Cpu) } else { let data = t.int32_data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, t.int32_data.len(), &Device::Cpu) } } Ok(dt) => match dtype(dt) { Some(dt) => { if dt == DType::F32 && !t.float_data.is_empty() { Tensor::from_slice(&t.float_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::F64 && !t.double_data.is_empty() { Tensor::from_slice(&t.double_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::I64 && !t.int64_data.is_empty() { Tensor::from_slice(&t.int64_data, dims.as_slice(), &Device::Cpu) } else { Tensor::from_raw_buffer( t.raw_data.as_slice(), dt, dims.as_slice(), &Device::Cpu, ) } } None => { bail!("unsupported 'value' data-type {dt:?} for {name}") } }, Err(_) => { bail!("unsupported 'value' data-type {} for {name}", t.data_type,) } } } // This function provides a direct evaluation of the proto. // Longer-term, we should first convert the proto to an intermediate representation of the compute // graph so as to make multiple evaluations more efficient. // An example upside of this would be to remove intermediary values when they are not needed // anymore. 
pub fn simple_eval( model: &onnx::ModelProto, mut inputs: HashMap<String, Value>, ) -> Result<HashMap<String, Value>> { let graph = match &model.graph { None => bail!("no graph defined in proto"), Some(graph) => graph, }; simple_eval_(graph, &mut inputs) } fn simple_eval_( graph: &onnx::GraphProto, values: &mut HashMap<String, Value>, ) -> Result<HashMap<String, Value>> { for t in graph.initializer.iter() { let tensor = get_tensor(t, t.name.as_str())?; values.insert(t.name.to_string(), tensor); } for input in graph.input.iter() { let input_type = match &input.r#type { Some(input_type) => input_type, None => continue, }; let input_type = match &input_type.value { Some(input_type) => input_type, None => continue, }; let tensor_type = match input_type { onnx::type_proto::Value::TensorType(tt) => tt, _ => continue, }; let tensor = match values.get(&input.name) { None => bail!("missing input {}", input.name), Some(tensor) => tensor, }; let dt = match DataType::try_from(tensor_type.elem_type) { Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'value' data-type {dt:?} for {}", input.name) } }, type_ => bail!("unsupported input type {type_:?}"), }; match &tensor_type.shape { None => continue, Some(shape) => { if shape.dim.len() != tensor.rank() { bail!( "unexpected rank for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } for (idx, (d, &dim)) in shape.dim.iter().zip(tensor.dims().iter()).enumerate() { match &d.value { Some(onnx::tensor_shape_proto::dimension::Value::DimValue(v)) => { if *v as usize != dim { bail!( "unexpected dim {idx} for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } } // We do not check equality constraints for the DimParam dimensions for now. Some(onnx::tensor_shape_proto::dimension::Value::DimParam(_)) | None => (), } } } }; if dt != tensor.dtype() { bail!( "unexpected dtype for {}, got {:?}, expected {dt:?}", input.name, tensor.dtype() ) } } // The nodes are topologically sorted so we can just process them in order. for node in graph.node.iter() { let get = |input_name: &str| match values.get(input_name) { Some(value) => Ok(value), None => bail!("cannot find {input_name} for op {}", node.name), }; // TODO: Validate node.input for each operator. match node.op_type.as_str() { "Add" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_add(input1)?; values.insert(node.output[0].clone(), output); } "Sub" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_sub(input1)?; values.insert(node.output[0].clone(), output); } "Mul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_mul(input1)?; values.insert(node.output[0].clone(), output); } "Div" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_div(input1)?; values.insert(node.output[0].clone(), output); } "Pow" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; // HACK: current implementation of broadcast_pow cannot handle negative base, // so we use powf where we can, which *does* correctly handle negative base. 
if let Ok(exp) = (|| input1.to_dtype(DType::F64)?.to_scalar::<f64>())() { let output = input0.powf(exp as f64)?; values.insert(node.output[0].clone(), output); } else { let output = input0.broadcast_pow(input1)?; values.insert(node.output[0].clone(), output); } } "Exp" => { let xs = get(&node.input[0])?; let output = xs.exp()?; values.insert(node.output[0].clone(), output); } "Equal" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_eq(input1)?; values.insert(node.output[0].clone(), output); } "Not" => { let xs = get(&node.input[0])?; let xs = xs.eq(&xs.zeros_like()?)?; values.insert(node.output[0].clone(), xs); } "MatMul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_matmul(input1)?; values.insert(node.output[0].clone(), output); } "Reshape" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?.to_vec1::<i64>()?; // TODO: Check that there is at most a single -1 or 0, handle other neg values. let mut other_than_minus1 = 1usize; for &v in input1.iter() { if v != -1 && v != 0 { other_than_minus1 *= v as usize } } let input1 = input1 .iter() .enumerate() .map(|(idx, &v)| match v { -1 => Ok(input0.elem_count() / other_than_minus1), 0 => input0.dim(idx), _ => Ok(v as usize), }) .collect::<Result<Vec<usize>>>()?; let output = input0.reshape(input1)?; values.insert(node.output[0].clone(), output); } "LogSoftmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::log_softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Softmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Transpose" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<[i64]>(node, "perm")? { None => input.t()?, Some(perm) => { let perm = perm.iter().map(|&v| v as usize).collect::<Vec<_>>(); input.permute(perm)? } }; values.insert(node.output[0].clone(), output); } "Dropout" => { let input = get(&node.input[0])?; // Do not apply dropout at the moment, consider that we're only doing inference. values.insert(node.output[0].clone(), input.clone()); } "MaxPool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#MaxPool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("MaxPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("MaxPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d MaxPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.max_pool2d((k1, k2))?, Some([s1, s2]) => { xs.max_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? 
} Some(strides) => bail!("only 2d MaxPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "AveragePool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("AvgPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("AvgPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d AvgPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.avg_pool2d((k1, k2))?, Some([s1, s2]) => { xs.avg_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? } Some(strides) => bail!("only 2d AvgPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "BatchNormalization" => { let training_mode = get_attr_opt::<i64>(node, "training_mode")?; if training_mode.copied().unwrap_or(0) != 0 { bail!("training mode is not supported for BatchNorm") } let eps = get_attr_opt::<f32>(node, "epsilon")? .copied() .unwrap_or(1e-5); let xs = get(&node.input[0])?; let weight = get(&node.input[1])?; let bias = get(&node.input[2])?; let running_mean = get(&node.input[3])?; let running_var = get(&node.input[4])?; let target_shape: Vec<usize> = xs .dims() .iter() .enumerate() .map(|(idx, v)| if idx == 1 { *v } else { 1 }) .collect(); let target_shape = target_shape.as_slice(); let xs = xs .broadcast_sub(&running_mean.reshape(target_shape)?)? .broadcast_div(&(running_var.reshape(target_shape)? + eps as f64)?.sqrt()?)?; let weight = weight.reshape(target_shape)?; let bias = bias.reshape(target_shape)?; let xs = xs.broadcast_mul(&weight)?.broadcast_add(&bias)?; values.insert(node.output[0].clone(), xs); } "Squeeze" => { let xs = get(&node.input[0])?; let mut axes = if node.input.len() <= 1 { // contract all the dimensions with size 1 except the batch dim. xs.dims() .iter() .enumerate() .flat_map(|(idx, &s)| if s == 1 && idx > 0 { Some(idx) } else { None }) .collect() } else { get(&node.input[1])? .to_vec1::<i64>()? .iter() .map(|&i| xs.normalize_axis(i)) .collect::<Result<Vec<_>>>()? }; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.squeeze(axis)? } values.insert(node.output[0].clone(), xs); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConstantOfShape "ConstantOfShape" => { let input = get(&node.input[0])?; let value = get_attr_opt_owned::<Tensor>(node, "value")?.unwrap_or(Tensor::zeros( (), DType::F32, &Device::Cpu, )?); let xs = Tensor::ones(input.shape(), value.dtype(), input.device())? .broadcast_mul(&value)?; values.insert(node.output[0].clone(), xs); } "Unsqueeze" => { let xs = get(&node.input[0])?; let axes = match get_attr_opt::<[i64]>(node, "axes")? 
{ Some(axis) => axis.to_vec(), None => get(&node.input[1])?.to_vec1::<i64>()?, }; let mut axes = axes .iter() .map(|&i| { if i == xs.rank() as i64 { Ok(xs.rank()) } else if i < 0 { // normalize_axis doesn't work correctly here // because we actually want normalized with respect // to the final size, not the current (off by one) Ok(xs.rank() - (-i as usize) + 1) } else { xs.normalize_axis(i) } }) .collect::<Result<Vec<_>>>()?; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.unsqueeze(axis)? } values.insert(node.output[0].clone(), xs); } "Clip" => { let xs = get(&node.input[0])?; let xs = if node.input.len() >= 2 { let mins = get(&node.input[1])?; xs.broadcast_maximum(mins)? } else { xs.clone() }; let xs = if node.input.len() >= 3 { let maxs = get(&node.input[2])?; xs.broadcast_minimum(maxs)? } else { xs.clone() }; values.insert(node.output[0].clone(), xs); } "Gather" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gather let xs = get(&node.input[0])?; let indices = get(&node.input[1])?; let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0); let axis = xs.normalize_axis(axis)?; // index_select does not support negative indices, so normalize them // to positive indices. let indices = &{ let zeros = Tensor::zeros(indices.shape(), indices.dtype(), indices.device())?; let max = Tensor::new(xs.dims()[axis] as i64, indices.device())? .to_dtype(indices.dtype())?; let mask = indices.lt(&zeros)?; mask.to_dtype(indices.dtype())? .broadcast_mul(&max)? .add(&indices)? }; // In Pytorch or Numpy this can be done by indexing the xs tensor using the indices // tensor directly, but candle does not support tensor indexing at the moment, so // some workarounds must be done. let xs = match indices.dims() { [] => { let index = indices.to_vec0::<i64>()? as usize; xs.narrow(axis, index, 1)?.squeeze(axis)? } [_] => xs.index_select(indices, axis)?, [first, _] => { let mut v = Vec::with_capacity(*first); for i in 0..*first { v.push(xs.index_select(&indices.get(i)?, axis)?) } Tensor::stack(&v, axis)? } _ => { // TODO: Provide an op to handle the ONNX generalized gather op ideally in a // differentiable way. todo!("implement gather for {xs:?} {indices:?} axis {axis}") } }; values.insert(node.output[0].clone(), xs); } "Shape" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Shape let xs = get(&node.input[0])?; let start = get_attr_opt::<i64>(node, "start")?.copied().unwrap_or(0); let end = get_attr_opt::<i64>(node, "end")?.copied().unwrap_or(-1); let start = xs.normalize_axis(start)?; let end = xs.normalize_axis(end)?; let mut dims = vec![]; for idx in start..=end { dims.push(xs.dim(idx)? as i64) } let dims = Tensor::from_vec(dims, xs.rank(), xs.device())?; values.insert(node.output[0].clone(), dims); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Size "Size" => { let data = get(&node.input[0])?; let size: usize = data.dims().iter().product(); let output = Tensor::from_slice(&[size as i64], (), data.device())?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Sqrt "Sqrt" => { let xs = get(&node.input[0])?; let output = xs.sqrt()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Range "Range" => { let start = get(&node.input[0])?; let limit = get(&node.input[1])?; let delta = get(&node.input[2])?; macro_rules! 
arange_step { ($t: ty) => { Tensor::arange_step( start.to_vec0::<$t>()?, limit.to_vec0::<$t>()?, delta.to_vec0::<$t>()?, &Device::Cpu, )? }; } let output = match start.dtype() { DType::U8 => arange_step!(u8), DType::U32 => arange_step!(u32), DType::I64 => arange_step!(i64), DType::BF16 => arange_step!(f32), DType::F16 => arange_step!(f32), DType::F32 => arange_step!(f32), DType::F64 => arange_step!(f64), }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Greater "Greater" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let output = a.broadcast_gt(b)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Less "Less" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let output = a.broadcast_lt(b)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Log "Log" => { let a = get(&node.input[0])?; let output = a.log()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Min "Min" => { let mut output = get(&node.input[0])?.clone(); for input in node.input.iter() { let input = get(input)?; output = output.broadcast_minimum(input)? } values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Where "Where" => { let cond = get(&node.input[0])?; let a = get(&node.input[1])?; let b = get(&node.input[2])?; let output = cond.where_cond(a, b)?; values.insert(node.output[0].clone(), output); } "Conv" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Conv let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let groups = get_attr_opt::<i64>(node, "group")?.copied().unwrap_or(1); let _kernel_shape = get_attr_opt::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; let xs = get(&node.input[0])?; let ws = get(&node.input[1])?; let ys = match ws.rank() { 3 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some([p1, p2]) => { if p1 != p2 { (0usize, xs.pad_with_zeros(2, *p1 as usize, *p2 as usize)?) } else { (*p1 as usize, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv1d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more strides than expected in conv1d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more dilations than expected in conv1d {s:?} {}", node.name) } }; xs.conv1d(ws, pads, strides, dilations, groups as usize)? } 4 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some(&[p1, p2, p3, p4]) => { let p1 = p1 as usize; let p2 = p2 as usize; let p3 = p3 as usize; let p4 = p4 as usize; if p1 != p2 || p1 != p3 || p1 != p4 { (0, xs.pad_with_zeros(2, p1, p3)?.pad_with_zeros(3, p2, p4)?) 
} else { (p1, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv2d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "strides have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more strides than expected in conv2d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "dilations have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more dilations than expected in conv2d {s:?} {}", node.name) } }; xs.conv2d(ws, pads, strides, dilations, groups as usize)? } rank => bail!( "unsupported rank for weight matrix {rank} in conv {}", node.name ), }; let ys = if node.input.len() > 2 { let bs = get(&node.input[2])?; let mut bs_shape = vec![1; ys.rank()]; bs_shape[1] = bs.elem_count(); ys.broadcast_add(&bs.reshape(bs_shape)?)? } else { ys }; values.insert(node.output[0].clone(), ys); } "Concat" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Concat let inputs = node .input .iter() .map(|n| Ok(get(n.as_str())?.clone())) .collect::<Result<Vec<Value>>>()?; let axis: i64 = *get_attr(node, "axis")?; if inputs.is_empty() { bail!("empty concat") }; let axis = inputs[0].normalize_axis(axis)?; let output = Tensor::cat(&inputs, axis)?; values.insert(node.output[0].clone(), output); } "Abs" => { let input = get(&node.input[0])?; let output = input.abs()?; values.insert(node.output[0].clone(), output); } "Cos" => { let input = get(&node.input[0])?; let output = input.cos()?; values.insert(node.output[0].clone(), output); } "Sin" => { let input = get(&node.input[0])?; let output = input.sin()?; values.insert(node.output[0].clone(), output); } "Neg" => { let input = get(&node.input[0])?; let output = input.neg()?; values.insert(node.output[0].clone(), output); } "Erf" => { let input = get(&node.input[0])?; let output = input.erf()?; values.insert(node.output[0].clone(), output); } "Tanh" => { let input = get(&node.input[0])?; let output = input.tanh()?; values.insert(node.output[0].clone(), output); } "Sigmoid" => { let input = get(&node.input[0])?; let output = candle_nn::ops::sigmoid(input)?; values.insert(node.output[0].clone(), output); } "Gelu" => { let input = get(&node.input[0])?; let output = input.gelu_erf()?; values.insert(node.output[0].clone(), output); } "Relu" => { let input = get(&node.input[0])?; let output = input.relu()?; values.insert(node.output[0].clone(), output); } "Ceil" => { let input = get(&node.input[0])?; let output = input.ceil()?; values.insert(node.output[0].clone(), output); } "Floor" => { let input = get(&node.input[0])?; let output = input.floor()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Constant "Constant" => { let value = match node.attribute.iter().find(|attr| attr.name == "value") { None => { // TODO: support sparse_value etc. bail!("cannot find 'value' attr in 'Constant' for {}", node.name) } Some(value) => value, }; let output = match value.r#type() { AttributeType::Tensor => { let t = value.t.as_ref().unwrap(); get_tensor(t, &node.name)? 
} rtype => bail!("unsupported 'value' type {rtype:?} for {}", node.name), }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Cast "Cast" => { let input = get(&node.input[0])?; let dt: i64 = *get_attr(node, "to")?; let dtype = match DataType::try_from(dt as i32) { Ok(DataType::Int32) => DType::I64, Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }, Err(_) => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }; let output = input.to_dtype(dtype)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#CumSum "CumSum" => { let exclusive = get_attr_opt::<i64>(node, "exclusive")? .copied() .unwrap_or(0); let reverse = get_attr_opt::<i64>(node, "reverse")?.copied().unwrap_or(0); if exclusive != 0 { bail!("only exclusive == 0 is supported in CumSum") } if reverse != 0 { bail!("only reverse == 0 is supported in CumSum") } let input = get(&node.input[0])?; let axis = get(&node.input[1])? .to_dtype(DType::U32)? .to_vec0::<u32>()?; let output = input.cumsum(axis as usize)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#flatten "Flatten" => { let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(1) as usize; let input = get(&node.input[0])?; let first_part: usize = input.shape().dims().iter().take(axis).product(); let end_index = input.shape().dims().iter().product::<usize>(); let new_shape = (first_part, end_index / first_part); let output = input.reshape(new_shape)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#identity "Identity" => { let input = get(&node.input[0])?; values.insert(node.output[0].clone(), input.clone()); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#if "If" => { // protobuf encodes boolean false as 0 and true as 1 let cond = get(&node.input[0])?.get(0)?.to_scalar::<u8>()?; let attr_name = if cond != 0 { "then_branch" } else { "else_branch" }; let sub_graph = get_attr::<GraphProto>(node, attr_name)?; if sub_graph.output.len() != node.output.len() { bail!( "If node {:?} is malformed: branch outputs ({}) don't match node outputs ({})", node.name, sub_graph.output.len(), node.output.len() ); } let branch_out = simple_eval_(sub_graph, values)?; for (i, out) in node.output.iter().enumerate() { values.insert( out.clone(), branch_out.get(&sub_graph.output[i].name).unwrap().clone(), ); } } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#pad "Pad" => { let mode = get_attr_opt(node, "mode")?.unwrap_or("constant"); let data = get(&node.input[0])?; let pads = get(&node.input[1])?; if node.input.len() > 2 { bail!( "unsupported number of inputs {} for Pad node {:?}, expected 2", node.input.len(), node.name ); } if pads.rank() != 1 { bail!("Pad expects 'pads' input to be 1D vector: {pads:?}"); } if pads.dim(0).unwrap() != 2 * data.rank() { bail!("Pad expects 'pads' input len to be 2 * rank of 'data' input: pads: {}, data rank: {}", pads, data.rank()); } let pads = pads.to_vec1::<i64>()?; let (pads_pre, pads_post) = pads.split_at(pads.len() / 2); match mode { "reflect" => { let mut out = data.clone(); for (i, &dim) in data.dims().iter().enumerate().rev() { if pads_pre[i] == 0 && pads_post[i] == 0 { continue; } fn zigzag(min: i64, max: i64) -> impl Iterator<Item = i64> { std::iter::repeat((min..max).chain((min + 1..=max).rev())).flatten() 
} let idx = if dim > 1 { let cycle_len = dim * 2 - 2; let skip = cycle_len - ((pads_pre[i] as usize) % cycle_len); let idx = zigzag(0, (dim - 1) as i64) .skip(skip) .take((pads_pre[i] as usize) + dim + (pads_post[i] as usize)); Tensor::from_iter(idx, out.device())? } else { Tensor::full(0i64, (dim,), out.device())? }; out = out.index_select(&idx, i)?; } values.insert(node.output[0].clone(), out); } _ => bail!( "unsupported 'mode' value {mode:?} for Pad node {:?}", node.name ), } } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#slice "Slice" => { let data = get(&node.input[0])?; let starts = get(&node.input[1])?; let ends = get(&node.input[2])?; let default_axes; let default_steps; let axes: &Tensor; let steps: &Tensor; // If axes are omitted, they are set to [0, ..., r-1]. If steps are omitted, // they are set to [1, ..., 1] of length len(starts) match node.input.len() { 3 => { let len = starts.dims()[0]; default_axes = Some(Tensor::arange(0, len as i64, starts.device())?); axes = default_axes.as_ref().unwrap(); default_steps = Some(Tensor::ones((len,), DType::I64, starts.device())?); steps = default_steps.as_ref().unwrap(); } 4 => { let len = starts.dims()[0]; axes = get(&node.input[3])?; default_steps = Some(Tensor::ones((len,), DType::I64, starts.device())?); steps = default_steps.as_ref().unwrap(); } 5 => { steps = get(&node.input[4])?; axes = get(&node.input[3])?; } _ => bail!( "Slice node is invalid, expected 3-5 inputs, got {}: {:?}", node.input.len(), node ), } let mut out = data.clone(); for (i, axis) in axes.to_vec1::<i64>()?.into_iter().enumerate() { // All negative elements of axes are made non-negative by // adding r to them, where r = rank(input). let axis = if axis < 0 { axis + data.rank() as i64 } else { axis } as usize; let data_dim = data.dims()[axis] as i64; let mut s = starts.get(i)?.to_scalar::<i64>()?; let mut e = ends.get(i)?.to_scalar::<i64>()?; // All negative values in starts[i] and ends[i] have // dims[axes[i]] added to them, where dims are the // dimensions of input. if s < 0 { s += data_dim; } if e < 0 { e += data_dim; } let p = steps.get(i)?.to_scalar::<i64>()?; // starts[i] is clamped into the range [0, dims[axes[i]]] // for positive stepping and [0, dims[axes[i]]-1] for // negative stepping. // for positive stepping ends[axes[i]] is clamped to // [0, dims[axes[i]]], while for negative stepping it is // clamped to [-1, dims[axes[i]]-1]. if p >= 0 { s = s.clamp(0, data_dim); e = e.clamp(0, data_dim); } else { s = s.clamp(0, data_dim - 1); e = e.clamp(-1, data_dim - 1); } let indexes = Tensor::arange_step(s, e, p, data.device())?; out = out.index_select(&indexes, axis)? } values.insert(node.output[0].clone(), out); } // https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-13 // TODO: This version is only compatible with ReduceMean V13 and below. "ReduceMean" => { let input = get(&node.input[0])?; let axes = get_attr_opt::<[i64]>(node, "axes")?; let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1); let n_dims = input.dims().len(); let axes: Vec<usize> = if let Some(axes) = axes { axes.iter() .map(|e| (if e < &0 { (n_dims as i64) + *e } else { *e }) as usize) .collect() } else { (0..n_dims).collect() }; let output = if keepdims == 1 { input.mean_keepdim(axes)? } else { input.mean(axes)? 
}; values.insert(node.output[0].clone(), output); } random_type @ ("RandomUniform" | "RandomNormal") => { let dt: i64 = get_attr_opt(node, "dtype")?.copied().unwrap_or(1); // 1 is float // type by // default let dtype = match DataType::try_from(dt as i32) { Ok(dt) => match dtype(dt) { Some(DType::U8 | DType::U32 | DType::I64) => { bail!( "unsupported 'dtype' value {dt:?}, only floats are allowed, for {random_type} {}", node.name ) } Some(dt) => dt, None => { bail!( "unsupported 'dtype' value {dt:?} for {random_type} {}", node.name ) } }, Err(_) => { bail!( "unsupported 'dtype' value {dt:?} for {random_type} {}", node.name ) } }; let seed: Option<f32> = get_attr_opt(node, "seed")?.copied(); if seed.is_some() { bail!("seed for {random_type} is currently not supported") }; let shape: Vec<usize> = get_attr::<[i64]>(node, "shape")? .iter() .map(|x| *x as usize) .collect(); let output = if random_type == "RandomUniform" { let low: f32 = get_attr_opt(node, "low")?.copied().unwrap_or(0.0); let high: f32 = get_attr_opt(node, "high")?.copied().unwrap_or(1.0); Tensor::rand(low, high, shape, &Device::Cpu)?.to_dtype(dtype)? } else { let mean: f32 = get_attr_opt(node, "mean")?.copied().unwrap_or(0.0); let scale: f32 = get_attr_opt(node, "scale")?.copied().unwrap_or(1.0); Tensor::randn(mean, scale, shape, &Device::Cpu)?.to_dtype(dtype)? }; values.insert(node.output[0].clone(), output); } "ArgMin" => { let input = get(&node.input[0])?; let axis_i64: i64 = get_attr_opt(node, "axis")?.copied().unwrap_or(0); let rank_i64: i64 = input.rank().try_into().unwrap(); if axis_i64 < -rank_i64 || axis_i64 >= rank_i64 { bail!( "axis ({}) out of accepted range [-rank, rank-1] which was [{}, {}]", axis_i64, -rank_i64, rank_i64 - 1 ) } let axis = input.normalize_axis(axis_i64)?; let keepdims: i64 = get_attr_opt(node, "keepdims")?.copied().unwrap_or(1); let select_last_index: i64 = get_attr_opt(node, "select_last_index")? .copied() .unwrap_or(0); if select_last_index == 1 { bail!("select_last_index for ArgMin is currently not supported") } let output = if keepdims == 1 { input.argmin_keepdim(axis)? } else { input.argmin(axis)? } .to_dtype(DType::I64)?; values.insert(node.output[0].clone(), output); } "ArgMax" => { let input = get(&node.input[0])?; let axis_i64: i64 = get_attr_opt(node, "axis")?.copied().unwrap_or(0); let rank_i64: i64 = input.rank().try_into().unwrap(); if axis_i64 < -rank_i64 || axis_i64 >= rank_i64 { bail!( "axis ({}) out of accepted range [-rank, rank-1] which was [{}, {}]", axis_i64, -rank_i64, rank_i64 - 1 ) } let axis = input.normalize_axis(axis_i64)?; let keepdims: i64 = get_attr_opt(node, "keepdims")?.copied().unwrap_or(1); let select_last_index: i64 = get_attr_opt(node, "select_last_index")? .copied() .unwrap_or(0); if select_last_index == 1 { bail!("select_last_index for ArgMin is currently not supported") } let output = if keepdims == 1 { input.argmax_keepdim(axis)? } else { input.argmax(axis)? 
} .to_dtype(DType::I64)?; values.insert(node.output[0].clone(), output); } "LeakyRelu" => { let input = get(&node.input[0])?; let dt = input.dtype(); match dt { DType::U8 | DType::U32 | DType::I64 => { bail!( "unsupported dtype {}, only float types are allowed for LeakyRelu", dt.as_str() ) } DType::BF16 | DType::F16 | DType::F32 | DType::F64 => {} } let alpha = get_attr_opt::<f32>(node, "alpha")?.copied().unwrap_or(0.01); let output = candle_nn::ops::leaky_relu(input, alpha.into())?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gemm "Gemm" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let c = get(&node.input[2])?; let alpha = get_attr_opt::<f32>(node, "alpha")?.copied().unwrap_or(1.0); let beta = get_attr_opt::<f32>(node, "beta")?.copied().unwrap_or(1.0); let alpha = Tensor::full(alpha, a.shape(), &Device::Cpu)?; let beta = Tensor::full(beta, c.shape(), &Device::Cpu)?; let trans_a = get_attr_opt::<i64>(node, "transA")?.copied().unwrap_or(0); let trans_b = get_attr_opt::<i64>(node, "transB")?.copied().unwrap_or(0); let a = if trans_a == 0 { a.clone() } else { a.t()? }; let b = if trans_b == 0 { b.clone() } else { b.t()? }; let output = a .broadcast_mul(&alpha)? .broadcast_matmul(&b)? .broadcast_add(&c.broadcast_mul(&beta)?)?; values.insert(node.output[0].clone(), output); } "LSTM" => { let direction = get_attr_opt(node, "direction")?.unwrap_or("forward"); if direction != "forward" { bail!("LSTM currently only supports direction == \"forward\""); } let num_directions = if direction == "bidirectional" { 2 } else { 1 }; let hidden_size: i64 = get_attr(node, "hidden_size").copied()?; let input_forget = get_attr_opt(node, "input_forget")?.copied().unwrap_or(0); if input_forget != 0 { bail!("LSTM currently only supports input_forget == 0"); } let activations_default = vec![ "Sigmoid".to_string(), "Tanh".to_string(), "Tanh".to_string(), ]; let activations = get_attr_opt_owned::<Vec<String>>(node, "activations")? .unwrap_or(activations_default.clone()); if activations != activations_default { bail!("LSTM currently only supports default activations ({activations_default:?})"); } // activation_alpha and activation_beta don't apply to (Sigmoid, Tanh, Tanh) so ignoring them is okay if get_attr_opt::<f32>(node, "clip")?.is_some() { bail!("LSTM does not currently support clip attribute"); } // The shape format of inputs X, initial_h and outputs Y, Y_h. // If 0, the following shapes are expected: // X.shape = [seq_length, batch_size, input_size], // Y.shape = [seq_length, num_directions, batch_size, hidden_size], // initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. // If 1, the following shapes are expected: // X.shape = [batch_size, seq_length, input_size], // Y.shape = [batch_size, seq_length, num_directions, hidden_size], // initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. let layout = get_attr_opt(node, "layout")?.copied().unwrap_or(0); if layout != 0 { bail!("LSTM currently only supports layout == 0"); } // The input sequences packed (and potentially padded) into one 3-D tensor // with the shape of `[seq_length, batch_size, input_size]`. let x = get(&node.input[0])?; // XXX: depends on layout let (seq_length, batch_size, input_size) = x.dims3()?; // The weight tensor for the gates. // Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. // The tensor has shape `[num_directions, 4*hidden_size, input_size]`. 
let w = get(&node.input[1])?; // The recurrence weight tensor. // Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. // This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`. let r = get(&node.input[2])?; let get_opt = |i: usize| { node.input .get(i) .filter(|s: &&String| !s.is_empty()) .map(|s| get(s)) }; // The bias tensor for input gate. // Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. // This tensor has shape `[num_directions, 8*hidden_size]`. // Optional: If not specified - assumed to be 0. let b_default: Tensor; let b = match get_opt(3) { Some(n) => n?, None => { b_default = Tensor::zeros( (num_directions, 8 * hidden_size as usize), DType::F32, x.device(), )?; &b_default } }; // Optional tensor specifying lengths of the sequences in a batch. // If not specified - assumed all sequences in the batch to have length `seq_length`. // It has shape `[batch_size]`. let seq_lens_default: Tensor; let seq_lens = match get_opt(4) { Some(n) => n?, None => { seq_lens_default = Tensor::full(seq_length as i64, (batch_size,), x.device())?; &seq_lens_default } }; let seq_lens_is_default = (seq_lens.to_vec1::<i64>()?.iter()).all(|e| *e as usize == seq_length); if !seq_lens_is_default { bail!("LSTM currently only supports default value of seq_lens"); } // Optional initial value of the hidden. If not specified - assumed to be 0. // It has shape `[num_directions, batch_size, hidden_size]`. let initial_h_default: Tensor; let initial_h = match get_opt(5) { Some(n) => n?, _ => { initial_h_default = Tensor::zeros( (num_directions, batch_size, hidden_size as usize), DType::F32, x.device(), )?; &initial_h_default } }; // Optional initial value of the cell. // If not specified - assumed to be 0. // It has shape `[num_directions, batch_size, hidden_size]`. let initial_c_default: Tensor; let initial_c = match node.input.get(6) { Some(n) if !n.is_empty() => get(n)?, _ => { initial_c_default = Tensor::zeros( (num_directions, batch_size, hidden_size as usize), DType::F32, x.device(), )?; &initial_c_default } }; // The weight tensor for peepholes. // Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. // It has shape `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed to be 0. let p_default = Tensor::zeros( (num_directions, 3 * hidden_size as usize), DType::F32, x.device(), )?; let p = get_opt(7).unwrap_or(Ok(&p_default))?; let p_is_zeros = (p.to_vec2::<f32>()?.iter()).all(|v| v.iter().all(|e| *e == 0.0)); if !p_is_zeros { bail!( "LSTM currently only supports default value of p (a Tensor of all zeroes)" ); } // these all have [num_directions, ...] 
shapes let w = w.get(0)?; // w[iofc] has shape [4*hidden_size, input_size] let r = r.get(0)?; // r[iofc] has shape [4*hidden_size, hidden_size] let b = b.get(0)?; // concat of [wb[iofc],rb[iofc]] has shape [8*hidden_size] let idx_wb = Tensor::arange(0 * hidden_size, 4 * hidden_size, x.device())?; let idx_rb = Tensor::arange(4 * hidden_size, 8 * hidden_size, x.device())?; let wb = b.index_select(&idx_wb, 0)?; let rb = b.index_select(&idx_rb, 0)?; let c = initial_c.get(0)?; let h = initial_h.get(0)?; // w, r, wb, rb are all iofc but lstm expects ifco // so we need to move some stuff around let idx_i = Tensor::arange(0 * hidden_size, 1 * hidden_size, x.device())?; let idx_o = Tensor::arange(1 * hidden_size, 2 * hidden_size, x.device())?; let idx_f = Tensor::arange(2 * hidden_size, 3 * hidden_size, x.device())?; let idx_c = Tensor::arange(3 * hidden_size, 4 * hidden_size, x.device())?; let idx_ifco = Tensor::cat(&[&idx_i, &idx_f, &idx_c, &idx_o], 0)?; let w = w.index_select(&idx_ifco, 0)?; let r = r.index_select(&idx_ifco, 0)?; let wb = wb.index_select(&idx_ifco, 0)?; let rb = rb.index_select(&idx_ifco, 0)?; let vmap = candle_nn::VarMap::new(); vmap.data().lock().unwrap().extend([ ("weight_ih_l0".to_string(), candle::Var::from_tensor(&w)?), ("weight_hh_l0".to_string(), candle::Var::from_tensor(&r)?), ("bias_ih_l0".to_string(), candle::Var::from_tensor(&wb)?), ("bias_hh_l0".to_string(), candle::Var::from_tensor(&rb)?), ]); use candle_nn::rnn::RNN as _; let lstm = candle_nn::rnn::lstm( input_size, hidden_size as usize, candle_nn::rnn::LSTMConfig::default(), candle_nn::VarBuilder::from_varmap(&vmap, w.dtype(), w.device()), )?; let mut lstm_state = candle_nn::rnn::LSTMState::new(h, c); let mut h_acc = if node.output.get(0).map(String::as_str).unwrap_or("") != "" { Some(vec![]) } else { None }; for t in 0..seq_length { let x = x.get(t)?; lstm_state = lstm.step(&x, &lstm_state)?; if let Some(h_acc) = &mut h_acc { h_acc.push(lstm_state.clone()); } } assert_eq!(num_directions, 1, "if support for bidirectional is ever added, outputs will have to be concatenated, not simply reshaped"); if let Some(name) = node.output.get(0) { let h_acc = h_acc.as_ref().unwrap(); let h_acc = lstm.states_to_tensor(h_acc)?; let h_acc = h_acc.reshape(( seq_length, num_directions, batch_size, hidden_size as usize, ))?; values.insert(name.clone(), h_acc); } if let Some(name) = node.output.get(1) { values.insert( name.clone(), lstm_state.h().reshape(( num_directions, batch_size, hidden_size as usize, ))?, ); } if let Some(name) = node.output.get(2) { values.insert( name.clone(), lstm_state.c().reshape(( num_directions, batch_size, hidden_size as usize, ))?, ); } } op_type => bail!("unsupported op_type {op_type} for op {node:?}"), } } graph .output .iter() .map(|output| match values.remove(&output.name) { None => bail!("cannot find output {}", output.name), Some(value) => Ok((output.name.clone(), value)), }) .collect() }
candle/candle-onnx/src/eval.rs/0
{ "file_path": "candle/candle-onnx/src/eval.rs", "repo_id": "candle", "token_count": 42834 }
42
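A minimal usage sketch for the evaluator above, not part of the file itself: it assumes the crate's `read_file` helper for decoding the protobuf and the re-exported `simple_eval`, and the "input"/"output" tensor names and shape are placeholders that depend on the actual graph being run.

// Hedged usage sketch (assumption: candle_onnx re-exports read_file and simple_eval;
// the "input"/"output" names below are placeholders for the model's real graph names).
use std::collections::HashMap;

use candle::{DType, Device, Tensor};

fn run_onnx(path: &str) -> candle::Result<Tensor> {
    // Decode the .onnx protobuf into an onnx::ModelProto.
    let model = candle_onnx::read_file(path)?;
    // Inputs are keyed by the graph's input names; graph initializers are
    // inserted automatically inside simple_eval_ before the nodes are run.
    let mut inputs = HashMap::new();
    inputs.insert(
        "input".to_string(),
        Tensor::zeros((1, 3, 224, 224), DType::F32, &Device::Cpu)?,
    );
    let mut outputs = candle_onnx::simple_eval(&model, inputs)?;
    // The returned map is keyed by the graph's declared output names.
    outputs
        .remove("output")
        .ok_or_else(|| candle::Error::Msg("missing graph output".to_string()))
}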
import candle from typing import Dict, Tuple, Any from candle import Tensor, QTensor, utils, nn from candle.nn import Module, ModuleList def masked_fill(on_false: Tensor, mask: Tensor, on_true: Tensor): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) return mask.where_cond(on_true, on_false) def precompute_freqs_cis(hparams: Dict[str, Any], freq_base: float, max_seq_len: int): head_dim = hparams["n_embd"] // hparams["n_head"] theta = [1.0 / freq_base ** (i / head_dim) for i in range(0, head_dim, 2)] theta = candle.tensor(theta) idx_theta = [float(i) for i in range(max_seq_len)] idx_theta = candle.tensor(idx_theta).reshape((max_seq_len, 1)) m = idx_theta.matmul(theta.unsqueeze(0)) return (m.cos(), m.sin()) class RmsNorm(Module): def __init__(self, qtensor: QTensor): super().__init__() self.weight = qtensor.dequantize() def forward(self, x: Tensor) -> Tensor: b_size, seq_len, hidden_size = x.shape norm_x = x.sqr().sum_keepdim(2) / hidden_size x_normed = x.broadcast_div((norm_x + 1e-5).sqrt()) return x_normed.broadcast_mul(self.weight) class QuantizedLayer(Module): def __init__( self, layer_idx: int, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor], cos_sin: Tuple[Tensor, Tensor], ): super().__init__() p = f"layers.{layer_idx}" self.attention_wq = all_tensors[f"{p}.attention.wq.weight"] self.attention_wk = all_tensors[f"{p}.attention.wk.weight"] self.attention_wv = all_tensors[f"{p}.attention.wv.weight"] self.attention_wo = all_tensors[f"{p}.attention.wo.weight"] self.ffw1 = all_tensors[f"{p}.feed_forward.w1.weight"] self.ffw2 = all_tensors[f"{p}.feed_forward.w2.weight"] self.ffw3 = all_tensors[f"{p}.feed_forward.w3.weight"] self.attn_norm = RmsNorm(all_tensors[f"{p}.attention_norm.weight"]) self.ffn_norm = RmsNorm(all_tensors[f"{p}.ffn_norm.weight"]) self.n_head = hparams["n_head"] self.n_kv_head = self.n_head self.head_dim = hparams["n_embd"] // self.n_head self.kv_cache = None self.cos = cos_sin[0] self.sin = cos_sin[1] self._non_persistent_buffers_set.add("cos") self._non_persistent_buffers_set.add("sin") def forward(self, x: Tensor, mask: Tensor, index_pos: int) -> Tensor: residual = x x = self.attn_norm(x) attn = self.forward_attn(x, mask, index_pos) x = attn + residual residual = x x = self.ffn_norm(x) w1 = self.ffw1.matmul_t(x) w3 = self.ffw3.matmul_t(x) mlp = self.ffw2.matmul_t(nn.silu(w1) * w3) return mlp + residual def forward_attn(self, x: Tensor, mask: Tensor, index_pos: int): b_size, seq_len, n_embd = x.shape q = self.attention_wq.matmul_t(x) k = self.attention_wk.matmul_t(x) v = self.attention_wv.matmul_t(x) q = q.reshape((b_size, seq_len, self.n_head, self.head_dim)).transpose(1, 2) k = k.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2) v = v.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2) q = self.apply_rotary_emb(q, index_pos) k = self.apply_rotary_emb(k, index_pos) if self.kv_cache is not None and index_pos > 0: prev_k, prev_v = self.kv_cache k = candle.cat([prev_k, k], 2).contiguous() v = candle.cat([prev_v, v], 2).contiguous() self.kv_cache = (k, v) # TODO: maybe repeat k/v here if we start supporting MQA. 
att = q.matmul(k.t()) / self.head_dim**0.5 mask = mask.broadcast_as(att.shape) att = masked_fill(att, mask, float("-inf")) att = nn.softmax(att, -1) y = att.matmul(v.contiguous()) y = y.transpose(1, 2).reshape((b_size, seq_len, n_embd)) return self.attention_wo.matmul_t(y) def apply_rotary_emb(self, x: Tensor, index_pos: int): b_size, n_head, seq_len, n_embd = x.shape cos = self.cos.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1)) sin = self.sin.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1)) x = x.reshape((b_size, n_head, seq_len, n_embd // 2, 2)) x0 = x.narrow(-1, 0, 1) x1 = x.narrow(-1, 1, 1) y0 = x0.broadcast_mul(cos) - x1.broadcast_mul(sin) y1 = x0.broadcast_mul(sin) + x1.broadcast_mul(cos) rope = candle.cat([y0, y1], -1) return rope.flatten_from(-2) class QuantizedLlama(Module): def __init__(self, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor]): super().__init__() self.tok_embeddings = all_tensors["tok_embeddings.weight"].dequantize() self.norm = RmsNorm(all_tensors["norm.weight"]) self.output = all_tensors["output.weight"] self.layers = ModuleList() rope_freq = hparams.get("rope_freq", 10000.0) cos_sin = precompute_freqs_cis(hparams, rope_freq, hparams["context_length"]) for layer_idx in range(hparams["n_layer"]): layer = QuantizedLayer(layer_idx, hparams, all_tensors, cos_sin) self.layers.append(layer) def forward(self, token: Tensor, index_pos: int) -> Tensor: b_size, seq_len = token.shape vocab_size, hidden_size = self.tok_embeddings.shape token = token.reshape((b_size * seq_len,)) x = self.tok_embeddings.index_select(token, 0) x = x.reshape((b_size, seq_len, hidden_size)) mask = [int(j > i) for j in range(seq_len) for i in range(seq_len)] mask = candle.tensor(mask).reshape((seq_len, seq_len)) for layer in self.layers: x = layer(x, mask, index_pos) x = self.norm(x) x = x.narrow(1, -1, 1).squeeze(1) x = self.output.matmul_t(x) return x
candle/candle-pyo3/py_src/candle/models/llama.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/llama.py", "repo_id": "candle", "token_count": 2981 }
43
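A short, hedged decoding sketch for the QuantizedLlama module above, not part of the file: it assumes `hparams` and `all_tensors` were loaded elsewhere (e.g. from a GGML/GGUF checkpoint) with the key names this module expects, and it only relies on calls already used in the file (`candle.tensor`, `reshape`, module `__call__`) plus `Tensor.values()` from the bindings.

# Hedged usage sketch (assumption: `model = QuantizedLlama(hparams, all_tensors)` was
# built from a checkpoint loaded elsewhere; token ids are plain Python ints).
import candle

def greedy_generate(model, prompt_tokens, max_new_tokens=32):
    tokens = list(prompt_tokens)
    # The first call processes the whole prompt at index_pos=0; later calls feed a
    # single token and advance index_pos so each layer's kv_cache and the rotary
    # embeddings stay aligned with the absolute position in the sequence.
    feed, index_pos = tokens, 0
    for _ in range(max_new_tokens):
        x = candle.tensor(feed).reshape((1, len(feed)))
        logits = model(x, index_pos)   # shape (1, vocab_size)
        row = logits.values()[0]       # plain Python list of floats
        next_token = max(range(len(row)), key=row.__getitem__)
        tokens.append(next_token)
        index_pos += len(feed)
        feed = [next_token]
    return tokens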
#![allow(clippy::redundant_closure_call)] use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; use pyo3::pyclass::CompareOp; use pyo3::types::{IntoPyDict, PyDict, PyTuple}; use pyo3::ToPyObject; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::os::raw::c_long; use std::sync::Arc; use half::{bf16, f16}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use ::candle::{quantized::QTensor, DType, Device, Module, Tensor, WithDType}; mod utils; use utils::wrap_err; mod shape; use shape::{PyShape, PyShapeWithHole}; #[cfg(feature = "onnx")] mod onnx; #[derive(Clone, Debug)] #[pyclass(name = "Tensor")] /// A `candle` tensor. struct PyTensor(Tensor); impl std::ops::Deref for PyTensor { type Target = Tensor; fn deref(&self) -> &Self::Target { &self.0 } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[pyclass(name = "DType")] /// A `candle` dtype. struct PyDType(DType); #[pymethods] impl PyDType { fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } } impl PyDType { fn from_pyobject(ob: PyObject, py: Python<'_>) -> PyResult<Self> { use std::str::FromStr; if let Ok(dtype) = ob.extract::<String>(py) { let dtype = DType::from_str(&dtype) .map_err(|_| PyTypeError::new_err(format!("invalid dtype '{dtype}'")))?; Ok(Self(dtype)) } else { ob.extract(py) } } } static CUDA_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); static METAL_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum PyDevice { Cpu, Cuda, Metal, } impl PyDevice { fn from_device(device: &Device) -> Self { match device { Device::Cpu => Self::Cpu, Device::Cuda(_) => Self::Cuda, Device::Metal(_) => Self::Metal, } } fn as_device(&self) -> PyResult<Device> { match self { Self::Cpu => Ok(Device::Cpu), Self::Cuda => { let mut device = CUDA_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_cuda(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } Self::Metal => { let mut device = METAL_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_metal(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } } } } impl<'source> FromPyObject<'source> for PyDevice { fn extract(ob: &'source PyAny) -> PyResult<Self> { let device: String = ob.extract()?; let device = match device.as_str() { "cpu" => PyDevice::Cpu, "cuda" => PyDevice::Cuda, _ => Err(PyTypeError::new_err(format!("invalid device '{device}'")))?, }; Ok(device) } } impl ToPyObject for PyDevice { fn to_object(&self, py: Python<'_>) -> PyObject { let str = match self { PyDevice::Cpu => "cpu", PyDevice::Cuda => "cuda", PyDevice::Metal => "metal", }; str.to_object(py) } } trait PyWithDType: WithDType { fn to_py(&self, py: Python<'_>) -> PyObject; } macro_rules! 
pydtype { ($ty:ty, $conv:expr) => { impl PyWithDType for $ty { fn to_py(&self, py: Python<'_>) -> PyObject { $conv(*self).to_object(py) } } }; } pydtype!(i64, |v| v); pydtype!(u8, |v| v); pydtype!(u32, |v| v); pydtype!(f16, f32::from); pydtype!(bf16, f32::from); pydtype!(f32, |v| v); pydtype!(f64, |v| v); fn actual_index(t: &Tensor, dim: usize, index: i64) -> ::candle::Result<usize> { let dim = t.dim(dim)?; if 0 <= index { let index = index as usize; if dim <= index { ::candle::bail!("index {index} is too large for tensor dimension {dim}") } Ok(index) } else { if (dim as i64) < -index { ::candle::bail!("index {index} is too low for tensor dimension {dim}") } Ok((dim as i64 + index) as usize) } } fn actual_dim(t: &Tensor, dim: i64) -> ::candle::Result<usize> { let rank = t.rank(); if 0 <= dim { let dim = dim as usize; if rank <= dim { ::candle::bail!("dimension index {dim} is too large for tensor rank {rank}") } Ok(dim) } else { if (rank as i64) < -dim { ::candle::bail!("dimension index {dim} is too low for tensor rank {rank}") } Ok((rank as i64 + dim) as usize) } } // TODO: Something similar to this should probably be a part of candle core. trait MapDType { type Output; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output>; fn map(&self, t: &Tensor) -> PyResult<Self::Output> { match t.dtype() { DType::U8 => self.f::<u8>(t), DType::U32 => self.f::<u32>(t), DType::I64 => self.f::<i64>(t), DType::BF16 => self.f::<bf16>(t), DType::F16 => self.f::<f16>(t), DType::F32 => self.f::<f32>(t), DType::F64 => self.f::<f64>(t), } } } enum Indexer { Index(usize), Slice(usize, usize), Ellipsis, Expand, IndexSelect(Tensor), } #[derive(Clone, Debug)] struct TorchTensor(PyObject); impl<'source> pyo3::FromPyObject<'source> for TorchTensor { fn extract(ob: &'source PyAny) -> PyResult<Self> { let numpy_value: PyObject = ob.getattr("numpy")?.call0()?.extract()?; Ok(TorchTensor(numpy_value)) } } #[pymethods] impl PyTensor { #[new] #[pyo3(text_signature = "(self, data:_ArrayLike)")] // TODO: Handle arbitrary input dtype and shape. /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. fn new(py: Python<'_>, data: PyObject) -> PyResult<Self> { use Device::Cpu; let tensor = if let Ok(vs) = data.extract::<u32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<i64>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<f32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<u32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<i64>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<f32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<u32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<i64>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<f32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<u32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<i64>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<f32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? 
} else if let Ok(TorchTensor(numpy)) = data.extract::<TorchTensor>(py) { return PyTensor::new(py, numpy); } else { let ty = data.bind(py).get_type(); Err(PyTypeError::new_err(format!( "incorrect type {ty} for tensor" )))? }; Ok(Self(tensor)) } /// Gets the tensor's data as a Python scalar or array-like object. /// &RETURNS&: _ArrayLike fn values(&self, py: Python<'_>) -> PyResult<PyObject> { struct M<'a>(Python<'a>); impl<'a> MapDType for M<'a> { type Output = PyObject; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output> { match t.rank() { 0 => Ok(t.to_scalar::<T>().map_err(wrap_err)?.to_py(self.0)), 1 => { let v = t.to_vec1::<T>().map_err(wrap_err)?; let v = v.iter().map(|v| v.to_py(self.0)).collect::<Vec<_>>(); Ok(v.to_object(self.0)) } 2 => { let v = t.to_vec2::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect::<Vec<Vec<_>>>(); Ok(v.to_object(self.0)) } 3 => { let v = t.to_vec3::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| { v.iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect() }) .collect::<Vec<Vec<Vec<_>>>>(); Ok(v.to_object(self.0)) } n => Err(PyTypeError::new_err(format!( "TODO: conversion to PyObject is not handled for rank {n}" )))?, } } } // TODO: Handle arbitrary shapes. M(py).map(self) } /// Converts candle's tensor to pytorch's tensor /// &RETURNS&: torch.Tensor fn to_torch(&self, py: Python<'_>) -> PyResult<PyObject> { let candle_values = self.values(py)?; let torch_tensor: PyObject = py .import_bound("torch")? .getattr("tensor")? .call1((candle_values,))? .extract()?; Ok(torch_tensor) } #[getter] /// Gets the tensor's shape. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.dims()).to_object(py) } #[getter] /// Gets the tensor's element count. /// &RETURNS&: int fn nelement(&self) -> usize { self.0.elem_count() } #[getter] /// Gets the tensor's strides. /// &RETURNS&: Tuple[int] fn stride(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.stride()).to_object(py) } #[getter] /// Gets the tensor's dtype. /// &RETURNS&: DType fn dtype(&self) -> PyDType { PyDType(self.0.dtype()) } #[getter] /// Gets the tensor's device. /// &RETURNS&: Device fn device(&self, py: Python<'_>) -> PyObject { PyDevice::from_device(self.0.device()).to_object(py) } #[getter] /// Gets the tensor's rank. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } fn __repr__(&self) -> String { format!("{}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Performs the `abs` operation on the tensor. /// &RETURNS&: Tensor fn abs(&self) -> PyResult<Self> { Ok(PyTensor(self.0.abs().map_err(wrap_err)?)) } /// Performs the `sin` operation on the tensor. /// &RETURNS&: Tensor fn sin(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sin().map_err(wrap_err)?)) } /// Performs the `cos` operation on the tensor. /// &RETURNS&: Tensor fn cos(&self) -> PyResult<Self> { Ok(PyTensor(self.0.cos().map_err(wrap_err)?)) } /// Performs the `log` operation on the tensor. /// &RETURNS&: Tensor fn log(&self) -> PyResult<Self> { Ok(PyTensor(self.0.log().map_err(wrap_err)?)) } /// Squares the tensor. /// &RETURNS&: Tensor fn sqr(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqr().map_err(wrap_err)?)) } /// Calculates the square root of the tensor. /// &RETURNS&: Tensor fn sqrt(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqrt().map_err(wrap_err)?)) } /// Get the `recip` of the tensor. 
/// &RETURNS&: Tensor fn recip(&self) -> PyResult<Self> { Ok(PyTensor(self.0.recip().map_err(wrap_err)?)) } /// Performs the `exp` operation on the tensor. /// &RETURNS&: Tensor fn exp(&self) -> PyResult<Self> { Ok(PyTensor(self.0.exp().map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, p:float)")] /// Performs the `pow` operation on the tensor with the given exponent. /// &RETURNS&: Tensor fn powf(&self, p: f64) -> PyResult<Self> { Ok(PyTensor(self.0.powf(p).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor, dim:int)")] /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` is argument is an int tensor with a single dimension. /// The output has the same number of dimension as the `self` input. The target dimension of /// the output has length the length of `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. /// &RETURNS&: Tensor fn index_select(&self, rhs: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.index_select(rhs, dim).map_err(wrap_err)?)) } /// Gathers values along an axis specified by dim. fn gather(&self, index: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.gather(index, dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Performs a matrix multiplication between the two tensors. /// &RETURNS&: Tensor fn matmul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.matmul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_add(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_add(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_sub(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_sub(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_mul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_mul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_div(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_div(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, on_true:Tensor, on_false:Tensor)")] /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. /// &RETURNS&: Tensor fn where_cond(&self, on_true: &Self, on_false: &Self) -> PyResult<Self> { Ok(PyTensor( self.0.where_cond(on_true, on_false).map_err(wrap_err)?, )) } #[getter] /// Index a tensor. 
/// &RETURNS&: Tensor fn __getitem__(&self, py: Python, idx: PyObject) -> PyResult<Self> { let mut indexers: Vec<Indexer> = vec![]; let dims = self.0.shape().dims(); fn to_absolute_index(index: isize, current_dim: usize, dims: &[usize]) -> PyResult<usize> { // Convert a relative index to an absolute index e.g. tensor[-1] -> tensor[0] let actual_index = if index < 0 { dims[current_dim] as isize + index } else { index }; // Check that the index is in range if actual_index < 0 || actual_index >= dims[current_dim] as isize { return Err(PyValueError::new_err(format!( "index out of range for dimension '{i}' with indexer '{value}'", i = current_dim, value = index ))); } Ok(actual_index as usize) } fn extract_indexer( py_indexer: &Bound<PyAny>, current_dim: usize, dims: &[usize], index_argument_count: usize, ) -> PyResult<(Indexer, usize)> { if let Ok(index) = py_indexer.extract() { // Handle a single index e.g. tensor[0] or tensor[-1] Ok(( Indexer::Index(to_absolute_index(index, current_dim, dims)?), current_dim + 1, )) } else if let Ok(slice) = py_indexer.downcast::<pyo3::types::PySlice>() { // Handle a single slice e.g. tensor[0:1] or tensor[0:-1] let index = slice.indices(dims[current_dim] as c_long)?; Ok(( Indexer::Slice(index.start as usize, index.stop as usize), current_dim + 1, )) } else if let Ok(tensor) = py_indexer.extract::<PyTensor>() { // Handle a tensor as indices e.g. tensor[tensor([0,1])] let t = tensor.0; if t.rank() != 1 { return Err(PyTypeError::new_err( "multi-dimensional tensor indexing is not supported", )); } Ok((Indexer::IndexSelect(t), current_dim + 1)) } else if let Ok(list) = py_indexer.downcast::<pyo3::types::PyList>() { // Handle a list of indices e.g. tensor[[0,1]] let mut indexes = vec![]; for item in list.iter() { let index = item.extract::<i64>()?; indexes.push(index); } Ok(( Indexer::IndexSelect( Tensor::from_vec(indexes, list.len(), &Device::Cpu).map_err(wrap_err)?, ), current_dim + 1, )) } else if py_indexer.is(&py_indexer.py().Ellipsis()) { // Handle '...' e.g. tensor[..., 0] if current_dim > 0 { return Err(PyTypeError::new_err( "Ellipsis ('...') can only be used at the start of an indexing operation", )); } Ok((Indexer::Ellipsis, dims.len() - (index_argument_count - 1))) } else if py_indexer.is_none() { // Handle None e.g. tensor[None, 0] Ok((Indexer::Expand, current_dim)) } else { Err(PyTypeError::new_err(format!( "unsupported indexer {}", py_indexer ))) } } if let Ok(tuple) = idx.downcast_bound::<pyo3::types::PyTuple>(py) { let not_none_count: usize = tuple.iter().filter(|x| !x.is_none()).count(); if not_none_count > dims.len() { return Err(PyValueError::new_err("provided too many indices")); } let mut current_dim = 0; for item in tuple.iter() { let (indexer, new_current_dim) = extract_indexer(&item, current_dim, dims, not_none_count)?; current_dim = new_current_dim; indexers.push(indexer); } } else { let (indexer, _) = extract_indexer(idx.downcast_bound::<PyAny>(py)?, 0, dims, 1)?; indexers.push(indexer); } let mut x = self.0.clone(); let mut current_dim = 0; // Apply the indexers for indexer in indexers.iter() { x = match indexer { Indexer::Index(n) => x .narrow(current_dim, *n, 1) .map_err(wrap_err)? 
.squeeze(current_dim) .map_err(wrap_err)?, Indexer::Slice(start, stop) => { let out = x .narrow(current_dim, *start, stop.saturating_sub(*start)) .map_err(wrap_err)?; current_dim += 1; out } Indexer::Ellipsis => { // Ellipsis is a special case, it means that all remaining dimensions should be // selected => advance the current_dim to the last dimension we have indexers for current_dim += dims.len() - (indexers.len() - 1); x } Indexer::Expand => { // Expand is a special case, it means that a new dimension should be added => unsqueeze and advance the current_dim let out = x.unsqueeze(current_dim).map_err(wrap_err)?; current_dim += 1; out } Indexer::IndexSelect(indexes) => { let out = x .index_select( &indexes.to_device(x.device()).map_err(wrap_err)?, current_dim, ) .map_err(wrap_err)?; current_dim += 1; out } } } Ok(Self(x)) } /// Add two tensors. /// &RETURNS&: Tensor fn __add__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_add(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 + rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for add"))? }; Ok(Self(tensor)) } fn __radd__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { self.__add__(rhs) } /// Multiply two tensors. /// &RETURNS&: Tensor fn __mul__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_mul(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 * rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for mul"))? }; Ok(Self(tensor)) } fn __rmul__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { self.__mul__(rhs) } /// Subtract two tensors. /// &RETURNS&: Tensor fn __sub__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_sub(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 - rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for sub"))? }; Ok(Self(tensor)) } /// Divide two tensors. /// &RETURNS&: Tensor fn __truediv__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_div(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 / rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for div"))? }; Ok(Self(tensor)) } /// Rich-compare two tensors. /// &RETURNS&: Tensor fn __richcmp__(&self, rhs: &Bound<PyAny>, op: CompareOp) -> PyResult<Self> { let compare = |lhs: &Tensor, rhs: &Tensor| { let t = match op { CompareOp::Eq => lhs.eq(rhs), CompareOp::Ne => lhs.ne(rhs), CompareOp::Lt => lhs.lt(rhs), CompareOp::Le => lhs.le(rhs), CompareOp::Gt => lhs.gt(rhs), CompareOp::Ge => lhs.ge(rhs), }; Ok(PyTensor(t.map_err(wrap_err)?)) }; if let Ok(rhs) = rhs.extract::<PyTensor>() { if self.0.shape() == rhs.0.shape() { compare(&self.0, &rhs.0) } else { // We broadcast manually here because `candle.cmp` does not support automatic broadcasting let broadcast_shape = self .0 .shape() .broadcast_shape_binary_op(rhs.0.shape(), "cmp") .map_err(wrap_err)?; let broadcasted_lhs = self.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; let broadcasted_rhs = rhs.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; compare(&broadcasted_lhs, &broadcasted_rhs) } } else if let Ok(rhs) = rhs.extract::<f64>() { let scalar_tensor = Tensor::new(rhs, self.0.device()) .map_err(wrap_err)? 
.to_dtype(self.0.dtype()) .map_err(wrap_err)? .broadcast_as(self.0.shape()) .map_err(wrap_err)?; compare(&self.0, &scalar_tensor) } else { return Err(PyTypeError::new_err("unsupported rhs for __richcmp__")); } } fn __hash__(&self) -> u64 { // we have overridden __richcmp__ => py03 wants us to also override __hash__ // we simply hash the address of the tensor let mut hasher = DefaultHasher::new(); let pointer = &self.0 as *const Tensor; let address = pointer as usize; address.hash(&mut hasher); hasher.finish() } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Reshapes the tensor to the given shape. /// &RETURNS&: Tensor fn reshape(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .reshape(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape. /// &RETURNS&: Tensor fn broadcast_as(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_as(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape, adding new dimensions on the left. /// &RETURNS&: Tensor fn broadcast_left(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_left(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with the specified dimension removed if its size was one. /// &RETURNS&: Tensor fn squeeze(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.squeeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with a dimension of size one inserted at the specified position. /// &RETURNS&: Tensor fn unsqueeze(&self, dim: usize) -> PyResult<Self> { Ok(PyTensor(self.0.unsqueeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, index:int)")] /// Gets the value at the specified index. /// &RETURNS&: Tensor fn get(&self, index: i64) -> PyResult<Self> { let index = actual_index(self, 0, index).map_err(wrap_err)?; Ok(PyTensor(self.0.get(index).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim1:int, dim2:int)")] /// Returns a tensor that is a transposed version of the input, the given dimensions are swapped. /// &RETURNS&: Tensor fn transpose(&self, dim1: usize, dim2: usize) -> PyResult<Self> { Ok(PyTensor(self.0.transpose(dim1, dim2).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int, start:int, len:int)")] /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. /// &RETURNS&: Tensor fn narrow(&self, dim: i64, start: i64, len: usize) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; let start = actual_index(self, dim, start).map_err(wrap_err)?; Ok(PyTensor(self.0.narrow(dim, start, len).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the maximum value(s) across the selected dimension. /// &RETURNS&: Tensor fn argmax_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmax_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the minimum value(s) across the selected dimension. 
/// &RETURNS&: Tensor fn argmin_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmin_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the maximum value across the selected dimension. /// &RETURNS&: Tensor fn max_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.max_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the minimum value across the selected dimension. /// &RETURNS&: Tensor fn min_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.min_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:Union[int, List[int]])")] /// Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. /// &RETURNS&: Tensor fn sum_keepdim(&self, dims: PyObject, py: Python<'_>) -> PyResult<Self> { let dims = if let Ok(dim) = dims.extract::<usize>(py) { vec![dim] } else { dims.extract::<Vec<usize>>(py)? }; Ok(PyTensor( self.0.sum_keepdim(dims.as_slice()).map_err(wrap_err)?, )) } /// Returns the sum of the tensor. /// &RETURNS&: Tensor fn sum_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sum_all().map_err(wrap_err)?)) } /// Returns the mean of the tensor. /// &RETURNS&: Tensor fn mean_all(&self) -> PyResult<Self> { let elements = self.0.elem_count(); let sum = self.0.sum_all().map_err(wrap_err)?; let mean = (sum / elements as f64).map_err(wrap_err)?; Ok(PyTensor(mean)) } #[pyo3(text_signature = "(self, dim:int)")] /// Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. /// &RETURNS&: Tensor fn flatten_from(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_from(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] ///Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). /// &RETURNS&: Tensor fn flatten_to(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_to(dim).map_err(wrap_err)?)) } /// Flattens the tensor into a 1D tensor. /// &RETURNS&: Tensor fn flatten_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.flatten_all().map_err(wrap_err)?)) } /// Transposes the tensor. /// &RETURNS&: Tensor fn t(&self) -> PyResult<Self> { Ok(PyTensor(self.0.t().map_err(wrap_err)?)) } /// Makes the tensor contiguous in memory. /// &RETURNS&: Tensor fn contiguous(&self) -> PyResult<Self> { Ok(PyTensor(self.0.contiguous().map_err(wrap_err)?)) } /// Returns true if the tensor is contiguous in C order. /// &RETURNS&: bool fn is_contiguous(&self) -> bool { self.0.is_contiguous() } /// Returns true if the tensor is contiguous in Fortran order. /// &RETURNS&: bool fn is_fortran_contiguous(&self) -> bool { self.0.is_fortran_contiguous() } /// Detach the tensor from the computation graph. /// &RETURNS&: Tensor fn detach(&self) -> Self { PyTensor(self.0.detach()) } /// Returns a copy of the tensor. /// &RETURNS&: Tensor fn copy(&self) -> PyResult<Self> { Ok(PyTensor(self.0.copy().map_err(wrap_err)?)) } #[pyo3(signature = (*args, **kwargs), text_signature = "(self, *args, **kwargs)")] /// Performs Tensor dtype and/or device conversion. 
/// &RETURNS&: Tensor fn to(&self, args: &Bound<PyTuple>, kwargs: Option<&Bound<PyDict>>) -> PyResult<Self> { let mut device: Option<PyDevice> = None; let mut dtype: Option<PyDType> = None; let mut other: Option<PyTensor> = None; fn handle_duplicates<T>( opt: &mut Option<T>, extraction_result: PyResult<T>, err_msg: &'static str, ) -> PyResult<()> { if let Ok(successful_extraction) = extraction_result { if opt.is_some() { return Err(PyValueError::new_err(err_msg)); } *opt = Some(successful_extraction); } Ok(()) } //handle args for arg in args.iter() { if arg.extract::<PyDevice>().is_ok() { handle_duplicates( &mut device, arg.extract::<PyDevice>(), "cannot specify multiple devices", )?; } else if arg.extract::<PyDType>().is_ok() { handle_duplicates( &mut dtype, arg.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } else if arg.extract::<PyTensor>().is_ok() { handle_duplicates( &mut other, arg.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } else { return Err(PyTypeError::new_err(format!( "unsupported argument type `{:#?}`", arg.get_type().name() ))); } } if let Some(kwargs) = kwargs { if let Ok(Some(any)) = kwargs.get_item("dtype") { handle_duplicates( &mut dtype, any.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } if let Ok(Some(any)) = kwargs.get_item("device") { handle_duplicates( &mut device, any.extract::<PyDevice>(), "cannot specify multiple devices", )?; } if let Ok(Some(any)) = kwargs.get_item("other") { handle_duplicates( &mut other, any.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } } if let Some(other) = other { if device.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a device", )); } if dtype.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a dtype", )); } dtype = Some(other.dtype()); device = Some(PyDevice::from_device(other.0.device())); } let result = match (device, dtype) { (Some(device), Some(dtype)) => self .0 .to_device(&device.as_device()?) .map_err(wrap_err)? .to_dtype(dtype.0) .map_err(wrap_err)?, (Some(device), None) => self.0.to_device(&device.as_device()?).map_err(wrap_err)?, (None, Some(dtype)) => self.0.to_dtype(dtype.0).map_err(wrap_err)?, (None, None) => return Err(PyTypeError::new_err("No valid dtype or device specified")), }; Ok(PyTensor(result)) } #[pyo3(text_signature = "(self, dtype:Union[str,DType])")] /// Convert the tensor to a new dtype. /// &RETURNS&: Tensor fn to_dtype(&self, dtype: PyObject, py: Python<'_>) -> PyResult<Self> { let dtype = PyDType::from_pyobject(dtype, py)?; Ok(PyTensor(self.0.to_dtype(dtype.0).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, device:Union[str,Device])")] /// Move the tensor to a new device. /// &RETURNS&: Tensor fn to_device(&self, device: PyDevice) -> PyResult<Self> { let device = device.as_device()?; Ok(PyTensor(self.0.to_device(&device).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, quantized_dtype:str)")] /// Quantize the tensor. 
/// &RETURNS&: QTensor fn quantize(&self, quantized_dtype: &str) -> PyResult<PyQTensor> { use ::candle::quantized; let res = match quantized_dtype.to_lowercase().as_str() { "q2k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q2K), "q3k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q3K), "q4_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_0), "q4_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_1), "q4k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4K), "q5_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_0), "q5_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_1), "q5k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5K), "q6k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q6K), "q8_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_0), "q8_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_1), "q8k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8K), "f16" => quantized::QTensor::quantize(self, quantized::GgmlDType::F16), "f32" => quantized::QTensor::quantize(self, quantized::GgmlDType::F32), dt => { return Err(PyErr::new::<PyValueError, _>(format!( "unknown quantized-dtype {dt}" ))) } }; Ok(PyQTensor(Arc::new(res.map_err(wrap_err)?))) } } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int )")] /// Concatenate the tensors across one axis. /// &RETURNS&: Tensor fn cat(tensors: Vec<PyTensor>, dim: i64) -> PyResult<PyTensor> { if tensors.is_empty() { return Err(PyErr::new::<PyValueError, _>("empty input to cat")); } let dim = actual_dim(&tensors[0], dim).map_err(wrap_err)?; let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::cat(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int)")] /// Stack the tensors along a new axis. /// &RETURNS&: Tensor fn stack(tensors: Vec<PyTensor>, dim: usize) -> PyResult<PyTensor> { let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::stack(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(data:_ArrayLike)")] /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. /// &RETURNS&: Tensor fn tensor(py: Python<'_>, data: PyObject) -> PyResult<PyTensor> { PyTensor::new(py, data) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values. /// &RETURNS&: Tensor fn rand(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::rand(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values from a normal distribution. 
/// &RETURNS&: Tensor fn randn(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::randn(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None),text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with ones. /// &RETURNS&: Tensor fn ones( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::ones(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None), text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with zeros. /// &RETURNS&: Tensor fn zeros( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::zeros(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[derive(Debug, Clone)] #[pyclass(name = "QTensor")] /// A quantized tensor. struct PyQTensor(Arc<QTensor>); impl std::ops::Deref for PyQTensor { type Target = QTensor; fn deref(&self) -> &Self::Target { self.0.as_ref() } } #[pymethods] impl PyQTensor { #[getter] ///Gets the tensors quantized dtype. /// &RETURNS&: str fn ggml_dtype(&self) -> String { format!("{:?}", self.0.dtype()) } #[getter] ///Gets the rank of the tensor. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } #[getter] ///Gets the shape of the tensor. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.shape().dims()).to_object(py) } fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Dequantizes the tensor. /// &RETURNS&: Tensor fn dequantize(&self) -> PyResult<PyTensor> { let tensor = self.0.dequantize(&Device::Cpu).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyo3(text_signature = "(self, lhs:Tensor)")] /// Performs a quantized matrix multiplication, with the quantized tensor as the right hand side. /// &RETURNS&: Tensor fn matmul_t(&self, lhs: &PyTensor) -> PyResult<PyTensor> { let qmatmul = ::candle::quantized::QMatMul::from_arc(self.0.clone()).map_err(wrap_err)?; let res = qmatmul.forward(lhs).map_err(wrap_err)?; Ok(PyTensor(res)) } } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike])")] /// Loads a safetensors file. Returns a dictionary mapping tensor names to tensors. /// &RETURNS&: Dict[str,Tensor] fn load_safetensors(path: &str, py: Python<'_>) -> PyResult<PyObject> { let res = ::candle::safetensors::load(path, &Device::Cpu).map_err(wrap_err)?; let res = res .into_iter() .map(|(key, value)| (key, PyTensor(value).into_py(py))) .collect::<Vec<_>>(); Ok(res.into_py_dict_bound(py).to_object(py)) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], tensors:Dict[str,Tensor])")] /// Saves a dictionary of tensors to a safetensors file. 
/// &RETURNS&: None fn save_safetensors( path: &str, tensors: std::collections::HashMap<String, PyTensor>, ) -> PyResult<()> { let tensors = tensors .into_iter() .map(|(s, t)| (s, t.0)) .collect::<std::collections::HashMap<_, _>>(); ::candle::safetensors::save(&tensors, path).map_err(wrap_err) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], device: Optional[Device] = None)")] /// Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors, /// a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary. /// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any], List[str]] fn load_ggml( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject, PyObject)> { let mut file = std::fs::File::open(path)?; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let ggml = ::candle::quantized::ggml_file::Content::read(&mut file, &device).map_err(wrap_err)?; let tensors = ggml .tensors .into_iter() .map(|(key, qtensor)| Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py)))) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict_bound(py).to_object(py); let hparams = [ ("n_vocab", ggml.hparams.n_vocab), ("n_embd", ggml.hparams.n_embd), ("n_mult", ggml.hparams.n_mult), ("n_head", ggml.hparams.n_head), ("n_layer", ggml.hparams.n_layer), ("n_rot", ggml.hparams.n_rot), ("ftype", ggml.hparams.ftype), ]; let hparams = hparams.into_py_dict_bound(py).to_object(py); let vocab = ggml .vocab .token_score_pairs .iter() .map(|(bytes, _)| String::from_utf8_lossy(bytes.as_slice()).to_string()) .collect::<Vec<String>>() .to_object(py); Ok((tensors, hparams, vocab)) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], device: Optional[Device] = None)")] /// Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors, /// and the second maps metadata keys to metadata values. 
/// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any]] fn load_gguf( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject)> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; use ::candle::quantized::gguf_file; fn gguf_value_to_pyobject(v: &gguf_file::Value, py: Python<'_>) -> PyResult<PyObject> { let v: PyObject = match v { gguf_file::Value::U8(x) => x.into_py(py), gguf_file::Value::I8(x) => x.into_py(py), gguf_file::Value::U16(x) => x.into_py(py), gguf_file::Value::I16(x) => x.into_py(py), gguf_file::Value::U32(x) => x.into_py(py), gguf_file::Value::I32(x) => x.into_py(py), gguf_file::Value::U64(x) => x.into_py(py), gguf_file::Value::I64(x) => x.into_py(py), gguf_file::Value::F32(x) => x.into_py(py), gguf_file::Value::F64(x) => x.into_py(py), gguf_file::Value::Bool(x) => x.into_py(py), gguf_file::Value::String(x) => x.into_py(py), gguf_file::Value::Array(x) => { let list = pyo3::types::PyList::empty_bound(py); for elem in x.iter() { list.append(gguf_value_to_pyobject(elem, py)?)?; } list.into() } }; Ok(v) } let mut file = std::fs::File::open(path)?; let gguf = gguf_file::Content::read(&mut file).map_err(wrap_err)?; let tensors = gguf .tensor_infos .keys() .map(|key| { let qtensor = gguf.tensor(&mut file, key, &device)?; Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py))) }) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict_bound(py).to_object(py); let metadata = gguf .metadata .iter() .map(|(key, value)| Ok((key, gguf_value_to_pyobject(value, py)?))) .collect::<PyResult<Vec<_>>>()? .into_py_dict_bound(py) .to_object(py); Ok((tensors, metadata)) } #[pyfunction] #[pyo3( text_signature = "(path:Union[str,PathLike], tensors:Dict[str,QTensor], metadata:Dict[str,Any])" )] /// Save quanitzed tensors and metadata to a GGUF file. fn save_gguf(path: &str, tensors: PyObject, metadata: PyObject, py: Python<'_>) -> PyResult<()> { use ::candle::quantized::gguf_file; fn pyobject_to_gguf_value(v: &Bound<PyAny>, py: Python<'_>) -> PyResult<gguf_file::Value> { let v: gguf_file::Value = if let Ok(x) = v.extract::<u8>() { gguf_file::Value::U8(x) } else if let Ok(x) = v.extract::<i8>() { gguf_file::Value::I8(x) } else if let Ok(x) = v.extract::<u16>() { gguf_file::Value::U16(x) } else if let Ok(x) = v.extract::<i16>() { gguf_file::Value::I16(x) } else if let Ok(x) = v.extract::<u32>() { gguf_file::Value::U32(x) } else if let Ok(x) = v.extract::<i32>() { gguf_file::Value::I32(x) } else if let Ok(x) = v.extract::<u64>() { gguf_file::Value::U64(x) } else if let Ok(x) = v.extract::<i64>() { gguf_file::Value::I64(x) } else if let Ok(x) = v.extract::<f32>() { gguf_file::Value::F32(x) } else if let Ok(x) = v.extract::<f64>() { gguf_file::Value::F64(x) } else if let Ok(x) = v.extract::<bool>() { gguf_file::Value::Bool(x) } else if let Ok(x) = v.extract::<String>() { gguf_file::Value::String(x) } else if let Ok(x) = v.extract::<Vec<PyObject>>() { let x = x .into_iter() .map(|f| pyobject_to_gguf_value(f.bind(py), py)) .collect::<PyResult<Vec<_>>>()?; gguf_file::Value::Array(x) } else { return Err(PyErr::new::<PyValueError, _>(format!( "unsupported type {:?}", v ))); }; Ok(v) } let tensors = tensors .extract::<&PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? 
.iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, value.extract::<PyQTensor>()?.0, )) }) .collect::<PyResult<Vec<_>>>()?; let metadata = metadata .extract::<&PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? .iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, pyobject_to_gguf_value(&value.as_borrowed(), py)?, )) }) .collect::<PyResult<Vec<_>>>()?; let converted_metadata: Vec<_> = metadata .iter() .map(|(name, value)| (name.as_str(), value)) .collect(); let converted_tensors: Vec<_> = tensors .iter() .map(|(name, tensor)| (name.as_str(), tensor.as_ref())) .collect(); let mut file = std::fs::File::create(path)?; gguf_file::write(&mut file, &converted_metadata, &converted_tensors).map_err(wrap_err) } #[pyfunction] /// Returns true if the 'cuda' backend is available. /// &RETURNS&: bool fn cuda_is_available() -> bool { ::candle::utils::cuda_is_available() } #[pyfunction] /// Returns true if candle was compiled with 'accelerate' support. /// &RETURNS&: bool fn has_accelerate() -> bool { ::candle::utils::has_accelerate() } #[pyfunction] /// Returns true if candle was compiled with MKL support. /// &RETURNS&: bool fn has_mkl() -> bool { ::candle::utils::has_mkl() } #[pyfunction] /// Returns the number of threads used by the candle. /// &RETURNS&: int fn get_num_threads() -> usize { ::candle::utils::get_num_threads() } fn candle_utils(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap_pyfunction!(cuda_is_available, m)?)?; m.add_function(wrap_pyfunction!(get_num_threads, m)?)?; m.add_function(wrap_pyfunction!(has_accelerate, m)?)?; m.add_function(wrap_pyfunction!(has_mkl, m)?)?; m.add_function(wrap_pyfunction!(load_ggml, m)?)?; m.add_function(wrap_pyfunction!(load_gguf, m)?)?; m.add_function(wrap_pyfunction!(save_gguf, m)?)?; m.add_function(wrap_pyfunction!(load_safetensors, m)?)?; m.add_function(wrap_pyfunction!(save_safetensors, m)?)?; Ok(()) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor, dim:int)")] /// Applies the Softmax function to a given tensor.# /// &RETURNS&: Tensor fn softmax(tensor: PyTensor, dim: i64) -> PyResult<PyTensor> { let dim = actual_dim(&tensor, dim).map_err(wrap_err)?; let sm = candle_nn::ops::softmax(&tensor.0, dim).map_err(wrap_err)?; Ok(PyTensor(sm)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d avg-pool function to a given tensor.# /// &RETURNS&: Tensor fn avg_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .avg_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d max-pool function to a given tensor.# /// &RETURNS&: Tensor fn max_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .max_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Sigmoid Linear Unit (SiLU) function to a given tensor. 
/// &RETURNS&: Tensor fn silu(tensor: PyTensor) -> PyResult<PyTensor> { let s = candle_nn::ops::silu(&tensor.0).map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Gaussian Error Linear Unit (GELU) function to a given tensor. /// &RETURNS&: Tensor fn gelu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.gelu_erf().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Rectified Linear Unit (ReLU) function to a given tensor. /// &RETURNS&: Tensor fn relu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.relu().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the tanh function to a given tensor. /// &RETURNS&: Tensor fn tanh(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.tanh().map_err(wrap_err)?; Ok(PyTensor(s)) } fn candle_functional_m(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap_pyfunction!(silu, m)?)?; m.add_function(wrap_pyfunction!(softmax, m)?)?; m.add_function(wrap_pyfunction!(max_pool2d, m)?)?; m.add_function(wrap_pyfunction!(avg_pool2d, m)?)?; m.add_function(wrap_pyfunction!(gelu, m)?)?; m.add_function(wrap_pyfunction!(relu, m)?)?; m.add_function(wrap_pyfunction!(tanh, m)?)?; Ok(()) } #[cfg(feature = "onnx")] fn candle_onnx_m(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { use onnx::{PyONNXModel, PyONNXTensorDescriptor}; m.add_class::<PyONNXModel>()?; m.add_class::<PyONNXTensorDescriptor>()?; Ok(()) } #[pymodule] fn candle(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { let utils = PyModule::new_bound(py, "utils")?; candle_utils(py, &utils)?; m.add_submodule(&utils)?; let nn = PyModule::new_bound(py, "functional")?; candle_functional_m(py, &nn)?; m.add_submodule(&nn)?; #[cfg(feature = "onnx")] { let onnx = PyModule::new_bound(py, "onnx")?; candle_onnx_m(py, &onnx)?; m.add_submodule(&onnx)?; } m.add_class::<PyTensor>()?; m.add_class::<PyQTensor>()?; m.add_class::<PyDType>()?; m.add("u8", PyDType(DType::U8))?; m.add("u32", PyDType(DType::U32))?; m.add("i64", PyDType(DType::I64))?; m.add("bf16", PyDType(DType::BF16))?; m.add("f16", PyDType(DType::F16))?; m.add("f32", PyDType(DType::F32))?; m.add("f64", PyDType(DType::F64))?; m.add_function(wrap_pyfunction!(cat, m)?)?; m.add_function(wrap_pyfunction!(ones, m)?)?; m.add_function(wrap_pyfunction!(rand, m)?)?; m.add_function(wrap_pyfunction!(randn, m)?)?; m.add_function(wrap_pyfunction!(tensor, m)?)?; m.add_function(wrap_pyfunction!(stack, m)?)?; m.add_function(wrap_pyfunction!(zeros, m)?)?; Ok(()) }
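// Hedged illustration, not part of the bindings above: `__getitem__` lowers Python-style
// indexing onto plain candle ops (narrow/squeeze for integer indices, narrow for slices,
// index_select for index lists, unsqueeze for `None`). The sketch below replays that
// lowering by hand on a small tensor so the `Indexer` variants are easier to follow; the
// function name and the values are made up for the example, and it reuses the `Tensor` and
// `Device` imports already present at the top of this file.
fn indexing_sketch() -> ::candle::Result<()> {
    let t = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;

    // Indexer::Index(1) on dim 0: narrow to one row, then squeeze the unit dim (like `t[1]`).
    let row1 = t.narrow(0, 1, 1)?.squeeze(0)?; // shape (4,)

    // Indexer::Slice(0, 2) on dim 1: narrow keeps the dimension (like `t[:, 0:2]`).
    let cols = t.narrow(1, 0, 2)?; // shape (3, 2)

    // Indexer::IndexSelect with a list of indices (like `t[[0, 2]]`).
    let idx = Tensor::new(&[0u32, 2u32], &Device::Cpu)?;
    let picked = t.index_select(&idx, 0)?; // shape (2, 4)

    // Indexer::Expand, i.e. a `None` in the indexing tuple: unsqueeze adds a new dim.
    let expanded = t.unsqueeze(0)?; // shape (1, 3, 4)

    println!("{row1}\n{cols}\n{picked}\n{expanded}");
    Ok(())
}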
candle/candle-pyo3/src/lib.rs/0
{ "file_path": "candle/candle-pyo3/src/lib.rs", "repo_id": "candle", "token_count": 29734 }
44
use candle::{DType, Error, Result, Tensor}; use rand::{distributions::Distribution, SeedableRng}; #[derive(Clone, PartialEq, Debug)] pub enum Sampling { ArgMax, All { temperature: f64 }, TopK { k: usize, temperature: f64 }, TopP { p: f64, temperature: f64 }, TopKThenTopP { k: usize, p: f64, temperature: f64 }, } pub struct LogitsProcessor { rng: rand::rngs::StdRng, sampling: Sampling, } impl LogitsProcessor { pub fn from_sampling(seed: u64, sampling: Sampling) -> Self { let rng = rand::rngs::StdRng::seed_from_u64(seed); Self { rng, sampling } } pub fn new(seed: u64, temperature: Option<f64>, top_p: Option<f64>) -> Self { let temperature = temperature.and_then(|v| if v < 1e-7 { None } else { Some(v) }); let sampling = match temperature { None => Sampling::ArgMax, Some(temperature) => match top_p { None => Sampling::All { temperature }, Some(p) => Sampling::TopP { p, temperature }, }, }; Self::from_sampling(seed, sampling) } fn sample_argmax(&mut self, logits: Tensor) -> Result<u32> { let logits_v: Vec<f32> = logits.to_vec1()?; let next_token = logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .unwrap(); Ok(next_token) } fn sample_multinomial(&mut self, prs: &Vec<f32>) -> Result<u32> { let distr = rand::distributions::WeightedIndex::new(prs).map_err(Error::wrap)?; let next_token = distr.sample(&mut self.rng) as u32; Ok(next_token) } /// top-p sampling (or "nucleus sampling") samples from the smallest set of tokens that exceed /// probability top_p. This way we never sample tokens that have very low probabilities and are /// less likely to go "off the rails". fn sample_topp(&mut self, prs: &mut Vec<f32>, top_p: f32) -> Result<u32> { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); // Sort by descending probability. argsort_indices.sort_by(|&i, &j| prs[j].total_cmp(&prs[i])); // Clamp smaller probabilities to zero. let mut cumsum = 0.; for index in &argsort_indices { if cumsum >= top_p { prs[*index] = 0.0; } else { cumsum += prs[*index]; } } // Sample with clamped probabilities. self.sample_multinomial(prs) } // top-k sampling samples from the k tokens with the largest probabilities. fn sample_topk(&mut self, prs: &mut Vec<f32>, top_k: usize) -> Result<u32> { if top_k >= prs.len() { self.sample_multinomial(prs) } else { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); let (indices, _, _) = argsort_indices.select_nth_unstable_by(top_k, |&i, &j| prs[j].total_cmp(&prs[i])); let prs = indices.iter().map(|&i| prs[i]).collect::<Vec<_>>(); let index = self.sample_multinomial(&prs)?; Ok(indices[index as usize] as u32) } } // top-k sampling samples from the k tokens with the largest probabilities. // then top-p sampling. fn sample_topk_topp(&mut self, prs: &mut Vec<f32>, top_k: usize, top_p: f32) -> Result<u32> { if top_k >= prs.len() { self.sample_topp(prs, top_p) } else { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); let (indices, _, _) = argsort_indices.select_nth_unstable_by(top_k, |&i, &j| prs[j].total_cmp(&prs[i])); let mut prs = indices.iter().map(|&i| prs[i]).collect::<Vec<_>>(); let sum_p = prs.iter().sum::<f32>(); let index = if top_p <= 0.0 || top_p >= sum_p { self.sample_multinomial(&prs)? } else { self.sample_topp(&mut prs, top_p)? 
}; Ok(indices[index as usize] as u32) } } pub fn sample(&mut self, logits: &Tensor) -> Result<u32> { self.sample_f(logits, |_| {}) } pub fn sample_f(&mut self, logits: &Tensor, f: impl FnOnce(&mut [f32])) -> Result<u32> { let logits = logits.to_dtype(DType::F32)?; let prs = |temperature: f64| -> Result<Vec<f32>> { let logits = (&logits / temperature)?; let prs = candle_nn::ops::softmax_last_dim(&logits)?; let mut prs = prs.to_vec1()?; f(&mut prs); Ok(prs) }; let next_token = match &self.sampling { Sampling::ArgMax => self.sample_argmax(logits)?, Sampling::All { temperature } => { let prs = prs(*temperature)?; self.sample_multinomial(&prs)? } Sampling::TopP { p, temperature } => { let mut prs = prs(*temperature)?; if *p <= 0.0 || *p >= 1.0 { // simply sample from the predicted probability distribution self.sample_multinomial(&prs)? } else { // top-p (nucleus) sampling, clamping the least likely tokens to zero self.sample_topp(&mut prs, *p as f32)? } } Sampling::TopK { k, temperature } => { let mut prs = prs(*temperature)?; self.sample_topk(&mut prs, *k)? } Sampling::TopKThenTopP { k, p, temperature } => { let mut prs = prs(*temperature)?; self.sample_topk_topp(&mut prs, *k, *p as f32)? } }; Ok(next_token) } }
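// Hedged usage sketch, illustration only: how the sampling strategies above are typically
// driven. `Sampling::TopKThenTopP` first keeps the `k` most probable tokens (see
// `sample_topk_topp`), then nucleus-samples within that subset with threshold `p`. The
// seed, logits values and hyper-parameters below are made up for the example; `Device` is
// the only import not already at the top of this module.
fn sampling_sketch() -> Result<u32> {
    use candle::Device;
    let logits = Tensor::new(&[1.0f32, 2.0, 0.5, 4.0, 0.1], &Device::Cpu)?;

    // Greedy decoding: ArgMax ignores temperature and always picks index 3 here.
    let mut greedy = LogitsProcessor::from_sampling(42, Sampling::ArgMax);
    let _best = greedy.sample(&logits)?;

    // Stochastic decoding: keep the top 3 tokens, then apply top-p = 0.9 at temperature 0.8.
    let mut sampler = LogitsProcessor::from_sampling(
        42,
        Sampling::TopKThenTopP { k: 3, p: 0.9, temperature: 0.8 },
    );
    sampler.sample(&logits)
}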
candle/candle-transformers/src/generation/mod.rs/0
{ "file_path": "candle/candle-transformers/src/generation/mod.rs", "repo_id": "candle", "token_count": 2910 }
45
use candle::D::Minus1; use candle::{Module, Result, Tensor}; use candle_nn::ops::Identity; use candle_nn::{ batch_norm, conv2d, conv2d_no_bias, conv_transpose2d, linear, seq, Activation, BatchNorm, BatchNormConfig, Conv2d, Conv2dConfig, ConvTranspose2dConfig, Sequential, VarBuilder, }; use crate::models::dinov2::DinoVisionTransformer; pub struct DepthAnythingV2Config { out_channel_sizes: [usize; 4], in_channel_size: usize, // embed_dim in the Dino model num_features: usize, use_batch_norm: bool, use_class_token: bool, layer_ids_vits: Vec<usize>, input_image_size: usize, target_patch_size: usize, } impl DepthAnythingV2Config { #[allow(clippy::too_many_arguments)] pub fn new( out_channel_sizes: [usize; 4], in_channel_size: usize, num_features: usize, use_batch_norm: bool, use_class_token: bool, layer_ids_vits: Vec<usize>, input_image_size: usize, target_patch_size: usize, ) -> Self { Self { out_channel_sizes, in_channel_size, num_features, use_batch_norm, use_class_token, layer_ids_vits, input_image_size, target_patch_size, } } pub fn vit_small() -> Self { Self { out_channel_sizes: [48, 96, 192, 384], in_channel_size: 384, num_features: 64, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![2, 5, 8, 11], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_base() -> Self { Self { out_channel_sizes: [96, 192, 384, 768], in_channel_size: 768, num_features: 128, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![2, 5, 8, 11], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_large() -> Self { Self { out_channel_sizes: [256, 512, 1024, 1024], in_channel_size: 1024, num_features: 256, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![4, 11, 17, 23], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_giant() -> Self { Self { out_channel_sizes: [1536, 1536, 1536, 1536], in_channel_size: 1536, num_features: 384, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![9, 19, 29, 39], input_image_size: 518, target_patch_size: 518 / 14, } } } pub struct ResidualConvUnit { activation: Activation, conv1: Conv2d, conv2: Conv2d, batch_norm1: Option<BatchNorm>, batch_norm2: Option<BatchNorm>, } impl ResidualConvUnit { pub fn new( conf: &DepthAnythingV2Config, activation: Activation, vb: VarBuilder, ) -> Result<Self> { const KERNEL_SIZE: usize = 3; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let conv1 = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("conv1"), )?; let conv2 = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("conv2"), )?; let (batch_norm1, batch_norm2) = match conf.use_batch_norm { true => { let batch_norm_cfg = BatchNormConfig { eps: 1e-05, remove_mean: false, affine: true, momentum: 0.1, }; ( Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn1"))?), Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn2"))?), ) } false => (None, None), }; Ok(Self { activation, conv1, conv2, batch_norm1, batch_norm2, }) } } impl Module for ResidualConvUnit { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let out = self.activation.forward(xs)?; let out = self.conv1.forward(&out)?; let out = if let Some(batch_norm1) = &self.batch_norm1 { batch_norm1.forward_train(&out)? } else { out }; let out = self.activation.forward(&out)?; let out = self.conv2.forward(&out)?; let out = if let Some(batch_norm2) = &self.batch_norm2 { batch_norm2.forward_train(&out)? 
} else { out }; out + xs } } pub struct FeatureFusionBlock { res_conv_unit1: ResidualConvUnit, res_conv_unit2: ResidualConvUnit, output_conv: Conv2d, target_patch_size: usize, } impl FeatureFusionBlock { pub fn new( conf: &DepthAnythingV2Config, target_patch_size: usize, activation: Activation, vb: VarBuilder, ) -> Result<Self> { const KERNEL_SIZE: usize = 1; let conv_cfg = Conv2dConfig { padding: 0, stride: 1, dilation: 1, groups: 1, }; let output_conv = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("out_conv"), )?; let res_conv_unit1 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit1"))?; let res_conv_unit2 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit2"))?; Ok(Self { res_conv_unit1, res_conv_unit2, output_conv, target_patch_size, }) } } impl Module for FeatureFusionBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let out = self.res_conv_unit2.forward(xs)?; let out = out.interpolate2d(self.target_patch_size, self.target_patch_size)?; self.output_conv.forward(&out) } } pub struct Scratch { layer1_rn: Conv2d, layer2_rn: Conv2d, layer3_rn: Conv2d, layer4_rn: Conv2d, refine_net1: FeatureFusionBlock, refine_net2: FeatureFusionBlock, refine_net3: FeatureFusionBlock, refine_net4: FeatureFusionBlock, output_conv1: Conv2d, output_conv2: Sequential, } impl Scratch { pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> { const KERNEL_SIZE: usize = 3; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let layer1_rn = conv2d_no_bias( conf.out_channel_sizes[0], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer1_rn"), )?; let layer2_rn = conv2d_no_bias( conf.out_channel_sizes[1], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer2_rn"), )?; let layer3_rn = conv2d_no_bias( conf.out_channel_sizes[2], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer3_rn"), )?; let layer4_rn = conv2d_no_bias( conf.out_channel_sizes[3], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer4_rn"), )?; let refine_net1 = FeatureFusionBlock::new( conf, conf.target_patch_size * 8, Activation::Relu, vb.pp("refinenet1"), )?; let refine_net2 = FeatureFusionBlock::new( conf, conf.target_patch_size * 4, Activation::Relu, vb.pp("refinenet2"), )?; let refine_net3 = FeatureFusionBlock::new( conf, conf.target_patch_size * 2, Activation::Relu, vb.pp("refinenet3"), )?; let refine_net4 = FeatureFusionBlock::new( conf, conf.target_patch_size, Activation::Relu, vb.pp("refinenet4"), )?; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let output_conv1 = conv2d( conf.num_features, conf.num_features / 2, KERNEL_SIZE, conv_cfg, vb.pp("output_conv1"), )?; let output_conv2 = seq(); const HEAD_FEATURES_2: usize = 32; const OUT_CHANNELS_2: usize = 1; const KERNEL_SIZE_2: usize = 1; let output_conv2 = output_conv2.add(conv2d( conf.num_features / 2, HEAD_FEATURES_2, KERNEL_SIZE, conv_cfg, vb.pp("output_conv2").pp("0"), )?); let output_conv2 = output_conv2 .add(Activation::Relu) .add(conv2d( HEAD_FEATURES_2, OUT_CHANNELS_2, KERNEL_SIZE_2, conv_cfg, vb.pp("output_conv2").pp("2"), )?) 
.add(Activation::Relu); Ok(Self { layer1_rn, layer2_rn, layer3_rn, layer4_rn, refine_net1, refine_net2, refine_net3, refine_net4, output_conv1, output_conv2, }) } } const NUM_CHANNELS: usize = 4; pub struct DPTHead<'a> { conf: &'a DepthAnythingV2Config, projections: Vec<Conv2d>, resize_layers: Vec<Box<dyn Module>>, readout_projections: Vec<Sequential>, scratch: Scratch, } impl<'a> DPTHead<'a> { pub fn new(conf: &'a DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> { let mut projections: Vec<Conv2d> = Vec::with_capacity(conf.out_channel_sizes.len()); for (conv_index, out_channel_size) in conf.out_channel_sizes.iter().enumerate() { projections.push(conv2d( conf.in_channel_size, *out_channel_size, 1, Default::default(), vb.pp("projects").pp(conv_index.to_string()), )?); } let resize_layers: Vec<Box<dyn Module>> = vec![ Box::new(conv_transpose2d( conf.out_channel_sizes[0], conf.out_channel_sizes[0], 4, ConvTranspose2dConfig { padding: 0, stride: 4, dilation: 1, output_padding: 0, }, vb.pp("resize_layers").pp("0"), )?), Box::new(conv_transpose2d( conf.out_channel_sizes[1], conf.out_channel_sizes[1], 2, ConvTranspose2dConfig { padding: 0, stride: 2, dilation: 1, output_padding: 0, }, vb.pp("resize_layers").pp("1"), )?), Box::new(Identity::new()), Box::new(conv2d( conf.out_channel_sizes[3], conf.out_channel_sizes[3], 3, Conv2dConfig { padding: 1, stride: 2, dilation: 1, groups: 1, }, vb.pp("resize_layers").pp("3"), )?), ]; let readout_projections = if conf.use_class_token { let rop = Vec::with_capacity(NUM_CHANNELS); for rop_index in 0..NUM_CHANNELS { seq() .add(linear( 2 * conf.in_channel_size, conf.in_channel_size, vb.pp("readout_projects").pp(rop_index.to_string()), )?) .add(Activation::Gelu); } rop } else { vec![] }; let scratch = Scratch::new(conf, vb.pp("scratch"))?; Ok(Self { conf, projections, resize_layers, readout_projections, scratch, }) } } impl Module for DPTHead<'_> { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut out: Vec<Tensor> = Vec::with_capacity(NUM_CHANNELS); for i in 0..NUM_CHANNELS { let x = if self.conf.use_class_token { let x = xs.get(i)?.get(0)?; let class_token = xs.get(i)?.get(1)?; let readout = class_token.unsqueeze(1)?.expand(x.shape())?; let to_cat = [x, readout]; let cat = Tensor::cat(&to_cat, Minus1)?; self.readout_projections[i].forward(&cat)? } else { xs.get(i)? 
}; let x_dims = x.dims(); let x = x.permute((0, 2, 1))?.reshape(( x_dims[0], x_dims[x_dims.len() - 1], self.conf.target_patch_size, self.conf.target_patch_size, ))?; let x = self.projections[i].forward(&x)?; let x = self.resize_layers[i].forward(&x)?; out.push(x); } let layer_1_rn = self.scratch.layer1_rn.forward(&out[0])?; let layer_2_rn = self.scratch.layer2_rn.forward(&out[1])?; let layer_3_rn = self.scratch.layer3_rn.forward(&out[2])?; let layer_4_rn = self.scratch.layer4_rn.forward(&out[3])?; let path4 = self.scratch.refine_net4.forward(&layer_4_rn)?; let res3_out = self .scratch .refine_net3 .res_conv_unit1 .forward(&layer_3_rn)?; let res3_out = path4.add(&res3_out)?; let path3 = self.scratch.refine_net3.forward(&res3_out)?; let res2_out = self .scratch .refine_net2 .res_conv_unit1 .forward(&layer_2_rn)?; let res2_out = path3.add(&res2_out)?; let path2 = self.scratch.refine_net2.forward(&res2_out)?; let res1_out = self .scratch .refine_net1 .res_conv_unit1 .forward(&layer_1_rn)?; let res1_out = path2.add(&res1_out)?; let path1 = self.scratch.refine_net1.forward(&res1_out)?; let out = self.scratch.output_conv1.forward(&path1)?; let out = out.interpolate2d(self.conf.input_image_size, self.conf.input_image_size)?; self.scratch.output_conv2.forward(&out) } } pub struct DepthAnythingV2<'a> { pretrained: &'a DinoVisionTransformer, depth_head: DPTHead<'a>, conf: &'a DepthAnythingV2Config, } impl<'a> DepthAnythingV2<'a> { pub fn new( pretrained: &'a DinoVisionTransformer, conf: &'a DepthAnythingV2Config, vb: VarBuilder, ) -> Result<Self> { let depth_head = DPTHead::new(conf, vb.pp("depth_head"))?; Ok(Self { pretrained, depth_head, conf, }) } } impl<'a> Module for DepthAnythingV2<'a> { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let features = self.pretrained.get_intermediate_layers( xs, &self.conf.layer_ids_vits, false, false, true, )?; let depth = self.depth_head.forward(&features)?; depth.relu() } }
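// Hedged sketch, illustration only: the resolution bookkeeping behind the configs above.
// DINOv2 uses 14x14 patches, so a 518x518 input yields a 37x37 token grid
// (`target_patch_size`); the four fusion blocks upsample that grid by 1x/2x/4x/8x (see the
// `FeatureFusionBlock` targets in `Scratch::new`), and the DPT head finally interpolates
// back to the full input resolution. The function below only restates that arithmetic and
// is not part of the model.
fn resolution_sketch() {
    let conf = DepthAnythingV2Config::vit_small();
    let grid = conf.target_patch_size; // 518 / 14 = 37
    assert_eq!(grid, conf.input_image_size / 14);
    let fusion_targets = [grid, grid * 2, grid * 4, grid * 8]; // 37, 74, 148, 296
    println!(
        "fusion targets {fusion_targets:?} -> final depth map {0}x{0}",
        conf.input_image_size
    );
}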
candle/candle-transformers/src/models/depth_anything_v2.rs/0
{ "file_path": "candle/candle-transformers/src/models/depth_anything_v2.rs", "repo_id": "candle", "token_count": 9039 }
46
use candle::{bail, DType, Module, Result, Tensor}; use candle_nn as nn; pub struct PatchEmbedder { proj: nn::Conv2d, } impl PatchEmbedder { pub fn new( patch_size: usize, in_channels: usize, embed_dim: usize, vb: nn::VarBuilder, ) -> Result<Self> { let proj = nn::conv2d( in_channels, embed_dim, patch_size, nn::Conv2dConfig { stride: patch_size, ..Default::default() }, vb.pp("proj"), )?; Ok(Self { proj }) } } impl Module for PatchEmbedder { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = self.proj.forward(x)?; // flatten spatial dim and transpose to channels last let (b, c, h, w) = x.dims4()?; x.reshape((b, c, h * w))?.transpose(1, 2) } } pub struct Unpatchifier { patch_size: usize, out_channels: usize, } impl Unpatchifier { pub fn new(patch_size: usize, out_channels: usize) -> Result<Self> { Ok(Self { patch_size, out_channels, }) } pub fn unpatchify(&self, x: &Tensor, h: usize, w: usize) -> Result<Tensor> { let h = (h + 1) / self.patch_size; let w = (w + 1) / self.patch_size; let x = x.reshape(( x.dim(0)?, h, w, self.patch_size, self.patch_size, self.out_channels, ))?; let x = x.permute((0, 5, 1, 3, 2, 4))?; // "nhwpqc->nchpwq" x.reshape(( x.dim(0)?, self.out_channels, self.patch_size * h, self.patch_size * w, )) } } pub struct PositionEmbedder { pos_embed: Tensor, patch_size: usize, pos_embed_max_size: usize, } impl PositionEmbedder { pub fn new( hidden_size: usize, patch_size: usize, pos_embed_max_size: usize, vb: nn::VarBuilder, ) -> Result<Self> { let pos_embed = vb.get( (1, pos_embed_max_size * pos_embed_max_size, hidden_size), "pos_embed", )?; Ok(Self { pos_embed, patch_size, pos_embed_max_size, }) } pub fn get_cropped_pos_embed(&self, h: usize, w: usize) -> Result<Tensor> { let h = (h + 1) / self.patch_size; let w = (w + 1) / self.patch_size; if h > self.pos_embed_max_size || w > self.pos_embed_max_size { bail!("Input size is too large for the position embedding") } let top = (self.pos_embed_max_size - h) / 2; let left = (self.pos_embed_max_size - w) / 2; let pos_embed = self.pos_embed .reshape((1, self.pos_embed_max_size, self.pos_embed_max_size, ()))?; let pos_embed = pos_embed.narrow(1, top, h)?.narrow(2, left, w)?; pos_embed.reshape((1, h * w, ())) } } pub struct TimestepEmbedder { mlp: nn::Sequential, frequency_embedding_size: usize, } impl TimestepEmbedder { pub fn new( hidden_size: usize, frequency_embedding_size: usize, vb: nn::VarBuilder, ) -> Result<Self> { let mlp = nn::seq() .add(nn::linear( frequency_embedding_size, hidden_size, vb.pp("mlp.0"), )?) .add(nn::Activation::Silu) .add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?); Ok(Self { mlp, frequency_embedding_size, }) } fn timestep_embedding(t: &Tensor, dim: usize, max_period: f64) -> Result<Tensor> { if dim % 2 != 0 { bail!("Embedding dimension must be even") } if t.dtype() != DType::F32 && t.dtype() != DType::F64 { bail!("Input tensor must be floating point") } let half = dim / 2; let freqs = Tensor::arange(0f32, half as f32, t.device())? .to_dtype(candle::DType::F32)? .mul(&Tensor::full( (-f64::ln(max_period) / half as f64) as f32, half, t.device(), )?)? .exp()?; let args = t .unsqueeze(1)? .to_dtype(candle::DType::F32)? 
.matmul(&freqs.unsqueeze(0)?)?; let embedding = Tensor::cat(&[args.cos()?, args.sin()?], 1)?; embedding.to_dtype(candle::DType::F16) } } impl Module for TimestepEmbedder { fn forward(&self, t: &Tensor) -> Result<Tensor> { let t_freq = Self::timestep_embedding(t, self.frequency_embedding_size, 10000.0)?; self.mlp.forward(&t_freq) } } pub struct VectorEmbedder { mlp: nn::Sequential, } impl VectorEmbedder { pub fn new(input_dim: usize, hidden_size: usize, vb: nn::VarBuilder) -> Result<Self> { let mlp = nn::seq() .add(nn::linear(input_dim, hidden_size, vb.pp("mlp.0"))?) .add(nn::Activation::Silu) .add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?); Ok(Self { mlp }) } } impl Module for VectorEmbedder { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.mlp.forward(x) } }
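// Hedged sketch, illustration only: the closed form computed by
// `TimestepEmbedder::timestep_embedding` above. For a scalar timestep t, entry i of the
// half-dimensional frequency vector is 1 / max_period^(i / half), and the embedding is the
// concatenation [cos(t * freq_i), sin(t * freq_i)] (before the cast to f16). The plain-f32
// helper below mirrors the tensor code one value at a time; it is not used by the module.
fn timestep_embedding_scalar(t: f32, dim: usize, max_period: f32) -> Vec<f32> {
    assert!(dim % 2 == 0, "embedding dimension must be even");
    let half = dim / 2;
    let freqs: Vec<f32> = (0..half)
        .map(|i| (-(max_period.ln()) * i as f32 / half as f32).exp())
        .collect();
    let cos: Vec<f32> = freqs.iter().map(|f| (t * f).cos()).collect();
    let sin: Vec<f32> = freqs.iter().map(|f| (t * f).sin()).collect();
    [cos, sin].concat()
}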
candle/candle-transformers/src/models/mmdit/embedding.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/embedding.rs", "repo_id": "candle", "token_count": 2837 }
47
// This implementation is based on: // https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::VarBuilder; use std::sync::Arc; // https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_act: candle_nn::Activation, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub rms_norm_eps: f64, pub rope_theta: f64, pub bos_token_id: Option<u32>, pub eos_token_id: Option<u32>, pub rope_scaling: Option<String>, pub max_position_embeddings: usize, } impl Config { pub fn head_dim(&self) -> usize { self.hidden_size / self.num_attention_heads } } #[derive(Debug, Clone)] pub struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { pub fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.head_dim(); let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } pub fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] struct Attention { qkv_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let head_dim = cfg.head_dim(); let op_size = num_heads * head_dim + 2 * num_kv_heads * head_dim; let qkv_proj = linear(cfg.hidden_size, op_size, vb.pp("qkv_proj"))?; let o_proj = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("o_proj"))?; Ok(Self { qkv_proj, o_proj, rotary_emb, kv_cache: None, num_heads, num_kv_heads, num_kv_groups: num_heads / num_kv_heads, head_dim, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let qkv = self.qkv_proj.forward(xs)?; let query_pos = self.num_heads * self.head_dim; let query_states = qkv.narrow(D::Minus1, 0, query_pos)?; let key_states = qkv.narrow(D::Minus1, query_pos, self.num_kv_heads * self.head_dim)?; let value_states = qkv.narrow( D::Minus1, query_pos + self.num_kv_heads * self.head_dim, self.num_kv_heads * self.head_dim, )?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? 
.transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, ()))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Mlp { gate_up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, i_size: usize, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size; let i_size = cfg.intermediate_size; let gate_up_proj = linear(hidden_size, 2 * i_size, vb.pp("gate_up_proj"))?; let down_proj = linear(i_size, hidden_size, vb.pp("down_proj"))?; Ok(Self { gate_up_proj, down_proj, act_fn: cfg.hidden_act, i_size, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let up_states = xs.apply(&self.gate_up_proj)?; let gate = up_states.narrow(D::Minus1, 0, self.i_size)?; let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?; let up_states = (up_states * gate.apply(&self.act_fn))?; up_states.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: Mlp, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let 
vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/phi3.rs/0
{ "file_path": "candle/candle-transformers/src/models/phi3.rs", "repo_id": "candle", "token_count": 5728 }
48
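The `phi3.rs` listing above exposes a small public surface: `Config`, `Model::new`, `Model::forward(input_ids, seqlen_offset)`, and `Model::clear_kv_cache`. A minimal greedy-decoding sketch against that surface is shown below; the weight path, the use of `VarBuilder::from_mmaped_safetensors`, and the prompt token ids are illustrative assumptions, not something taken from the repository.

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::phi3::{Config, Model};

/// Greedy decoding sketch: `cfg` and `weights` are assumed to come from a Phi-3 checkpoint.
fn greedy_decode(cfg: &Config, weights: &str, prompt: &[u32], steps: usize) -> Result<Vec<u32>> {
    let device = Device::Cpu;
    // Hypothetical weight file; memory-mapping safetensors is why this constructor is `unsafe`.
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let mut model = Model::new(cfg, vb)?;
    model.clear_kv_cache();

    let mut tokens = prompt.to_vec();
    let mut seqlen_offset = 0;
    for step in 0..steps {
        // Feed the whole prompt once, then only the latest token: the per-layer KV cache
        // kept inside `Attention` preserves the earlier context between calls.
        let ctxt = if step == 0 {
            &tokens[..]
        } else {
            &tokens[tokens.len() - 1..]
        };
        let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?;
        let logits = model.forward(&input, seqlen_offset)?; // (1, 1, vocab_size)
        let next = logits.squeeze(0)?.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
        seqlen_offset += ctxt.len();
        tokens.push(next);
    }
    Ok(tokens)
}
```

Temperature or top-p sampling would replace the `argmax` line; the rest of the loop stays the same.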
use candle::{Result, Tensor}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; #[derive(Debug)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, } impl Attention { fn new( embedding_dim: usize, num_heads: usize, downsample_rate: usize, vb: VarBuilder, ) -> Result<Self> { let internal_dim = embedding_dim / downsample_rate; let q_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("q_proj"))?; let k_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("k_proj"))?; let v_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("v_proj"))?; let out_proj = candle_nn::linear(internal_dim, embedding_dim, vb.pp("out_proj"))?; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, }) } fn separate_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n, c) = x.dims3()?; x.reshape((b, n, self.num_heads, c / self.num_heads))? .transpose(1, 2)? .contiguous() } fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n_heads, n_tokens, c_per_head) = x.dims4()?; x.transpose(1, 2)? .reshape((b, n_tokens, n_heads * c_per_head)) } fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let q = self.q_proj.forward(&q.contiguous()?)?; let k = self.k_proj.forward(&k.contiguous()?)?; let v = self.v_proj.forward(&v.contiguous()?)?; let q = self.separate_heads(&q)?; let k = self.separate_heads(&k)?; let v = self.separate_heads(&v)?; let (_, _, _, c_per_head) = q.dims4()?; let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?; let attn = candle_nn::ops::softmax_last_dim(&attn)?; let out = attn.matmul(&v)?; self.recombine_heads(&out)?.apply(&self.out_proj) } } #[derive(Debug)] struct TwoWayAttentionBlock { self_attn: Attention, norm1: LayerNorm, cross_attn_token_to_image: Attention, norm2: LayerNorm, mlp: super::MlpBlock, norm3: LayerNorm, norm4: LayerNorm, cross_attn_image_to_token: Attention, skip_first_layer_pe: bool, } impl TwoWayAttentionBlock { fn new( embedding_dim: usize, num_heads: usize, mlp_dim: usize, skip_first_layer_pe: bool, vb: VarBuilder, ) -> Result<Self> { let norm1 = layer_norm(embedding_dim, 1e-5, vb.pp("norm1"))?; let norm2 = layer_norm(embedding_dim, 1e-5, vb.pp("norm2"))?; let norm3 = layer_norm(embedding_dim, 1e-5, vb.pp("norm3"))?; let norm4 = layer_norm(embedding_dim, 1e-5, vb.pp("norm4"))?; let self_attn = Attention::new(embedding_dim, num_heads, 1, vb.pp("self_attn"))?; let cross_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_token_to_image"), )?; let cross_attn_image_to_token = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_image_to_token"), )?; let mlp = super::MlpBlock::new( embedding_dim, mlp_dim, candle_nn::Activation::Relu, vb.pp("mlp"), )?; Ok(Self { self_attn, norm1, cross_attn_image_to_token, norm2, mlp, norm3, norm4, cross_attn_token_to_image, skip_first_layer_pe, }) } fn forward( &self, queries: &Tensor, keys: &Tensor, query_pe: &Tensor, key_pe: &Tensor, ) -> Result<(Tensor, Tensor)> { // Self attention block let queries = if self.skip_first_layer_pe { self.self_attn.forward(queries, queries, queries)? } else { let q = (queries + query_pe)?; let attn_out = self.self_attn.forward(&q, &q, queries)?; (queries + attn_out)? 
}; let queries = self.norm1.forward(&queries)?; // Cross attention block, tokens attending to image embedding let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_token_to_image.forward(&q, &k, keys)?; let queries = (&queries + attn_out)?; let queries = self.norm2.forward(&queries)?; // MLP block let mlp_out = self.mlp.forward(&queries); let queries = (queries + mlp_out)?; let queries = self.norm3.forward(&queries)?; // Cross attention block, image embedding attending to tokens let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_image_to_token.forward(&k, &q, &queries)?; let keys = (keys + attn_out)?; let keys = self.norm4.forward(&keys)?; Ok((queries, keys)) } } #[derive(Debug)] pub struct TwoWayTransformer { layers: Vec<TwoWayAttentionBlock>, final_attn_token_to_image: Attention, norm_final_attn: LayerNorm, } impl TwoWayTransformer { pub fn new( depth: usize, embedding_dim: usize, num_heads: usize, mlp_dim: usize, vb: VarBuilder, ) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(depth); for i in 0..depth { let layer = TwoWayAttentionBlock::new(embedding_dim, num_heads, mlp_dim, i == 0, vb_l.pp(i))?; layers.push(layer) } let final_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("final_attn_token_to_image"), )?; let norm_final_attn = layer_norm(embedding_dim, 1e-5, vb.pp("norm_final_attn"))?; Ok(Self { layers, final_attn_token_to_image, norm_final_attn, }) } pub fn forward( &self, image_embedding: &Tensor, image_pe: &Tensor, point_embedding: &Tensor, ) -> Result<(Tensor, Tensor)> { let image_embedding = image_embedding.flatten_from(2)?.permute((0, 2, 1))?; let image_pe = image_pe.flatten_from(2)?.permute((0, 2, 1))?; let mut queries = point_embedding.clone(); let mut keys = image_embedding; for layer in self.layers.iter() { (queries, keys) = layer.forward(&queries, &keys, point_embedding, &image_pe)? } let q = (&queries + point_embedding)?; let k = (&keys + image_pe)?; let attn_out = self.final_attn_token_to_image.forward(&q, &k, &keys)?; let queries = (queries + attn_out)?.apply(&self.norm_final_attn)?; Ok((queries, keys)) } }
candle/candle-transformers/src/models/segment_anything/transformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/transformer.rs", "repo_id": "candle", "token_count": 3597 }
49
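Because `TwoWayTransformer` above only needs a `VarBuilder` and four hyper-parameters, its shape behaviour can be checked without real SAM weights. Below is a sketch of a test module one might append to this file; the dimensions (depth 2, embedding dim 256, a 64×64 image grid, 5 prompt tokens) are illustrative choices, and zero-initialised weights are used purely to exercise the forward pass.

```rust
#[cfg(test)]
mod tests {
    use super::TwoWayTransformer;
    use candle::{DType, Device, Result, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn two_way_transformer_shapes() -> Result<()> {
        let dev = Device::Cpu;
        // Zero-initialised weights are enough to exercise the tensor shapes.
        let vb = VarBuilder::zeros(DType::F32, &dev);
        // Illustrative hyper-parameters: depth, embedding_dim, num_heads, mlp_dim.
        let t = TwoWayTransformer::new(2, 256, 8, 2048, vb)?;
        let image_embedding = Tensor::zeros((1, 256, 64, 64), DType::F32, &dev)?;
        let image_pe = Tensor::zeros((1, 256, 64, 64), DType::F32, &dev)?;
        let point_embedding = Tensor::zeros((1, 5, 256), DType::F32, &dev)?;
        let (queries, keys) = t.forward(&image_embedding, &image_pe, &point_embedding)?;
        assert_eq!(queries.dims(), &[1, 5, 256]); // one row per prompt token
        assert_eq!(keys.dims(), &[1, 64 * 64, 256]); // one row per flattened image location
        Ok(())
    }
}
```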
// T5 Text Model // https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py use crate::models::with_tracing::{linear_no_bias, Embedding, Linear}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; use std::sync::Arc; fn default_relative_attention_max_distance() -> usize { 128 } fn default_is_decoder() -> bool { false } fn default_use_cache() -> bool { true } fn default_tie_word_embeddings() -> bool { true } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Deserialize, Default, Clone, PartialEq)] pub struct ActivationWithOptionalGating { pub gated: bool, pub activation: candle_nn::Activation, } pub fn deserialize_feed_forward_proj_activation<'de, D>( deserializer: D, ) -> std::result::Result<ActivationWithOptionalGating, D::Error> where D: serde::de::Deserializer<'de>, { match String::deserialize(deserializer)?.as_str() { "gated-gelu" => Ok(ActivationWithOptionalGating { gated: true, activation: candle_nn::Activation::NewGelu, }), "gated-silu" => Ok(ActivationWithOptionalGating { gated: true, activation: candle_nn::Activation::Silu, }), buf => { let activation = serde_plain::from_str(buf).map_err(serde::de::Error::custom)?; Ok(ActivationWithOptionalGating { gated: false, activation, }) } } } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub d_model: usize, pub d_kv: usize, pub d_ff: usize, pub num_layers: usize, pub num_decoder_layers: Option<usize>, pub num_heads: usize, pub relative_attention_num_buckets: usize, #[serde(default = "default_relative_attention_max_distance")] pub relative_attention_max_distance: usize, pub dropout_rate: f64, pub layer_norm_epsilon: f64, pub initializer_factor: f64, #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")] pub feed_forward_proj: ActivationWithOptionalGating, #[serde(default = "default_tie_word_embeddings")] pub tie_word_embeddings: bool, #[serde(default = "default_is_decoder")] pub is_decoder: bool, pub is_encoder_decoder: bool, #[serde(default = "default_use_cache")] pub use_cache: bool, pub pad_token_id: usize, pub eos_token_id: usize, pub decoder_start_token_id: Option<usize>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 32128, d_model: 512, d_kv: 64, d_ff: 2048, num_layers: 6, num_decoder_layers: None, num_heads: 8, relative_attention_num_buckets: 32, relative_attention_max_distance: 128, dropout_rate: 0.1, layer_norm_epsilon: 1e-6, initializer_factor: 1.0, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, is_decoder: false, is_encoder_decoder: true, use_cache: true, pad_token_id: 0, eos_token_id: 1, decoder_start_token_id: Some(0), } } } impl Config { // https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L184 pub fn musicgen_small() -> Self { Self { d_ff: 3072, d_kv: 64, d_model: 768, dropout_rate: 0.1, eos_token_id: 1, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: 
Activation::Relu, }, tie_word_embeddings: true, initializer_factor: 1.0, is_decoder: false, is_encoder_decoder: true, layer_norm_epsilon: 1e-6, num_decoder_layers: Some(12), num_heads: 12, num_layers: 12, pad_token_id: 0, decoder_start_token_id: Some(0), relative_attention_max_distance: 128, relative_attention_num_buckets: 32, use_cache: true, vocab_size: 32128, } } } #[derive(Debug, Clone)] struct T5LayerNorm { weight: Tensor, variance_epsilon: f64, span: tracing::Span, } impl T5LayerNorm { fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(h, "weight")?; Ok(Self { weight, variance_epsilon: eps, span: tracing::span!(tracing::Level::TRACE, "layer-norm"), }) } } impl Module for T5LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let dtype = xs.dtype(); let xs_f32 = xs.to_dtype(DType::F32)?; // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?; let xs = xs_f32.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?; let xs = xs.to_dtype(dtype)?; let xs = xs.broadcast_mul(&self.weight)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseActDense { wi: Linear, wo: Linear, act: Activation, span: tracing::Span, } impl T5DenseActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi"))?; let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi, wo, act: Activation::Relu, span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"), }) } } impl Module for T5DenseActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.wi.forward(xs)?; let xs = self.act.forward(&xs)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseGatedActDense { wi_0: Linear, wi_1: Linear, wo: Linear, act: Activation, span: tracing::Span, } impl T5DenseGatedActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi_0 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?; let wi_1 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?; let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi_0, wi_1, wo, act: cfg.feed_forward_proj.activation, span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"), }) } } impl Module for T5DenseGatedActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?; let hidden_linear = self.wi_1.forward(xs)?; let xs = hidden_gelu.broadcast_mul(&hidden_linear)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5LayerFF { dense_act: Option<T5DenseActDense>, gated_dense_act: Option<T5DenseGatedActDense>, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerFF { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated { ( None, Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?), ) } else { ( Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?), None, ) }; Ok(Self { dense_act, gated_dense_act, layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer-ff"), }) } } impl Module for T5LayerFF { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.layer_norm.forward(xs)?; let ys = match 
&self.dense_act { Some(dense_act) => dense_act.forward(&ys)?, None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?, }; let xs = (xs + ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5Attention { q: Linear, k: Linear, v: Linear, o: Linear, n_heads: usize, d_kv: usize, relative_attention_bias: Option<Embedding>, relative_attention_num_buckets: usize, relative_attention_max_distance: usize, inner_dim: usize, use_cache: bool, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, span_cache: tracing::Span, span_mm: tracing::Span, span_sm: tracing::Span, } impl T5Attention { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let inner_dim = cfg.num_heads * cfg.d_kv; let q = linear_no_bias(cfg.d_model, inner_dim, vb.pp("q"))?; let k = linear_no_bias(cfg.d_model, inner_dim, vb.pp("k"))?; let v = linear_no_bias(cfg.d_model, inner_dim, vb.pp("v"))?; let o = linear_no_bias(inner_dim, cfg.d_model, vb.pp("o"))?; let relative_attention_bias = if has_relative_attention_bias { let emb = Embedding::new( cfg.relative_attention_num_buckets, cfg.num_heads, vb.pp("relative_attention_bias"), )?; Some(emb) } else { None }; Ok(Self { q, k, v, o, n_heads: cfg.num_heads, d_kv: cfg.d_kv, relative_attention_bias, relative_attention_num_buckets: cfg.relative_attention_num_buckets, relative_attention_max_distance: cfg.relative_attention_max_distance, inner_dim, use_cache: cfg.use_cache && decoder, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"), span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"), span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, key_value_states: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { // Performs Self-attention (if key_value_states is None) or attention // over source sentence (provided by key_value_states). let _enter = self.span.enter(); let kv_input = match key_value_states { None => xs, Some(key_value_states) => key_value_states, }; let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?); let kv_len = kv_input.dim(1)?; let q = self.q.forward(xs)?; let k = self.k.forward(kv_input)?; let v = self.v.forward(kv_input)?; let q = q .reshape((b_sz, q_len, self.n_heads, self.d_kv))? .transpose(1, 2)? .contiguous()?; let mut k = k .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; if self.use_cache && key_value_states.is_none() { let _enter = self.span_cache.enter(); if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache { k = Tensor::cat(&[kv_cache_k, &k], 2)?; v = Tensor::cat(&[kv_cache_v, &v], 2)?; }; self.kv_cache = Some((k.clone(), v.clone())); }; let k = k.contiguous()?; let v = v.contiguous()?; // TODO: Use flash_attn. let scores = { let _enter = self.span_mm.enter(); q.matmul(&k.t()?)? }; let scores = match mask { None => scores, Some(mask) => masked_fill( &scores, &mask .unsqueeze(0)? .unsqueeze(0)? .repeat((b_sz, self.n_heads))?, f32::NEG_INFINITY, )?, }; let (scores, position_bias) = match position_bias { Some(position_bias) => ( scores.broadcast_add(position_bias)?, Some(position_bias.clone()), ), None => match &self.relative_attention_bias { None => (scores, None), Some(relative_attention_bias) => { // This only handles the bidirectional case. 
let kv_len = k.dim(2)?; let (q_start, q_end) = match self.use_cache { true => ((kv_len - q_len) as u32, kv_len as u32), false => (0_u32, kv_len as u32), }; let num_buckets = self.relative_attention_num_buckets as u32 / 2; let max_exact = num_buckets / 2; let relative_position = (q_start..q_end) .map(|i| { (0..kv_len as u32) .map(|j| { if i < j { if j - i < max_exact { j - i + num_buckets } else { let b = f32::log( (j - i) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min( max_exact + num_buckets + b as u32, self.relative_attention_num_buckets as u32 - 1, ) } } else if i - j < max_exact { i - j } else { let b = f32::log( (i - j) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min(max_exact + b as u32, num_buckets - 1) } }) .collect::<Vec<u32>>() }) .collect::<Vec<Vec<_>>>(); let relative_buckets = Tensor::new(relative_position, q.device())?; let position_bias = relative_attention_bias .forward(&relative_buckets)? .permute((2, 0, 1))? .unsqueeze(0)?; (scores.broadcast_add(&position_bias)?, Some(position_bias)) // TODO: position_bias_masked? } }, }; let attn_weights = { let _enter = self.span_sm.enter(); candle_nn::ops::softmax_last_dim(&scores)? }; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.inner_dim))?; let attn_output = self.o.forward(&attn_output)?; Ok((attn_output, position_bias)) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct T5LayerSelfAttention { self_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerSelfAttention { fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { self_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_xs = self.layer_norm.forward(xs)?; let (ys, position_bias) = self.self_attention .forward(&normed_xs, position_bias, None, mask)?; let ys = (xs + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5LayerCrossAttention { cross_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerCrossAttention { fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { cross_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "cross-attn"), }) } fn forward( &mut self, hidden_states: &Tensor, position_bias: Option<&Tensor>, key_value_states: &Tensor, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_hidden_states = self.layer_norm.forward(hidden_states)?; let (ys, position_bias) = self.cross_attention.forward( &normed_hidden_states, position_bias, Some(key_value_states), None, )?; let ys = (hidden_states + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { 
self.cross_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5Block { self_attn: T5LayerSelfAttention, cross_attn: Option<T5LayerCrossAttention>, ff: T5LayerFF, span: tracing::Span, } impl T5Block { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let vb = vb.pp("layer"); let self_attn = T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?; let cross_attn = if cfg.is_decoder { Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?) } else { None }; let ff_i = if cross_attn.is_some() { 2 } else { 1 }; let ff = T5LayerFF::load(vb.pp(ff_i.to_string()), cfg)?; Ok(Self { self_attn, cross_attn, ff, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); // TODO: Cache masks let mask = match self.cross_attn.is_some() { true => { let mask_len = xs.dim(1)?; // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape // issues when using the KV cache in the decoder. if mask_len <= 1 { None } else { Some(get_mask(mask_len, xs.device())?) } } false => None, }; let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?; // TODO: clamp for f16? if let Some(cross_attn) = &mut self.cross_attn { (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?; // TODO: clamp for f16? } let xs = self.ff.forward(&xs)?; // TODO: clamp for f16? Ok((xs, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache()); } } #[derive(Debug, Clone)] struct T5Stack { block: Vec<T5Block>, shared: Arc<Embedding>, final_layer_norm: T5LayerNorm, span: tracing::Span, } impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) .map(|i| T5Block::load(i == 0, decoder, vb.pp(&format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, cfg.layer_norm_epsilon, vb.pp("final_layer_norm"), )?; Ok(Self { block, shared: shared.clone(), final_layer_norm, span: tracing::span!(tracing::Level::TRACE, "stack"), }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let input_embeds = self.shared.as_ref().forward(input_ids)?; let mut hidden_states = input_embeds; let mut position_bias = None; for block in self.block.iter_mut() { (hidden_states, position_bias) = block.forward( &hidden_states, position_bias.as_ref(), encoder_hidden_states, )? 
} self.final_layer_norm.forward(&hidden_states) } fn clear_kv_cache(&mut self) { self.block.iter_mut().for_each(|b| b.clear_kv_cache()) } } #[derive(Debug, Clone)] pub struct T5EncoderModel { encoder: T5Stack, device: Device, span: tracing::Span, } impl T5EncoderModel { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let shared_vb = if vb.contains_tensor("shared.weight") { vb.pp("shared") } else if vb.contains_tensor("decoder.embed_tokens") { vb.pp("decoder").pp("embed_tokens") } else { vb.pp("encoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?; Ok(Self { encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "encoder"), }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward(input_ids, None) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct T5ForConditionalGeneration { encoder: T5Stack, decoder: T5Stack, d_model: usize, tie_word_embeddings: bool, lm_head: Option<Linear>, shared: Arc<Embedding>, device: Device, span_decode: tracing::Span, span_decode_head: tracing::Span, } impl T5ForConditionalGeneration { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { assert!(cfg.is_encoder_decoder); let d_model = cfg.d_model; let shared_vb = if vb.contains_tensor("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let mut encoder_cfg = cfg.clone(); encoder_cfg.is_decoder = false; encoder_cfg.use_cache = false; encoder_cfg.is_encoder_decoder = false; let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?; let mut decoder_cfg = cfg.clone(); decoder_cfg.is_decoder = true; decoder_cfg.is_encoder_decoder = false; decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers); let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?; let tie_word_embeddings = cfg.tie_word_embeddings; let lm_head = if tie_word_embeddings { None } else { Some(linear_no_bias( cfg.d_model, cfg.vocab_size, vb.pp("lm_head"), )?) }; Ok(Self { encoder, decoder, d_model, tie_word_embeddings, lm_head, shared, device: vb.device().clone(), span_decode: tracing::span!(tracing::Level::TRACE, "decode"), span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"), }) } pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.encoder.forward(input_ids, None) } pub fn decode( &mut self, decoder_input_ids: &Tensor, encoder_output: &Tensor, ) -> Result<Tensor> { let _enter = self.span_decode.enter(); let decoder_output = self .decoder .forward(decoder_input_ids, Some(encoder_output))?; let scaling_factor = if self.tie_word_embeddings { // Rescale output before projecting on vocab // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 (self.d_model as f64).sqrt() } else { 1.0 }; let sequence_output = ((decoder_output .narrow(1, decoder_output.dim(1)? - 1, 1)? .squeeze(1)?) 
* scaling_factor)?; let output = { let _enter = self.span_decode_head.enter(); match self.lm_head { None => sequence_output.matmul(&self.shared.embeddings().t()?)?, Some(ref lm_head) => lm_head.forward(&sequence_output)?, } }; Ok(output) } pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> { let encoder_output = self.encode(input_ids)?; self.decode(decoder_input_ids, &encoder_output) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache(); self.decoder.clear_kv_cache(); } }
candle/candle-transformers/src/models/t5.rs/0
{ "file_path": "candle/candle-transformers/src/models/t5.rs", "repo_id": "candle", "token_count": 15093 }
50
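The T5 listing above splits generation into `encode` (run once over the source) and `decode` (run per step against the cached encoder output). A sketch of that loop follows; the weight path, the safetensors loading call, and the greedy `argmax` selection are assumptions for illustration, and feeding only the newest token after the first step relies on `cfg.use_cache` being true (its default).

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::t5::{Config, T5ForConditionalGeneration};

/// Encoder-decoder generation sketch: `cfg` and `weights` are assumed to come from a T5 checkpoint.
fn generate(cfg: &Config, weights: &str, source_ids: &[u32], max_steps: usize) -> Result<Vec<u32>> {
    let device = Device::Cpu;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let mut model = T5ForConditionalGeneration::load(vb, cfg)?;

    // Encode the source sequence once; every decoding step re-uses this output.
    let source = Tensor::new(source_ids, &device)?.unsqueeze(0)?;
    let encoder_output = model.encode(&source)?;

    let start = cfg.decoder_start_token_id.unwrap_or(cfg.pad_token_id) as u32;
    let mut output_tokens = vec![start];
    for step in 0..max_steps {
        // With the decoder KV cache on, only the newly produced token is fed after step 0.
        let ctxt = if step == 0 {
            &output_tokens[..]
        } else {
            &output_tokens[output_tokens.len() - 1..]
        };
        let decoder_ids = Tensor::new(ctxt, &device)?.unsqueeze(0)?;
        let logits = model.decode(&decoder_ids, &encoder_output)?; // (1, vocab_size)
        let next = logits.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
        if next as usize == cfg.eos_token_id {
            break;
        }
        output_tokens.push(next);
    }
    Ok(output_tokens)
}
```

Re-using the same model instance on a new input would call `clear_kv_cache` first, as the decoder caches key/value tensors across steps.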
/// https://huggingface.co/01-ai/Yi-6B/blob/main/modeling_yi.py use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: usize, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) rms_norm_eps: f64, pub(crate) rope_theta: f64, } impl Config { pub fn config_6b() -> Self { Self { vocab_size: 64000, hidden_size: 4096, intermediate_size: 11008, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 4, hidden_act: Activation::Silu, max_position_embeddings: 4096, rms_norm_eps: 1e-5, rope_theta: 5_000_000., } } pub fn config_34b() -> Self { Self { vocab_size: 64000, hidden_size: 7168, intermediate_size: 20480, num_hidden_layers: 60, num_attention_heads: 56, num_key_value_heads: 8, hidden_act: Activation::Silu, max_position_embeddings: 4096, rms_norm_eps: 1e-5, rope_theta: 5_000_000., } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? 
+ rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? 
}; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, ln1: RmsNorm, ln2: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let ln2 = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, ln1, ln2, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.ln1.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.ln2)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/yi.rs/0
{ "file_path": "candle/candle-transformers/src/models/yi.rs", "repo_id": "candle", "token_count": 6220 }
51
export async function getEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } const MODELS = { intfloat_e5_small_v2: { base_url: "https://huggingface.co/intfloat/e5-small-v2/resolve/main/", search_prefix: "query: ", document_prefix: "passage: ", }, intfloat_e5_base_v2: { base_url: "https://huggingface.co/intfloat/e5-base-v2/resolve/main/", search_prefix: "query: ", document_prefix: "passage:", }, intfloat_multilingual_e5_small: { base_url: "https://huggingface.co/intfloat/multilingual-e5-small/resolve/main/", search_prefix: "query: ", document_prefix: "passage: ", }, sentence_transformers_all_MiniLM_L6_v2: { base_url: "https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/refs%2Fpr%2F21/", search_prefix: "", document_prefix: "", }, sentence_transformers_all_MiniLM_L12_v2: { base_url: "https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/refs%2Fpr%2F4/", search_prefix: "", document_prefix: "", }, }; export function getModelInfo(id) { return { modelURL: MODELS[id].base_url + "model.safetensors", configURL: MODELS[id].base_url + "config.json", tokenizerURL: MODELS[id].base_url + "tokenizer.json", search_prefix: MODELS[id].search_prefix, document_prefix: MODELS[id].document_prefix, }; } export function cosineSimilarity(vec1, vec2) { const dot = vec1.reduce((acc, val, i) => acc + val * vec2[i], 0); const a = Math.sqrt(vec1.reduce((acc, val) => acc + val * val, 0)); const b = Math.sqrt(vec2.reduce((acc, val) => acc + val * val, 0)); return dot / (a * b); } export async function getWikiText(article) { // thanks to wikipedia for the API const URL = `https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exlimit=1&titles=${article}&explaintext=1&exsectionformat=plain&format=json&origin=*`; return fetch(URL, { method: "GET", headers: { Accept: "application/json", }, }) .then((r) => r.json()) .then((data) => { const pages = data.query.pages; const pageId = Object.keys(pages)[0]; const extract = pages[pageId].extract; if (extract === undefined || extract === "") { throw new Error("No article found"); } return extract; }) .catch((error) => console.error("Error:", error)); }
candle/candle-wasm-examples/bert/utils.js/0
{ "file_path": "candle/candle-wasm-examples/bert/utils.js", "repo_id": "candle", "token_count": 1250 }
52
cargo build --target wasm32-unknown-unknown --release wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web wasm-bindgen ../../target/wasm32-unknown-unknown/release/m-quantized.wasm --out-dir build --target web
candle/candle-wasm-examples/t5/build-lib.sh/0
{ "file_path": "candle/candle-wasm-examples/t5/build-lib.sh", "repo_id": "candle", "token_count": 84 }
53
use yew_agent::PublicWorker; fn main() { candle_wasm_example_whisper::Worker::register(); }
candle/candle-wasm-examples/whisper/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/worker.rs", "repo_id": "candle", "token_count": 38 }
54
use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d, conv2d_no_bias, BatchNorm, Conv2d, Conv2dConfig, Module, VarBuilder, }; use image::DynamicImage; // Model architecture from https://github.com/ultralytics/ultralytics/issues/189 // https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py #[derive(Clone, Copy, PartialEq, Debug)] pub struct Multiples { depth: f64, width: f64, ratio: f64, } impl Multiples { pub fn n() -> Self { Self { depth: 0.33, width: 0.25, ratio: 2.0, } } pub fn s() -> Self { Self { depth: 0.33, width: 0.50, ratio: 2.0, } } pub fn m() -> Self { Self { depth: 0.67, width: 0.75, ratio: 1.5, } } pub fn l() -> Self { Self { depth: 1.00, width: 1.00, ratio: 1.0, } } pub fn x() -> Self { Self { depth: 1.00, width: 1.25, ratio: 1.0, } } fn filters(&self) -> (usize, usize, usize) { let f1 = (256. * self.width) as usize; let f2 = (512. * self.width) as usize; let f3 = (512. * self.width * self.ratio) as usize; (f1, f2, f3) } } #[derive(Debug)] struct Upsample { scale_factor: usize, } impl Upsample { fn new(scale_factor: usize) -> Result<Self> { Ok(Upsample { scale_factor }) } } impl Module for Upsample { fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let (_b_size, _channels, h, w) = xs.dims4()?; xs.upsample_nearest2d(self.scale_factor * h, self.scale_factor * w) } } #[derive(Debug)] struct ConvBlock { conv: Conv2d, bn: BatchNorm, } impl ConvBlock { fn load( vb: VarBuilder, c1: usize, c2: usize, k: usize, stride: usize, padding: Option<usize>, ) -> Result<Self> { let padding = padding.unwrap_or(k / 2); let cfg = Conv2dConfig { padding, stride, groups: 1, dilation: 1, }; let conv = conv2d_no_bias(c1, c2, k, cfg, vb.pp("conv"))?; let bn = batch_norm(c2, 1e-3, vb.pp("bn"))?; Ok(Self { conv, bn }) } } impl Module for ConvBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.conv.forward(xs)?.apply_t(&self.bn, false)?; candle_nn::ops::silu(&xs) } } #[derive(Debug)] struct Bottleneck { cv1: ConvBlock, cv2: ConvBlock, residual: bool, } impl Bottleneck { fn load(vb: VarBuilder, c1: usize, c2: usize, shortcut: bool) -> Result<Self> { let channel_factor = 1.; let c_ = (c2 as f64 * channel_factor) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 3, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_, c2, 3, 1, None)?; let residual = c1 == c2 && shortcut; Ok(Self { cv1, cv2, residual }) } } impl Module for Bottleneck { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let ys = self.cv2.forward(&self.cv1.forward(xs)?)?; if self.residual { xs + ys } else { Ok(ys) } } } #[derive(Debug)] struct C2f { cv1: ConvBlock, cv2: ConvBlock, bottleneck: Vec<Bottleneck>, } impl C2f { fn load(vb: VarBuilder, c1: usize, c2: usize, n: usize, shortcut: bool) -> Result<Self> { let c = (c2 as f64 * 0.5) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, 2 * c, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?; let mut bottleneck = Vec::with_capacity(n); for idx in 0..n { let b = Bottleneck::load(vb.pp(&format!("bottleneck.{idx}")), c, c, shortcut)?; bottleneck.push(b) } Ok(Self { cv1, cv2, bottleneck, }) } } impl Module for C2f { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let ys = self.cv1.forward(xs)?; let mut ys = ys.chunk(2, 1)?; for m in self.bottleneck.iter() { ys.push(m.forward(ys.last().unwrap())?) 
} let zs = Tensor::cat(ys.as_slice(), 1)?; self.cv2.forward(&zs) } } #[derive(Debug)] struct Sppf { cv1: ConvBlock, cv2: ConvBlock, k: usize, } impl Sppf { fn load(vb: VarBuilder, c1: usize, c2: usize, k: usize) -> Result<Self> { let c_ = c1 / 2; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_ * 4, c2, 1, 1, None)?; Ok(Self { cv1, cv2, k }) } } impl Module for Sppf { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_, _, _, _) = xs.dims4()?; let xs = self.cv1.forward(xs)?; let xs2 = xs .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs3 = xs2 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs4 = xs3 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; self.cv2.forward(&Tensor::cat(&[&xs, &xs2, &xs3, &xs4], 1)?) } } #[derive(Debug)] struct Dfl { conv: Conv2d, num_classes: usize, } impl Dfl { fn load(vb: VarBuilder, num_classes: usize) -> Result<Self> { let conv = conv2d_no_bias(num_classes, 1, 1, Default::default(), vb.pp("conv"))?; Ok(Self { conv, num_classes }) } } impl Module for Dfl { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b_sz, _channels, anchors) = xs.dims3()?; let xs = xs .reshape((b_sz, 4, self.num_classes, anchors))? .transpose(2, 1)?; let xs = candle_nn::ops::softmax(&xs, 1)?; self.conv.forward(&xs)?.reshape((b_sz, 4, anchors)) } } #[derive(Debug)] struct DarkNet { b1_0: ConvBlock, b1_1: ConvBlock, b2_0: C2f, b2_1: ConvBlock, b2_2: C2f, b3_0: ConvBlock, b3_1: C2f, b4_0: ConvBlock, b4_1: C2f, b5: Sppf, } impl DarkNet { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let (w, r, d) = (m.width, m.ratio, m.depth); let b1_0 = ConvBlock::load(vb.pp("b1.0"), 3, (64. * w) as usize, 3, 2, Some(1))?; let b1_1 = ConvBlock::load( vb.pp("b1.1"), (64. * w) as usize, (128. * w) as usize, 3, 2, Some(1), )?; let b2_0 = C2f::load( vb.pp("b2.0"), (128. * w) as usize, (128. * w) as usize, (3. * d).round() as usize, true, )?; let b2_1 = ConvBlock::load( vb.pp("b2.1"), (128. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let b2_2 = C2f::load( vb.pp("b2.2"), (256. * w) as usize, (256. * w) as usize, (6. * d).round() as usize, true, )?; let b3_0 = ConvBlock::load( vb.pp("b3.0"), (256. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let b3_1 = C2f::load( vb.pp("b3.1"), (512. * w) as usize, (512. * w) as usize, (6. * d).round() as usize, true, )?; let b4_0 = ConvBlock::load( vb.pp("b4.0"), (512. * w) as usize, (512. * w * r) as usize, 3, 2, Some(1), )?; let b4_1 = C2f::load( vb.pp("b4.1"), (512. * w * r) as usize, (512. * w * r) as usize, (3. * d).round() as usize, true, )?; let b5 = Sppf::load( vb.pp("b5.0"), (512. * w * r) as usize, (512. 
* w * r) as usize, 5, )?; Ok(Self { b1_0, b1_1, b2_0, b2_1, b2_2, b3_0, b3_1, b4_0, b4_1, b5, }) } fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let x1 = self.b1_1.forward(&self.b1_0.forward(xs)?)?; let x2 = self .b2_2 .forward(&self.b2_1.forward(&self.b2_0.forward(&x1)?)?)?; let x3 = self.b3_1.forward(&self.b3_0.forward(&x2)?)?; let x4 = self.b4_1.forward(&self.b4_0.forward(&x3)?)?; let x5 = self.b5.forward(&x4)?; Ok((x2, x3, x5)) } } #[derive(Debug)] struct YoloV8Neck { up: Upsample, n1: C2f, n2: C2f, n3: ConvBlock, n4: C2f, n5: ConvBlock, n6: C2f, } impl YoloV8Neck { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let up = Upsample::new(2)?; let (w, r, d) = (m.width, m.ratio, m.depth); let n = (3. * d).round() as usize; let n1 = C2f::load( vb.pp("n1"), (512. * w * (1. + r)) as usize, (512. * w) as usize, n, false, )?; let n2 = C2f::load( vb.pp("n2"), (768. * w) as usize, (256. * w) as usize, n, false, )?; let n3 = ConvBlock::load( vb.pp("n3"), (256. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let n4 = C2f::load( vb.pp("n4"), (768. * w) as usize, (512. * w) as usize, n, false, )?; let n5 = ConvBlock::load( vb.pp("n5"), (512. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let n6 = C2f::load( vb.pp("n6"), (512. * w * (1. + r)) as usize, (512. * w * r) as usize, n, false, )?; Ok(Self { up, n1, n2, n3, n4, n5, n6, }) } fn forward(&self, p3: &Tensor, p4: &Tensor, p5: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let x = self .n1 .forward(&Tensor::cat(&[&self.up.forward(p5)?, p4], 1)?)?; let head_1 = self .n2 .forward(&Tensor::cat(&[&self.up.forward(&x)?, p3], 1)?)?; let head_2 = self .n4 .forward(&Tensor::cat(&[&self.n3.forward(&head_1)?, &x], 1)?)?; let head_3 = self .n6 .forward(&Tensor::cat(&[&self.n5.forward(&head_2)?, p5], 1)?)?; Ok((head_1, head_2, head_3)) } } #[derive(Debug)] struct DetectionHead { dfl: Dfl, cv2: [(ConvBlock, ConvBlock, Conv2d); 3], cv3: [(ConvBlock, ConvBlock, Conv2d); 3], ch: usize, no: usize, } #[derive(Debug)] struct PoseHead { detect: DetectionHead, cv4: [(ConvBlock, ConvBlock, Conv2d); 3], kpt: (usize, usize), } fn make_anchors( xs0: &Tensor, xs1: &Tensor, xs2: &Tensor, (s0, s1, s2): (usize, usize, usize), grid_cell_offset: f64, ) -> Result<(Tensor, Tensor)> { let dev = xs0.device(); let mut anchor_points = vec![]; let mut stride_tensor = vec![]; for (xs, stride) in [(xs0, s0), (xs1, s1), (xs2, s2)] { // xs is only used to extract the h and w dimensions. let (_, _, h, w) = xs.dims4()?; let sx = (Tensor::arange(0, w as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sy = (Tensor::arange(0, h as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sx = sx .reshape((1, sx.elem_count()))? .repeat((h, 1))? .flatten_all()?; let sy = sy .reshape((sy.elem_count(), 1))? .repeat((1, w))? .flatten_all()?; anchor_points.push(Tensor::stack(&[&sx, &sy], D::Minus1)?); stride_tensor.push((Tensor::ones(h * w, DType::F32, dev)? * stride as f64)?); } let anchor_points = Tensor::cat(anchor_points.as_slice(), 0)?; let stride_tensor = Tensor::cat(stride_tensor.as_slice(), 0)?.unsqueeze(1)?; Ok((anchor_points, stride_tensor)) } struct DetectionHeadOut { pred: Tensor, anchors: Tensor, strides: Tensor, } fn dist2bbox(distance: &Tensor, anchor_points: &Tensor) -> Result<Tensor> { let chunks = distance.chunk(2, 1)?; let lt = &chunks[0]; let rb = &chunks[1]; let x1y1 = anchor_points.sub(lt)?; let x2y2 = anchor_points.add(rb)?; let c_xy = ((&x1y1 + &x2y2)? 
* 0.5)?; let wh = (&x2y2 - &x1y1)?; Tensor::cat(&[c_xy, wh], 1) } impl DetectionHead { fn load(vb: VarBuilder, nc: usize, filters: (usize, usize, usize)) -> Result<Self> { let ch = 16; let dfl = Dfl::load(vb.pp("dfl"), ch)?; let c1 = usize::max(filters.0, nc); let c2 = usize::max(filters.0 / 4, ch * 4); let cv3 = [ Self::load_cv3(vb.pp("cv3.0"), c1, nc, filters.0)?, Self::load_cv3(vb.pp("cv3.1"), c1, nc, filters.1)?, Self::load_cv3(vb.pp("cv3.2"), c1, nc, filters.2)?, ]; let cv2 = [ Self::load_cv2(vb.pp("cv2.0"), c2, ch, filters.0)?, Self::load_cv2(vb.pp("cv2.1"), c2, ch, filters.1)?, Self::load_cv2(vb.pp("cv2.2"), c2, ch, filters.2)?, ]; let no = nc + ch * 4; Ok(Self { dfl, cv2, cv3, ch, no, }) } fn load_cv3( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn load_cv2( vb: VarBuilder, c2: usize, ch: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c2, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c2, c2, 3, 1, None)?; let conv = conv2d(c2, 4 * ch, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<DetectionHeadOut> { let forward_cv = |xs, i: usize| { let xs_2 = self.cv2[i].0.forward(xs)?; let xs_2 = self.cv2[i].1.forward(&xs_2)?; let xs_2 = self.cv2[i].2.forward(&xs_2)?; let xs_3 = self.cv3[i].0.forward(xs)?; let xs_3 = self.cv3[i].1.forward(&xs_3)?; let xs_3 = self.cv3[i].2.forward(&xs_3)?; Tensor::cat(&[&xs_2, &xs_3], 1) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let (anchors, strides) = make_anchors(&xs0, &xs1, &xs2, (8, 16, 32), 0.5)?; let anchors = anchors.transpose(0, 1)?.unsqueeze(0)?; let strides = strides.transpose(0, 1)?; let reshape = |xs: &Tensor| { let d = xs.dim(0)?; let el = xs.elem_count(); xs.reshape((d, self.no, el / (d * self.no))) }; let ys0 = reshape(&xs0)?; let ys1 = reshape(&xs1)?; let ys2 = reshape(&xs2)?; let x_cat = Tensor::cat(&[ys0, ys1, ys2], 2)?; let box_ = x_cat.i((.., ..self.ch * 4))?; let cls = x_cat.i((.., self.ch * 4..))?; let dbox = dist2bbox(&self.dfl.forward(&box_)?, &anchors)?; let dbox = dbox.broadcast_mul(&strides)?; let pred = Tensor::cat(&[dbox, candle_nn::ops::sigmoid(&cls)?], 1)?; Ok(DetectionHeadOut { pred, anchors, strides, }) } } impl PoseHead { // kpt: keypoints, (17, 3) // nc: num-classes, 80 fn load( vb: VarBuilder, nc: usize, kpt: (usize, usize), filters: (usize, usize, usize), ) -> Result<Self> { let detect = DetectionHead::load(vb.clone(), nc, filters)?; let nk = kpt.0 * kpt.1; let c4 = usize::max(filters.0 / 4, nk); let cv4 = [ Self::load_cv4(vb.pp("cv4.0"), c4, nk, filters.0)?, Self::load_cv4(vb.pp("cv4.1"), c4, nk, filters.1)?, Self::load_cv4(vb.pp("cv4.2"), c4, nk, filters.2)?, ]; Ok(Self { detect, cv4, kpt }) } fn load_cv4( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<Tensor> { let d = self.detect.forward(xs0, 
xs1, xs2)?; let forward_cv = |xs: &Tensor, i: usize| { let (b_sz, _, h, w) = xs.dims4()?; let xs = self.cv4[i].0.forward(xs)?; let xs = self.cv4[i].1.forward(&xs)?; let xs = self.cv4[i].2.forward(&xs)?; xs.reshape((b_sz, self.kpt.0 * self.kpt.1, h * w)) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let xs = Tensor::cat(&[xs0, xs1, xs2], D::Minus1)?; let (b_sz, _nk, hw) = xs.dims3()?; let xs = xs.reshape((b_sz, self.kpt.0, self.kpt.1, hw))?; let ys01 = ((xs.i((.., .., 0..2))? * 2.)?.broadcast_add(&d.anchors)? - 0.5)? .broadcast_mul(&d.strides)?; let ys2 = candle_nn::ops::sigmoid(&xs.i((.., .., 2..3))?)?; let ys = Tensor::cat(&[ys01, ys2], 2)?.flatten(1, 2)?; Tensor::cat(&[d.pred, ys], 1) } } #[derive(Debug)] pub struct YoloV8 { net: DarkNet, fpn: YoloV8Neck, head: DetectionHead, } impl YoloV8 { pub fn load(vb: VarBuilder, m: Multiples, num_classes: usize) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = DetectionHead::load(vb.pp("head"), num_classes, m.filters())?; Ok(Self { net, fpn, head }) } } impl Module for YoloV8 { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; Ok(self.head.forward(&xs1, &xs2, &xs3)?.pred) } } #[derive(Debug)] pub struct YoloV8Pose { net: DarkNet, fpn: YoloV8Neck, head: PoseHead, } impl YoloV8Pose { pub fn load( vb: VarBuilder, m: Multiples, num_classes: usize, kpt: (usize, usize), ) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = PoseHead::load(vb.pp("head"), num_classes, kpt, m.filters())?; Ok(Self { net, fpn, head }) } } impl Module for YoloV8Pose { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; self.head.forward(&xs1, &xs2, &xs3) } } #[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)] pub struct KeyPoint { pub x: f32, pub y: f32, pub mask: f32, } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct Bbox { pub xmin: f32, pub ymin: f32, pub xmax: f32, pub ymax: f32, pub confidence: f32, pub keypoints: Vec<KeyPoint>, } // Intersection over union of two bounding boxes. fn iou(b1: &Bbox, b2: &Bbox) -> f32 { let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.); let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.); let i_xmin = b1.xmin.max(b2.xmin); let i_xmax = b1.xmax.min(b2.xmax); let i_ymin = b1.ymin.max(b2.ymin); let i_ymax = b1.ymax.min(b2.ymax); let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.); i_area / (b1_area + b2_area - i_area) } pub fn report_detect( pred: &Tensor, img: DynamicImage, w: usize, h: usize, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Vec<Bbox>>> { let (pred_size, npreds) = pred.dims2()?; let nclasses = pred_size - 4; let conf_threshold = conf_threshold.clamp(0.0, 1.0); let iou_threshold = iou_threshold.clamp(0.0, 1.0); // The bounding boxes grouped by (maximum) class index. let mut bboxes: Vec<Vec<Bbox>> = (0..nclasses).map(|_| vec![]).collect(); // Extract the bounding boxes for which confidence is above the threshold. 
for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = *pred[4..].iter().max_by(|x, y| x.total_cmp(y)).unwrap(); if confidence > conf_threshold { let mut class_index = 0; for i in 0..nclasses { if pred[4 + i] > pred[4 + class_index] { class_index = i } } if pred[class_index + 4] > 0. { let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, keypoints: vec![], }; bboxes[class_index].push(bbox) } } } non_maximum_suppression(&mut bboxes, iou_threshold); // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height() as f32, img.width() as f32); let w_ratio = initial_w / w as f32; let h_ratio = initial_h / h as f32; for (class_index, bboxes_for_class) in bboxes.iter_mut().enumerate() { for b in bboxes_for_class.iter_mut() { crate::console_log!("{}: {:?}", crate::coco_classes::NAMES[class_index], b); b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.); b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.); b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.); b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.); } } Ok(bboxes) } fn non_maximum_suppression(bboxes: &mut [Vec<Bbox>], threshold: f32) { // Perform non-maximum suppression. for bboxes_for_class in bboxes.iter_mut() { bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut current_index = 0; for index in 0..bboxes_for_class.len() { let mut drop = false; for prev_index in 0..current_index { let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]); if iou > threshold { drop = true; break; } } if !drop { bboxes_for_class.swap(current_index, index); current_index += 1; } } bboxes_for_class.truncate(current_index); } } pub fn report_pose( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, ) -> Result<Vec<Bbox>> { let (pred_size, npreds) = pred.dims2()?; if pred_size != 17 * 3 + 4 + 1 { candle::bail!("unexpected pred-size {pred_size}"); } let mut bboxes = vec![]; // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = pred[4]; if confidence > confidence_threshold { let keypoints = (0..17) .map(|i| KeyPoint { x: pred[3 * i + 5], y: pred[3 * i + 6], mask: pred[3 * i + 7], }) .collect::<Vec<_>>(); let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, keypoints, }; bboxes.push(bbox) } } let mut bboxes = vec![bboxes]; non_maximum_suppression(&mut bboxes, nms_threshold); let mut bboxes = bboxes.into_iter().next().unwrap(); let (initial_h, initial_w) = (img.height() as f32, img.width() as f32); let w_ratio = initial_w / w as f32; let h_ratio = initial_h / h as f32; for b in bboxes.iter_mut() { crate::console_log!("detected {b:?}"); b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.); b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.); b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.); b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.); for kp in b.keypoints.iter_mut() { kp.x = (kp.x * w_ratio).clamp(0., initial_w - 1.); kp.y = (kp.y * h_ratio).clamp(0., initial_h - 1.); } } Ok(bboxes) }
candle/candle-wasm-examples/yolo/src/model.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/model.rs", "repo_id": "candle", "token_count": 14731 }
55
# Prompt templates These are the templates used to format the conversation history for different models used in HuggingChat. Set them in your `.env.local` [like so](https://github.com/huggingface/chat-ui#chatprompttemplate). ## Llama 2 ```env <s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}} ``` ## CodeLlama ```env <s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}} ``` ## Falcon ```env System: {{preprompt}}\nUser:{{#each messages}}{{#ifUser}}{{content}}\nFalcon:{{/ifUser}}{{#ifAssistant}}{{content}}\nUser:{{/ifAssistant}}{{/each}} ``` ## Mistral ```env <s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}} ``` ## Zephyr ```env <|system|>\n{{preprompt}}</s>\n{{#each messages}}{{#ifUser}}<|user|>\n{{content}}</s>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}</s>\n{{/ifAssistant}}{{/each}} ``` ## IDEFICS ```env {{#each messages}}{{#ifUser}}User: {{content}}{{/ifUser}}<end_of_utterance>\nAssistant: {{#ifAssistant}}{{content}}\n{{/ifAssistant}}{{/each}} ``` ## OpenChat ```env <s>{{#each messages}}{{#ifUser}}GPT4 User: {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<|end_of_turn|>GPT4 Assistant: {{/ifUser}}{{#ifAssistant}}{{content}}<|end_of_turn|>{{/ifAssistant}}{{/each}} ``` ## Mixtral ```env <s> {{#each messages}}{{#ifUser}}[INST]{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}} {{content}}</s> {{/ifAssistant}}{{/each}} ``` ## ChatML ```env {{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}} ``` ## CodeLlama 70B ```env <s>{{#if @root.preprompt}}Source: system\n\n {{@root.preprompt}} <step> {{/if}}{{#each messages}}{{#ifUser}}Source: user\n\n {{content}} <step> {{/ifUser}}{{#ifAssistant}}Source: assistant\n\n {{content}} <step> {{/ifAssistant}}{{/each}}Source: assistant\nDestination: user\n\n `` ``` ## Gemma ```env {{#each messages}}{{#ifUser}}<start_of_turn>user\n{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<end_of_turn>\n<start_of_turn>model\n{{/ifUser}}{{#ifAssistant}}{{content}}<end_of_turn>\n{{/ifAssistant}}{{/each}} ```
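To show how one of these templates is actually applied, here is a rough sketch of a `.env.local` entry. The `MODELS`/`chatPromptTemplate` field names follow the chat-ui README linked above, while the model name and endpoint are placeholders to adapt to your own setup; the template itself is the Mistral one from this page:

```env
MODELS=`[
  {
    "name": "mistralai/Mistral-7B-Instruct-v0.2",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
    "endpoints": [{ "type": "tgi", "url": "http://127.0.0.1:8080" }]
  }
]`
```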
chat-ui/PROMPTS.md/0
{ "file_path": "chat-ui/PROMPTS.md", "repo_id": "chat-ui", "token_count": 1133 }
56
# Text Embedding Models By default (for backward compatibility), when `TEXT_EMBEDDING_MODELS` environment variable is not defined, [transformers.js](https://huggingface.co/docs/transformers.js) embedding models will be used for embedding tasks, specifically, the [Xenova/gte-small](https://huggingface.co/Xenova/gte-small) model. You can customize the embedding model by setting `TEXT_EMBEDDING_MODELS` in your `.env.local` file where the required fields are `name`, `chunkCharLength` and `endpoints`. Supported text embedding backends are: [`transformers.js`](https://huggingface.co/docs/transformers.js), [`TEI`](https://github.com/huggingface/text-embeddings-inference) and [`OpenAI`](https://platform.openai.com/docs/guides/embeddings). `transformers.js` models run locally as part of `chat-ui`, whereas `TEI` models run in a different environment & accessed through an API endpoint. `openai` models are accessed through the [OpenAI API](https://platform.openai.com/docs/guides/embeddings). When more than one embedding models are supplied in `.env.local` file, the first will be used by default, and the others will only be used on LLM's which configured `embeddingModel` to the name of the model. ## Transformers.js The Transformers.js backend uses local CPU for the embedding which can be quite slow. If possible, consider using TEI or OpenAI embeddings instead if you use web search frequently, as performance will improve significantly. ```ini TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "displayName": "Xenova/gte-small", "description": "locally running embedding", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]` ``` ## Text Embeddings Inference (TEI) > Text Embeddings Inference (TEI) is a comprehensive toolkit designed for efficient deployment and serving of open source text embeddings models. It enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE, and E5. Some recommended models at the time of writing (May 2024) are `Snowflake/snowflake-arctic-embed-m` and `BAAI/bge-large-en-v1.5`. You may run TEI locally with GPU support via Docker: `docker run --gpus all -p 8080:80 -v tei-data:/data --name tei ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id YOUR/HF_MODEL` You can then hook this up to your Chat UI instance with the following configuration. ```ini TEXT_EMBEDDING_MODELS=`[ { "name": "YOUR/HF_MODEL", "displayName": "YOUR/HF_MODEL", "preQuery": "Check the model documentation for the preQuery. Not all models have one", "prePassage": "Check the model documentation for the prePassage. Not all models have one", "chunkCharLength": 512, "endpoints": [{ "type": "tei", "url": "http://127.0.0.1:8080/" }] } ]` ``` Examples for `Snowflake/snowflake-arctic-embed-m` and `BAAI/bge-large-en-v1.5`: ```ini TEXT_EMBEDDING_MODELS=`[ { "name": "Snowflake/snowflake-arctic-embed-m", "displayName": "Snowflake/snowflake-arctic-embed-m", "preQuery": "Represent this sentence for searching relevant passages: ", "chunkCharLength": 512, "endpoints": [{ "type": "tei", "url": "http://127.0.0.1:8080/" }] },{ "name": "BAAI/bge-large-en-v1.5", "displayName": "BAAI/bge-large-en-v1.5", "chunkCharLength": 512, "endpoints": [{ "type": "tei", "url": "http://127.0.0.1:8080/" }] } ]` ``` ## OpenAI Please contribute an example!
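Since the OpenAI section above still asks for a contribution, here is a tentative sketch of what such a configuration could look like. It relies on the `openai` embedding endpoint type that exists in the chat-ui source, but the endpoint fields shown (`url`, `apiKey`) and the model name are assumptions; check the embedding endpoint schema in the repository before relying on them:

```ini
TEXT_EMBEDDING_MODELS=`[
  {
    "name": "text-embedding-3-small",
    "displayName": "text-embedding-3-small",
    "chunkCharLength": 512,
    "endpoints": [{
      "type": "openai",
      "url": "https://api.openai.com/v1/embeddings",
      "apiKey": "sk-..."
    }]
  }
]`
```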
chat-ui/docs/source/configuration/embeddings.md/0
{ "file_path": "chat-ui/docs/source/configuration/embeddings.md", "repo_id": "chat-ui", "token_count": 1246 }
57
# Configuration Overview Chat UI handles configuration with environment variables. The default config for Chat UI is stored in the `.env` file, which you may use as a reference. You will need to override some values to get Chat UI to run locally. This can be done in `.env.local` or via your environment. The bare minimum configuration to get Chat UI running is: ```ini MONGODB_URL=mongodb://localhost:27017 HF_TOKEN=your_token ``` The following sections detail the various parts of the app you may want to configure.
chat-ui/docs/source/configuration/overview.md/0
{ "file_path": "chat-ui/docs/source/configuration/overview.md", "repo_id": "chat-ui", "token_count": 130 }
58
import fs from "fs"; import yaml from "js-yaml"; const file = fs.readFileSync("chart/env/prod.yaml", "utf8"); // have to do a weird stringify/parse because of some node error const prod = JSON.parse(JSON.stringify(yaml.load(file))); const vars = prod.envVars as Record<string, string>; let PUBLIC_CONFIG = ""; Object.entries(vars).forEach(([key, value]) => { PUBLIC_CONFIG += `${key}=\`${value}\`\n`; }); const SECRET_CONFIG = (fs.existsSync(".env.SECRET_CONFIG") ? fs.readFileSync(".env.SECRET_CONFIG", "utf8") : process.env.SECRET_CONFIG) ?? ""; // Prepend the content of the env variable SECRET_CONFIG const full_config = `${PUBLIC_CONFIG}\n${SECRET_CONFIG}`; // Write full_config to .env.local fs.writeFileSync(".env.local", full_config);
chat-ui/scripts/updateLocalEnv.ts/0
{ "file_path": "chat-ui/scripts/updateLocalEnv.ts", "repo_id": "chat-ui", "token_count": 288 }
59
<script lang="ts"> export let label = ""; </script> <div class="group/tooltip md:relative"> <slot /> <div class="invisible absolute z-10 w-64 whitespace-normal rounded-md bg-black p-2 text-center text-white group-hover/tooltip:visible group-active/tooltip:visible max-sm:left-1/2 max-sm:-translate-x-1/2" > {label} </div> </div>
chat-ui/src/lib/components/HoverTooltip.svelte/0
{ "file_path": "chat-ui/src/lib/components/HoverTooltip.svelte", "repo_id": "chat-ui", "token_count": 132 }
60
<script lang="ts"> import { fade } from "svelte/transition"; import IconDazzled from "$lib/components/icons/IconDazzled.svelte"; export let message = ""; </script> <div transition:fade|global={{ duration: 300 }} class="pointer-events-none fixed right-0 top-12 z-20 bg-gradient-to-bl from-red-500/20 via-red-500/0 to-red-500/0 pb-36 pl-36 pr-2 pt-2 md:top-0 md:pr-8 md:pt-5" > <div class="pointer-events-auto flex items-center rounded-full bg-white/90 px-3 py-1 shadow-sm dark:bg-gray-900/80" > <IconDazzled classNames="text-2xl mr-2" /> <h2 class="font-semibold">{message}</h2> </div> </div>
chat-ui/src/lib/components/Toast.svelte/0
{ "file_path": "chat-ui/src/lib/components/Toast.svelte", "repo_id": "chat-ui", "token_count": 259 }
61
<script lang="ts"> export let classNames = ""; </script> <svg width="1em" height="1em" viewBox="0 0 15 6" class={classNames} fill="none" xmlns="http://www.w3.org/2000/svg" > <path d="M1.67236 1L7.67236 7L13.6724 1" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" /> </svg>
chat-ui/src/lib/components/icons/IconChevron.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconChevron.svelte", "repo_id": "chat-ui", "token_count": 156 }
62
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId, type AnyBulkWriteOperation } from "mongodb"; import type { Assistant } from "$lib/types/Assistant"; import { generateSearchTokens } from "$lib/utils/searchTokens"; const migration: Migration = { _id: new ObjectId("5f9f3e3e3e3e3e3e3e3e3e3e"), name: "Update search assistants", up: async () => { const { assistants } = collections; let ops: AnyBulkWriteOperation<Assistant>[] = []; for await (const assistant of assistants .find() .project<Pick<Assistant, "_id" | "name">>({ _id: 1, name: 1 })) { ops.push({ updateOne: { filter: { _id: assistant._id, }, update: { $set: { searchTokens: generateSearchTokens(assistant.name), }, }, }, }); if (ops.length >= 1000) { process.stdout.write("."); await assistants.bulkWrite(ops, { ordered: false }); ops = []; } } if (ops.length) { await assistants.bulkWrite(ops, { ordered: false }); } return true; }, down: async () => { const { assistants } = collections; await assistants.updateMany({}, { $unset: { searchTokens: "" } }); return true; }, }; export default migration;
chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts", "repo_id": "chat-ui", "token_count": 483 }
63
import { env } from "$env/dynamic/private"; import { z } from "zod"; import { sum } from "$lib/utils/sum"; import { embeddingEndpoints, embeddingEndpointSchema, type EmbeddingEndpoint, } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints"; import JSON5 from "json5"; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), endpoints: z.array(embeddingEndpointSchema).nonempty(), chunkCharLength: z.number().positive(), maxBatchSize: z.number().positive().optional(), preQuery: z.string().default(""), prePassage: z.string().default(""), }); // Default embedding model for backward compatibility const rawEmbeddingModelJSON = env.TEXT_EMBEDDING_MODELS || `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]`; const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON)); const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, id: m.id || m.name, }); const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({ ...m, getEndpoint: async (): Promise<EmbeddingEndpoint> => { if (!m.endpoints) { return embeddingEndpointTransformersJS({ type: "transformersjs", weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tei": return embeddingEndpoints.tei(args); case "transformersjs": return embeddingEndpoints.transformersjs(args); case "openai": return embeddingEndpoints.openai(args); case "hfapi": return embeddingEndpoints.hfapi(args); default: throw new Error(`Unknown endpoint type: ${args}`); } } random -= endpoint.weight; } throw new Error(`Failed to select embedding endpoint`); }, }); export const embeddingModels = await Promise.all( embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint)) ); export const defaultEmbeddingModel = embeddingModels[0]; const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => { return z.enum([_models[0][key], ..._models.slice(1).map((m) => m[key])]); }; export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "id"); }; export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "name"); }; export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
chat-ui/src/lib/server/embeddingModels.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingModels.ts", "repo_id": "chat-ui", "token_count": 1115 }
64
import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type OpenAI from "openai"; import type { Stream } from "openai/streaming"; /** * Transform a stream of OpenAI.Chat.ChatCompletion into a stream of TextGenerationStreamOutput */ export async function* openAIChatToTextGenerationStream( completionStream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk> ) { let generatedText = ""; let tokenId = 0; for await (const completion of completionStream) { const { choices } = completion; const content = choices[0]?.delta?.content ?? ""; const last = choices[0]?.finish_reason === "stop" || choices[0]?.finish_reason === "length"; if (content) { generatedText = generatedText + content; } const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: content ?? "", logprob: 0, special: last, }, generated_text: last ? generatedText : null, details: null, }; yield output; } }
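A minimal usage sketch for this helper, assuming the official `openai` client: the only requirement is that the stream passed in was created with `stream: true`, so that it yields `ChatCompletionChunk` objects. The relative import path, model name, and prompt below are placeholders:

```ts
import OpenAI from "openai";
import { openAIChatToTextGenerationStream } from "./openAIChatToTextGenerationStream";

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const completionStream = await openai.chat.completions.create({
	model: "gpt-4o-mini", // placeholder model name
	messages: [{ role: "user", content: "Hello!" }],
	stream: true,
});

// Each yielded output carries the incremental token text; the final chunk also
// exposes the accumulated generated_text.
for await (const output of openAIChatToTextGenerationStream(completionStream)) {
	process.stdout.write(output.token.text);
}
```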
chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts", "repo_id": "chat-ui", "token_count": 335 }
65
import { runWebSearch } from "$lib/server/websearch/runWebSearch"; import { preprocessMessages } from "../endpoints/preprocessMessages"; import { generateTitleForConversation } from "./title"; import { assistantHasDynamicPrompt, assistantHasWebSearch, getAssistantById, processPreprompt, } from "./assistant"; import { getTools, runTools } from "./tools"; import type { WebSearch } from "$lib/types/WebSearch"; import { type MessageUpdate, MessageUpdateType, MessageUpdateStatus, } from "$lib/types/MessageUpdate"; import { generate } from "./generate"; import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators"; import type { TextGenerationContext } from "./types"; import type { ToolResult } from "$lib/types/Tool"; import { toolHasName } from "../tools/utils"; async function* keepAlive(done: AbortSignal): AsyncGenerator<MessageUpdate, undefined, undefined> { while (!done.aborted) { yield { type: MessageUpdateType.Status, status: MessageUpdateStatus.KeepAlive, }; await new Promise((resolve) => setTimeout(resolve, 5000)); } } export async function* textGeneration(ctx: TextGenerationContext) { const done = new AbortController(); const titleGen = generateTitleForConversation(ctx.conv); const textGen = textGenerationWithoutTitle(ctx, done); const keepAliveGen = keepAlive(done.signal); // keep alive until textGen is done yield* mergeAsyncGenerators([titleGen, textGen, keepAliveGen]); } async function* textGenerationWithoutTitle( ctx: TextGenerationContext, done: AbortController ): AsyncGenerator<MessageUpdate, undefined, undefined> { yield { type: MessageUpdateType.Status, status: MessageUpdateStatus.Started, }; ctx.assistant ??= await getAssistantById(ctx.conv.assistantId); const { model, conv, messages, assistant, isContinue, webSearch, toolsPreference } = ctx; const convId = conv._id; let webSearchResult: WebSearch | undefined; // run websearch if: // - it's not continuing a previous message // - AND the model doesn't support tools and websearch is selected // - OR the assistant has websearch enabled (no tools for assistants for now) if ( !isContinue && ((!model.tools && webSearch && !conv.assistantId) || assistantHasWebSearch(assistant)) ) { webSearchResult = yield* runWebSearch(conv, messages, assistant?.rag); } let preprompt = conv.preprompt; if (assistantHasDynamicPrompt(assistant) && preprompt) { preprompt = await processPreprompt(preprompt); if (messages[0].from === "system") messages[0].content = preprompt; } let toolResults: ToolResult[] = []; if (model.tools) { const tools = await getTools(toolsPreference, ctx.assistant); const toolCallsRequired = tools.some((tool) => !toolHasName("directly_answer", tool)); if (toolCallsRequired) toolResults = yield* runTools(ctx, tools, preprompt); } const processedMessages = await preprocessMessages(messages, webSearchResult, convId); yield* generate({ ...ctx, messages: processedMessages }, toolResults, preprompt); done.abort(); }
chat-ui/src/lib/server/textGeneration/index.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/index.ts", "repo_id": "chat-ui", "token_count": 934 }
66
import type { SerializedHTMLElement } from "../scrape/types"; import { htmlElementToMarkdownElements, mergeAdjacentElements } from "./fromHtml"; import type { HeaderElement, MarkdownElement } from "./types"; import { MarkdownElementType } from "./types"; import { chunkElements } from "./utils/chunk"; /** * Converts HTML elements to Markdown elements and creates a tree based on header tags * For example: h1 [h2 [p p blockquote] h2 [h3 [...] ] ] **/ export function htmlToMarkdownTree( title: string, htmlElements: SerializedHTMLElement[], maxCharsPerElem: number ): HeaderElement { let parent: HeaderElement = { type: MarkdownElementType.Header, level: 1, parent: null, content: title, children: [], }; const markdownElements = chunkElements( mergeAdjacentElements( htmlElements.flatMap((elem) => htmlElementToMarkdownElements(parent, elem)) ), maxCharsPerElem ); for (const elem of markdownElements) { if (elem.type !== MarkdownElementType.Header) { elem.parent = parent; parent.children.push(elem); continue; } // add 1 to current level to offset for the title being level 1 elem.level += 1; // Pop up header levels until reaching the same level as the current header // or until we reach the root inner: while (parent !== null && parent.parent !== null) { if (parent.level < elem.level) break inner; parent = parent.parent; } parent.children.push(elem); parent = elem; } // Pop up to the root while (parent.parent !== null) { parent = parent.parent; } return parent; } export function removeParents<T extends MarkdownElement>(elem: T): T { if ("children" in elem) { return { ...elem, parent: null, children: elem.children.map((child) => removeParents(child)) }; } return { ...elem, parent: null }; }
chat-ui/src/lib/server/websearch/markdown/tree.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/tree.ts", "repo_id": "chat-ui", "token_count": 613 }
67
import { env } from "$env/dynamic/private"; import type { WebSearchSource } from "$lib/types/WebSearch"; export default async function search(query: string): Promise<WebSearchSource[]> { const params = { q: query, hl: "en", gl: "us", }; const response = await fetch("https://google.serper.dev/search", { method: "POST", body: JSON.stringify(params), headers: { "x-api-key": env.SERPER_API_KEY, "Content-type": "application/json", }, }); /* eslint-disable @typescript-eslint/no-explicit-any */ const data = (await response.json()) as Record<string, any>; if (!response.ok) { throw new Error( data["message"] ?? `Serper API returned error code ${response.status} - ${response.statusText}` ); } return data["organic"] ?? []; }
chat-ui/src/lib/server/websearch/search/endpoints/serper.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serper.ts", "repo_id": "chat-ui", "token_count": 282 }
68
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; export interface Assistant extends Timestamps { _id: ObjectId; createdById: User["_id"] | string; // user id or session createdByName?: User["username"]; avatar?: string; name: string; description?: string; modelId: string; exampleInputs: string[]; preprompt: string; userCount?: number; featured?: boolean; rag?: { allowAllDomains: boolean; allowedDomains: string[]; allowedLinks: string[]; }; generateSettings?: { temperature?: number; top_p?: number; repetition_penalty?: number; top_k?: number; }; dynamicPrompt?: boolean; searchTokens: string[]; last24HoursCount: number; tools?: string[]; } // eslint-disable-next-line no-shadow export enum SortKey { POPULAR = "popular", TRENDING = "trending", }
chat-ui/src/lib/types/Assistant.ts/0
{ "file_path": "chat-ui/src/lib/types/Assistant.ts", "repo_id": "chat-ui", "token_count": 306 }
69
export interface Timestamps { createdAt: Date; updatedAt: Date; }
chat-ui/src/lib/types/Timestamps.ts/0
{ "file_path": "chat-ui/src/lib/types/Timestamps.ts", "repo_id": "chat-ui", "token_count": 23 }
70
import type { Conversation } from "$lib/types/Conversation"; import { sha256 } from "./sha256"; export async function hashConv(conv: Conversation) { // messages contains the conversation messages, keeping only their immutable fields const messages = conv.messages.map((message) => { return (({ from, id, content, webSearchId }) => ({ from, id, content, webSearchId }))(message); }); const hash = await sha256(JSON.stringify(messages)); return hash; }
chat-ui/src/lib/utils/hashConv.ts/0
{ "file_path": "chat-ui/src/lib/utils/hashConv.ts", "repo_id": "chat-ui", "token_count": 132 }
71
import type { Tool } from "$lib/types/Tool"; /** * Checks if a tool's name equals a value. Replaces all hyphens with underscores before comparison * since some models return underscores even when hyphens are used in the request. **/ export function toolHasName(name: string, tool: Pick<Tool, "name">): boolean { return tool.name.replaceAll("-", "_") === name.replaceAll("-", "_"); } export const colors = ["purple", "blue", "green", "yellow", "red"] as const; export const icons = [ "wikis", "tools", "camera", "code", "email", "cloud", "terminal", "game", "chat", "speaker", "video", ] as const;
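A small illustration of the hyphen/underscore normalization described above. This is a sketch: the `$lib/utils/tools` import path is inferred from the file's location, and the objects are reduced to the single `name` field that `toolHasName` reads:

```ts
import { toolHasName } from "$lib/utils/tools";

// Hyphens and underscores are treated as equivalent in either argument.
toolHasName("directly_answer", { name: "directly-answer" }); // true
toolHasName("directly-answer", { name: "directly_answer" }); // true
toolHasName("web_search", { name: "directly-answer" }); // false
```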
chat-ui/src/lib/utils/tools.ts/0
{ "file_path": "chat-ui/src/lib/utils/tools.ts", "repo_id": "chat-ui", "token_count": 202 }
72
import { env } from "$env/dynamic/private"; import { collections } from "$lib/server/database"; import type { Message } from "$lib/types/Message"; import { error } from "@sveltejs/kit"; import { pathToFileURL } from "node:url"; import { unlink } from "node:fs/promises"; import { uploadFile } from "@huggingface/hub"; import parquet from "parquetjs"; import { z } from "zod"; import { logger } from "$lib/server/logger.js"; // Triger like this: // curl -X POST "http://localhost:5173/chat/admin/export" -H "Authorization: Bearer <ADMIN_API_SECRET>" -H "Content-Type: application/json" -d '{"model": "OpenAssistant/oasst-sft-6-llama-30b-xor"}' export async function POST({ request }) { if (!env.PARQUET_EXPORT_DATASET || !env.PARQUET_EXPORT_HF_TOKEN) { error(500, "Parquet export is not configured."); } const { model } = z .object({ model: z.string(), }) .parse(await request.json()); const schema = new parquet.ParquetSchema({ title: { type: "UTF8" }, created_at: { type: "TIMESTAMP_MILLIS" }, updated_at: { type: "TIMESTAMP_MILLIS" }, messages: { repeated: true, fields: { from: { type: "UTF8" }, content: { type: "UTF8" }, score: { type: "INT_8", optional: true }, }, }, }); const fileName = `/tmp/conversations-${new Date().toJSON().slice(0, 10)}-${Date.now()}.parquet`; const writer = await parquet.ParquetWriter.openFile(schema, fileName); let count = 0; logger.info("Exporting conversations for model", model); for await (const conversation of collections.settings.aggregate<{ title: string; created_at: Date; updated_at: Date; messages: Message[]; }>([ { $match: { shareConversationsWithModelAuthors: true, sessionId: { $exists: true }, userId: { $exists: false }, }, }, { $lookup: { from: "conversations", localField: "sessionId", foreignField: "sessionId", as: "conversations", pipeline: [{ $match: { model, userId: { $exists: false } } }], }, }, { $unwind: "$conversations" }, { $project: { title: "$conversations.title", created_at: "$conversations.createdAt", updated_at: "$conversations.updatedAt", messages: "$conversations.messages", }, }, ])) { await writer.appendRow({ title: conversation.title, created_at: conversation.created_at, updated_at: conversation.updated_at, messages: conversation.messages.map((message: Message) => ({ from: message.from, content: message.content, ...(message.score ? { score: message.score } : undefined), })), }); ++count; if (count % 1_000 === 0) { logger.info("Exported", count, "conversations"); } } logger.info("exporting convos with userId"); for await (const conversation of collections.settings.aggregate<{ title: string; created_at: Date; updated_at: Date; messages: Message[]; }>([ { $match: { shareConversationsWithModelAuthors: true, userId: { $exists: true } } }, { $lookup: { from: "conversations", localField: "userId", foreignField: "userId", as: "conversations", pipeline: [{ $match: { model } }], }, }, { $unwind: "$conversations" }, { $project: { title: "$conversations.title", created_at: "$conversations.createdAt", updated_at: "$conversations.updatedAt", messages: "$conversations.messages", }, }, ])) { await writer.appendRow({ title: conversation.title, created_at: conversation.created_at, updated_at: conversation.updated_at, messages: conversation.messages.map((message: Message) => ({ from: message.from, content: message.content, ...(message.score ? 
{ score: message.score } : undefined), })), }); ++count; if (count % 1_000 === 0) { logger.info("Exported", count, "conversations"); } } await writer.close(); logger.info("Uploading", fileName, "to Hugging Face Hub"); await uploadFile({ file: pathToFileURL(fileName) as URL, credentials: { accessToken: env.PARQUET_EXPORT_HF_TOKEN }, repo: { type: "dataset", name: env.PARQUET_EXPORT_DATASET, }, }); logger.info("Upload done"); await unlink(fileName); return new Response(); }
chat-ui/src/routes/admin/export/+server.ts/0
{ "file_path": "chat-ui/src/routes/admin/export/+server.ts", "repo_id": "chat-ui", "token_count": 1661 }
73
import { base } from "$app/paths"; import { env } from "$env/dynamic/private"; import { Database, collections } from "$lib/server/database.js"; import { SortKey, type Assistant } from "$lib/types/Assistant"; import type { User } from "$lib/types/User"; import { generateQueryTokens } from "$lib/utils/searchTokens.js"; import { error, redirect } from "@sveltejs/kit"; import type { Filter } from "mongodb"; const NUM_PER_PAGE = 24; export const load = async ({ url, locals }) => { if (!env.ENABLE_ASSISTANTS) { redirect(302, `${base}/`); } const modelId = url.searchParams.get("modelId"); const pageIndex = parseInt(url.searchParams.get("p") ?? "0"); const username = url.searchParams.get("user"); const query = url.searchParams.get("q")?.trim() ?? null; const sort = url.searchParams.get("sort")?.trim() ?? SortKey.TRENDING; const createdByCurrentUser = locals.user?.username && locals.user.username === username; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { error(404, `User "${username}" doesn't exist`); } } // if there is no user, we show community assistants, so only show featured assistants const shouldBeFeatured = env.REQUIRE_FEATURED_ASSISTANTS === "true" && !user && !locals.user?.isAdmin ? { featured: true } : {}; // if the user queried is not the current user, only show "public" assistants that have been shared before const shouldHaveBeenShared = env.REQUIRE_FEATURED_ASSISTANTS === "true" && !createdByCurrentUser && !locals.user?.isAdmin ? { userCount: { $gt: 1 } } : {}; // fetch the top assistants sorted by user count from biggest to smallest. filter by model too if modelId is provided or query if query is provided const filter: Filter<Assistant> = { ...(modelId && { modelId }), ...(user && { createdById: user._id }), ...(query && { searchTokens: { $all: generateQueryTokens(query) } }), ...shouldBeFeatured, ...shouldHaveBeenShared, }; const assistants = await Database.getInstance() .getCollections() .assistants.find(filter) .sort({ ...(sort === SortKey.TRENDING && { last24HoursCount: -1 }), userCount: -1, _id: 1, }) .skip(NUM_PER_PAGE * pageIndex) .limit(NUM_PER_PAGE) .toArray(); const numTotalItems = await Database.getInstance() .getCollections() .assistants.countDocuments(filter); return { assistants: JSON.parse(JSON.stringify(assistants)) as Array<Assistant>, selectedModel: modelId ?? "", numTotalItems, numItemsPerPage: NUM_PER_PAGE, query, sort, }; };
chat-ui/src/routes/assistants/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/assistants/+page.server.ts", "repo_id": "chat-ui", "token_count": 934 }
74
import { refreshSessionCookie } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { z } from "zod"; import type { UserinfoResponse } from "openid-client"; import { error, type Cookies } from "@sveltejs/kit"; import crypto from "crypto"; import { sha256 } from "$lib/utils/sha256"; import { addWeeks } from "date-fns"; import { OIDConfig } from "$lib/server/auth"; import { env } from "$env/dynamic/private"; import { logger } from "$lib/server/logger"; export async function updateUser(params: { userData: UserinfoResponse; locals: App.Locals; cookies: Cookies; userAgent?: string; ip?: string; }) { const { userData, locals, cookies, userAgent, ip } = params; // Microsoft Entra v1 tokens do not provide preferred_username, instead the username is provided in the upn // claim. See https://learn.microsoft.com/en-us/entra/identity-platform/access-token-claims-reference if (!userData.preferred_username && userData.upn) { userData.preferred_username = userData.upn as string; } const { preferred_username: username, name, email, picture: avatarUrl, sub: hfUserId, orgs, } = z .object({ preferred_username: z.string().optional(), name: z.string(), picture: z.string().optional(), sub: z.string(), email: z.string().email().optional(), orgs: z .array( z.object({ sub: z.string(), name: z.string(), picture: z.string(), preferred_username: z.string(), isEnterprise: z.boolean(), }) ) .optional(), }) .setKey(OIDConfig.NAME_CLAIM, z.string()) .refine((data) => data.preferred_username || data.email, { message: "Either preferred_username or email must be provided by the provider.", }) .transform((data) => ({ ...data, name: data[OIDConfig.NAME_CLAIM], })) .parse(userData) as { preferred_username?: string; email?: string; picture?: string; sub: string; name: string; orgs?: Array<{ sub: string; name: string; picture: string; preferred_username: string; isEnterprise: boolean; }>; } & Record<string, string>; // Dynamically access user data based on NAME_CLAIM from environment // This approach allows us to adapt to different OIDC providers flexibly. 
logger.info( { login_username: username, login_name: name, login_email: email, login_orgs: orgs?.map((el) => el.sub), }, "user login" ); // if using huggingface as auth provider, check orgs for early access and admin rights const isAdmin = (env.HF_ORG_ADMIN && orgs?.some((org) => org.sub === env.HF_ORG_ADMIN)) || false; const isEarlyAccess = (env.HF_ORG_EARLY_ACCESS && orgs?.some((org) => org.sub === env.HF_ORG_EARLY_ACCESS)) || false; logger.debug( { isAdmin, isEarlyAccess, hfUserId, }, `Updating user ${hfUserId}` ); // check if user already exists const existingUser = await collections.users.findOne({ hfUserId }); let userId = existingUser?._id; // update session cookie on login const previousSessionId = locals.sessionId; const secretSessionId = crypto.randomUUID(); const sessionId = await sha256(secretSessionId); if (await collections.sessions.findOne({ sessionId })) { error(500, "Session ID collision"); } locals.sessionId = sessionId; if (existingUser) { // update existing user if any await collections.users.updateOne( { _id: existingUser._id }, { $set: { username, name, avatarUrl, isAdmin, isEarlyAccess } } ); // remove previous session if it exists and add new one await collections.sessions.deleteOne({ sessionId: previousSessionId }); await collections.sessions.insertOne({ _id: new ObjectId(), sessionId: locals.sessionId, userId: existingUser._id, createdAt: new Date(), updatedAt: new Date(), userAgent, ip, expiresAt: addWeeks(new Date(), 2), }); } else { // user doesn't exist yet, create a new one const { insertedId } = await collections.users.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), username, name, email, avatarUrl, hfUserId, isAdmin, isEarlyAccess, }); userId = insertedId; await collections.sessions.insertOne({ _id: new ObjectId(), sessionId: locals.sessionId, userId, createdAt: new Date(), updatedAt: new Date(), userAgent, ip, expiresAt: addWeeks(new Date(), 2), }); // move pre-existing settings to new user const { matchedCount } = await collections.settings.updateOne( { sessionId: previousSessionId }, { $set: { userId, updatedAt: new Date() }, $unset: { sessionId: "" }, } ); if (!matchedCount) { // if no settings found for user, create default settings await collections.settings.insertOne({ userId, ethicsModalAcceptedAt: new Date(), updatedAt: new Date(), createdAt: new Date(), ...DEFAULT_SETTINGS, }); } } // refresh session cookie refreshSessionCookie(cookies, secretSessionId); // migrate pre-existing conversations await collections.conversations.updateMany( { sessionId: previousSessionId }, { $set: { userId }, $unset: { sessionId: "" }, } ); }
chat-ui/src/routes/login/callback/updateUser.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/updateUser.ts", "repo_id": "chat-ui", "token_count": 1956 }
75
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ parent, params }) { const data = await parent(); const assistant = data.settings.assistants.find((id) => id === params.assistantId); if (!assistant) { redirect(302, `${base}/assistant/${params.assistantId}`); } return data; }
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.ts", "repo_id": "chat-ui", "token_count": 115 }
76
import { base } from "$app/paths"; import { env } from "$env/dynamic/private"; import { env as envPublic } from "$env/dynamic/public"; import { collections } from "$lib/server/database"; import { logger } from "$lib/server/logger"; import type { Tool } from "$lib/types/Tool"; import { fail, redirect, type Actions } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { z } from "zod"; async function toolOnlyIfAuthor(locals: App.Locals, toolId?: string) { const tool = await collections.tools.findOne({ _id: new ObjectId(toolId) }); if (!tool) { throw Error("Tool not found"); } if ( tool.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString() && !locals.user?.isAdmin ) { throw Error("You are not the creator of this tool"); } return tool; } export const actions: Actions = { delete: async ({ params, locals }) => { let tool; try { tool = await toolOnlyIfAuthor(locals, params.toolId); } catch (e) { return fail(400, { error: true, message: (e as Error).message }); } await collections.tools.deleteOne({ _id: tool._id }); // Remove the tool from all users' settings await collections.settings.updateMany( { tools: { $in: [tool._id.toString()] }, }, { $pull: { tools: tool._id.toString() }, } ); // Remove the tool from all assistants await collections.assistants.updateMany( { tools: { $in: [tool._id.toString()] }, }, { $pull: { tools: tool._id.toString() }, } ); redirect(302, `${base}/tools`); }, report: async ({ request, params, locals, url }) => { // is there already a report from this user for this model ? const report = await collections.reports.findOne({ createdBy: locals.user?._id ?? locals.sessionId, object: "tool", contentId: new ObjectId(params.toolId), }); if (report) { return fail(400, { error: true, message: "Already reported" }); } const formData = await request.formData(); const result = z.string().min(1).max(128).safeParse(formData?.get("reportReason")); if (!result.success) { return fail(400, { error: true, message: "Invalid report reason" }); } const { acknowledged } = await collections.reports.insertOne({ _id: new ObjectId(), contentId: new ObjectId(params.toolId), object: "tool", createdBy: locals.user?._id ?? locals.sessionId, createdAt: new Date(), updatedAt: new Date(), reason: result.data, }); if (!acknowledged) { return fail(500, { error: true, message: "Failed to report tool" }); } if (env.WEBHOOK_URL_REPORT_ASSISTANT) { const prefixUrl = envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}`; const toolUrl = `${prefixUrl}/tools/${params.toolId}`; const tool = await collections.tools.findOne<Pick<Tool, "displayName">>( { _id: new ObjectId(params.toolId) }, { projection: { displayName: 1 } } ); const username = locals.user?.username; const res = await fetch(env.WEBHOOK_URL_REPORT_ASSISTANT, { method: "POST", headers: { "Content-type": "application/json", }, body: JSON.stringify({ text: `Tool <${toolUrl}|${tool?.displayName}> reported by ${ username ? `<http://hf.co/${username}|${username}>` : "non-logged in user" }.\n\n> ${result.data}`, }), }); if (!res.ok) { logger.error(`Webhook tool report failed. 
${res.statusText} ${await res.text()}`); } } return { from: "report", ok: true, message: "Tool reported" }; }, unfeature: async ({ params, locals }) => { if (!locals.user?.isAdmin) { return fail(403, { error: true, message: "Permission denied" }); } const tool = await collections.tools.findOne({ _id: new ObjectId(params.toolId), }); if (!tool) { return fail(404, { error: true, message: "Tool not found" }); } const result = await collections.tools.updateOne( { _id: tool._id }, { $set: { featured: false } } ); if (result.modifiedCount === 0) { return fail(500, { error: true, message: "Failed to unfeature tool" }); } return { from: "unfeature", ok: true, message: "Tool unfeatured" }; }, feature: async ({ params, locals }) => { if (!locals.user?.isAdmin) { return fail(403, { error: true, message: "Permission denied" }); } const result = await collections.tools.updateOne( { _id: new ObjectId(params.toolId) }, { $set: { featured: true } } ); if (result.modifiedCount === 0) { return fail(500, { error: true, message: "Failed to feature tool" }); } return { from: "feature", ok: true, message: "Tool featured" }; }, };
chat-ui/src/routes/tools/[toolId]/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/tools/[toolId]/+page.server.ts", "repo_id": "chat-ui", "token_count": 1743 }
77
{ "background_color": "#ffffff", "name": "Chat UI", "short_name": "Chat UI", "display": "standalone", "start_url": "/", "icons": [ { "src": "/chatui/icon-128x128.png", "sizes": "128x128", "type": "image/png" }, { "src": "/chatui/icon-256x256.png", "sizes": "256x256", "type": "image/png" }, { "src": "/chatui/icon-512x512.png", "sizes": "512x512", "type": "image/png" } ] }
chat-ui/static/chatui/manifest.json/0
{ "file_path": "chat-ui/static/chatui/manifest.json", "repo_id": "chat-ui", "token_count": 218 }
78
# How to add a new dataset Add datasets directly to the 🤗 Hugging Face Hub! You can share your dataset on https://huggingface.co/datasets directly using your account; see the documentation: * [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset) * [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
datasets/ADD_NEW_DATASET.md/0
{ "file_path": "datasets/ADD_NEW_DATASET.md", "repo_id": "datasets", "token_count": 113 }
79
# Differences between Dataset and IterableDataset There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`]. Whichever type of dataset you choose to use or create depends on the size of the dataset. In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else. This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you. ## Downloading and streaming When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows. Such datasets are also called "map-style" datasets. For example you can download ImageNet-1k like this and access any row: ```python from datasets import load_dataset imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset print(imagenet[0]) ``` But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than the disk. Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`]. When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset. This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk. For example, you can stream the ImageNet-1k dataset without downloading it on disk: ```python from datasets import load_dataset imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over for example in imagenet: print(example) break ``` Streaming can read online data without writing any file to disk. For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en). Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream). This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing. ## Creating map-style datasets and iterable datasets You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row: ```python my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}) print(my_dataset[0]) ``` To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data. In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`: ```python def my_generator(n): for i in range(n): yield {"col_1": i} my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10}) for example in my_iterable_dataset: print(example) break ``` ## Loading local files entirely and progressively It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]: ```python data_files = {"train": ["path/to/data.csv"]} my_dataset = load_dataset("csv", data_files=data_files, split="train") print(my_dataset[0]) ``` However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big. 
To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly. This way, the data is read progressively from the local files as you iterate over the dataset: ```python data_files = {"train": ["path/to/data.csv"]} my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True) for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset print(example) break ``` Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files. You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load]) datasets. ## Eager data processing and lazy data processing When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned. This is similar to how `pandas` works for example. ```python my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset print(my_dataset[0]) ``` On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset. Instead, your `map` function is applied on-the-fly. Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset: ```python my_iterable_dataset = my_iterable_dataset.map(process_fn_1) my_iterable_dataset = my_iterable_dataset.filter(filter_fn) my_iterable_dataset = my_iterable_dataset.map(process_fn_2) # process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset for example in my_iterable_dataset: print(example) break ``` ## Exact and fast approximate shuffling When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset. It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list. Then, accessing `my_dataset[0]` returns the row and index defined by the first element of the indices mapping that has been shuffled: ```python my_dataset = my_dataset.shuffle(seed=42) print(my_dataset[0]) ``` Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position. This prevents the use of exact shuffling. Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`]. It uses a shuffle buffer to sample random examples iteratively from the dataset. Since the dataset is still read iteratively, it provides excellent speed performance: ```python my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) for example in my_iterable_dataset: print(example) break ``` But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. 
So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources: ```python # Stream from the internet my_iterable_dataset = load_dataset("deepmind/code_contests", split="train", streaming=True) my_iterable_dataset.n_shards # 39 # Stream from local files data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]} my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True) my_iterable_dataset.n_shards # 1024 # From a generator function def my_generator(n, sources): for source in sources: for example_id_for_current_source in range(n): yield {"example_id": f"{source}_{example_id_for_current_source}"} gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]} my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs) my_iterable_dataset.n_shards # 1024 ``` ## Speed differences Regular [`Dataset`] objects are based on Arrow which provides fast random access to the rows. Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't do expensive system calls and deserialization. It provides even faster data loading when iterating using a `for` loop by iterating on contiguous Arrow record batches. However as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower. This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. This may take a lot of time depending on the size of your dataset though: ```python my_dataset[0] # fast my_dataset = my_dataset.shuffle(seed=42) my_dataset[0] # up to 10x slower my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data my_dataset[0] # fast again ``` In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`]. It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal. You can also reshuffle the dataset easily: ```python for example in enumerate(my_iterable_dataset): # fast pass shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) for example in enumerate(shuffled_iterable_dataset): # as fast as before pass shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100) # reshuffling using another seed is instantaneous for example in enumerate(shuffled_iterable_dataset): # still as fast as before pass ``` If you're using your dataset on multiple epochs, the effective seed to shuffle the shards order in the shuffle buffer is `seed + epoch`. It makes it easy to reshuffle a dataset between epochs: ```python for epoch in range(n_epochs): my_iterable_dataset.set_epoch(epoch) for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch` pass ``` To restart the iteration of a map-style dataset, you can simply skip the first examples: ```python my_dataset = my_dataset.select(range(start_index, len(dataset))) ``` But if you use a `DataLoader` with a `Sampler`, you should instead save the state of your sampler (you might have written a custom sampler that allows resuming). 
On the other hand, iterable datasets don't provide random access to a specific example index to resume from. But you can use [`IterableDataset.state_dict`] and [`IterableDataset.load_state_dict`] to resume from a checkpoint instead, similarly to what you can do for models and optimizers: ```python >>> iterable_dataset = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3) >>> # save in the middle of training >>> state_dict = iterable_dataset.state_dict() >>> # and resume later >>> iterable_dataset.load_state_dict(state_dict) ``` Under the hood, the iterable dataset keeps track of the current shard being read and the example index in the current shard, and it stores this info in the `state_dict`. To resume from a checkpoint, the dataset skips all the shards that were previously read to restart from the current shard. Then it reads the shard and skips examples until it reaches the exact example from the checkpoint. Therefore, restarting a dataset is quite fast, since it will not re-read the shards that have already been iterated on. Still, resuming a dataset is generally not instantaneous since it has to restart reading from the beginning of the current shard and skip examples until it reaches the checkpoint location. This can be used with the `StatefulDataLoader` from `torchdata`, see [streaming with a PyTorch DataLoader](./use_with_pytorch#stream-data). ## Switch from map-style to iterable If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or its speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]: ```python my_iterable_dataset = my_dataset.to_iterable_dataset() ``` If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]: ```python my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024) my_iterable_dataset.n_shards # 1024 ```
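As a rough sketch of how the sharded dataset is typically consumed (assuming PyTorch is installed and `my_dataset` is the map-style dataset from the previous snippets), the shards are distributed across the `DataLoader` workers so that each worker streams a different subset of the data:

```python
from torch.utils.data import DataLoader

my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024)
# with num_workers > 0, each worker is assigned a disjoint set of shards to stream from
dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)

for batch in dataloader:
    print(batch)
    break
```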
datasets/docs/source/about_mapstyle_vs_iterable.mdx/0
{ "file_path": "datasets/docs/source/about_mapstyle_vs_iterable.mdx", "repo_id": "datasets", "token_count": 3730 }
80
# Load image data Image datasets have [`Image`] type columns, which contain PIL objects. <Tip> To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it. </Tip> When you load an image dataset and call the image column, the images are decoded as PIL Images: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("beans", split="train") >>> dataset[0]["image"] ``` <Tip warning={true}> Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>. ## Local files You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature: ```py >>> from datasets import Dataset, Image >>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image()) >>> dataset[0]["image"] <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160> ``` If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature: ```py >>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False)) >>> dataset[0]["image"] {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'} ``` ## ImageFolder You can also load a dataset with an `ImageFolder` dataset builder, which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks. Your image dataset structure should look like this: ``` folder/train/dog/golden_retriever.png folder/train/dog/german_shepherd.png folder/train/dog/chihuahua.png folder/train/cat/maine_coon.png folder/train/cat/bengal.png folder/train/cat/birman.png ``` Load your dataset by specifying `imagefolder` and the directory of your dataset in `data_dir`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder") >>> dataset["train"][0] {"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>, "label": 0} >>> dataset["train"][-1] {"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E8DAD30>, "label": 1} ``` Load remote datasets from their URLs with the `data_files` parameter: ```py >>> dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip", split="train") ``` Some datasets have a metadata file (`metadata.csv`/`metadata.jsonl`) associated with them, containing other information about the data like bounding boxes, text captions, and labels. The metadata is automatically loaded when you call [`load_dataset`] and specify `imagefolder`. 
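For instance, a (hypothetical) caption dataset could be laid out as follows, where the `metadata.jsonl` file contains a `file_name` column pointing to each image plus the extra columns, e.g. `{"file_name": "0001.png", "text": "a photo of a bean leaf"}`; the file names and caption text here are made up for illustration:

```
folder/train/metadata.jsonl
folder/train/0001.png
folder/train/0002.png
folder/train/0003.png
```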
To ignore the information in the metadata file, set `drop_labels=False` in [`load_dataset`], and allow `ImageFolder` to automatically infer the label name from the directory name: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False) ``` <Tip> For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide. </Tip> ## WebDataset The [WebDataset](https://github.com/webdataset/webdataset) format is based on a folder of TAR archives and is suitable for big image datasets. Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`). You can load a WebDataset like this: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", streaming=True) ```
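Because the data is streamed, examples are only read when you iterate over the dataset; as a small sketch (assuming the TAR archives end up in a `train` split):

```python
for example in dataset["train"]:
    print(example)
    break
```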
datasets/docs/source/image_load.mdx/0
{ "file_path": "datasets/docs/source/image_load.mdx", "repo_id": "datasets", "token_count": 1388 }
81
# Process 🤗 Datasets provides many tools for modifying the structure and content of a dataset. These tools are important for tidying up a dataset, creating additional columns, converting between features and formats, and much more. This guide will show you how to: - Reorder rows and split the dataset. - Rename and remove columns, and other common column operations. - Apply processing functions to each example in a dataset. - Concatenate datasets. - Apply a custom formatting transform. - Save and export processed datasets. For more details specific to processing other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_process">process audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_process">process image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_process">process text dataset guide</a>. The examples in this guide use the MRPC dataset, but feel free to load any dataset of your choice and follow along! ```py >>> from datasets import load_dataset >>> dataset = load_dataset("glue", "mrpc", split="train") ``` <Tip warning={true}> All processing methods in this guide return a new [`Dataset`] object. Modification is not done in-place. Be careful about overriding your previous dataset! </Tip> ## Sort, shuffle, select, split, and shard There are several functions for rearranging the structure of a dataset. These functions are useful for selecting only the rows you want, creating train and test splits, and sharding very large datasets into smaller chunks. ### Sort Use [`~Dataset.sort`] to sort column values according to their numerical values. The provided column must be NumPy compatible. ```py >>> dataset["label"][:10] [1, 0, 1, 0, 1, 1, 0, 1, 0, 0] >>> sorted_dataset = dataset.sort("label") >>> sorted_dataset["label"][:10] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] >>> sorted_dataset["label"][-10:] [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` Under the hood, this creates a list of indices that is sorted according to values of the column. This indices mapping is then used to access the right rows in the underlying Arrow table. ### Shuffle The [`~Dataset.shuffle`] function randomly rearranges the column values. You can specify the `generator` parameter in this function to use a different `numpy.random.Generator` if you want more control over the algorithm used to shuffle the dataset. ```py >>> shuffled_dataset = sorted_dataset.shuffle(seed=42) >>> shuffled_dataset["label"][:10] [1, 1, 1, 0, 1, 1, 1, 1, 1, 0] ``` Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower. This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. Alternatively, you can switch to an [`IterableDataset`] and leverage its fast approximate shuffling [`IterableDataset.shuffle`]: ```py >>> iterable_dataset = dataset.to_iterable_dataset(num_shards=128) >>> shuffled_iterable_dataset = iterable_dataset.shuffle(seed=42, buffer_size=1000) ``` ### Select and Filter There are two options for filtering rows in a dataset: [`~Dataset.select`] and [`~Dataset.filter`]. 
- [`~Dataset.select`] returns rows according to a list of indices: ```py >>> small_dataset = dataset.select([0, 10, 20, 30, 40, 50]) >>> len(small_dataset) 6 ``` - [`~Dataset.filter`] returns rows that match a specified condition: ```py >>> start_with_ar = dataset.filter(lambda example: example["sentence1"].startswith("Ar")) >>> len(start_with_ar) 6 >>> start_with_ar["sentence1"] ['Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .', 'Arison said Mann may have been one of the pioneers of the world music movement and he had a deep love of Brazilian music .', 'Arts helped coach the youth on an eighth-grade football team at Lombardi Middle School in Green Bay .', 'Around 9 : 00 a.m. EDT ( 1300 GMT ) , the euro was at $ 1.1566 against the dollar , up 0.07 percent on the day .', "Arguing that the case was an isolated example , Canada has threatened a trade backlash if Tokyo 's ban is not justified on scientific grounds .", 'Artists are worried the plan would harm those who need help most - performers who have a difficult time lining up shows .' ] ``` [`~Dataset.filter`] can also filter by indices if you set `with_indices=True`: ```py >>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True) >>> len(even_dataset) 1834 >>> len(dataset) / 2 1834.0 ``` Unless the list of indices to keep is contiguous, those methods also create an indices mapping under the hood. ### Split The [`~Dataset.train_test_split`] function creates train and test splits if your dataset doesn't already have them. This allows you to adjust the relative proportions or an absolute number of samples in each split. In the example below, use the `test_size` parameter to create a test split that is 10% of the original dataset: ```py >>> dataset.train_test_split(test_size=0.1) {'train': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 3301), 'test': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 367)} >>> 0.1 * len(dataset) 366.8 ``` The splits are shuffled by default, but you can set `shuffle=False` to prevent shuffling. ### Shard 🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~Dataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter. For example, the [imdb](https://huggingface.co/datasets/imdb) dataset has 25000 examples: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imdb", split="train") >>> print(dataset) Dataset({ features: ['text', 'label'], num_rows: 25000 }) ``` After sharding the dataset into four chunks, the first shard will only have 6250 examples: ```py >>> dataset.shard(num_shards=4, index=0) Dataset({ features: ['text', 'label'], num_rows: 6250 }) >>> print(25000/4) 6250.0 ``` ## Rename, remove, cast, and flatten The following functions allow you to modify the columns of a dataset. These functions are useful for renaming or removing columns, changing columns to a new set of features, and flattening nested column structures. ### Rename Use [`~Dataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place. 
Provide [`~Dataset.rename_column`] with the name of the original column, and the new column name: ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> dataset = dataset.rename_column("sentence1", "sentenceA") >>> dataset = dataset.rename_column("sentence2", "sentenceB") >>> dataset Dataset({ features: ['sentenceA', 'sentenceB', 'label', 'idx'], num_rows: 3668 }) ``` ### Remove When you need to remove one or more columns, provide the column name to remove to the [`~Dataset.remove_columns`] function. Remove more than one column by providing a list of column names: ```py >>> dataset = dataset.remove_columns("label") >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'idx'], num_rows: 3668 }) >>> dataset = dataset.remove_columns(["sentence1", "sentence2"]) >>> dataset Dataset({ features: ['idx'], num_rows: 3668 }) ``` Conversely, [`~Dataset.select_columns`] selects one or more columns to keep and removes the rest. This function takes either one or a list of column names: ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> dataset = dataset.select_columns(['sentence1', 'sentence2', 'idx']) >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'idx'], num_rows: 3668 }) >>> dataset = dataset.select_columns('idx') >>> dataset Dataset({ features: ['idx'], num_rows: 3668 }) ``` ### Cast The [`~Dataset.cast`] function transforms the feature type of one or more columns. This function accepts your new [`Features`] as its argument. The example below demonstrates how to change the [`ClassLabel`] and [`Value`] features: ```py >>> dataset.features {'sentence1': Value(dtype='string', id=None), 'sentence2': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None), 'idx': Value(dtype='int32', id=None)} >>> from datasets import ClassLabel, Value >>> new_features = dataset.features.copy() >>> new_features["label"] = ClassLabel(names=["negative", "positive"]) >>> new_features["idx"] = Value("int64") >>> dataset = dataset.cast(new_features) >>> dataset.features {'sentence1': Value(dtype='string', id=None), 'sentence2': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None), 'idx': Value(dtype='int64', id=None)} ``` <Tip> Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value("int32")` to `Value("bool")` if the original column only contains ones and zeros. </Tip> Use the [`~Dataset.cast_column`] function to change the feature type of a single column. Pass the column name and its new feature type as arguments: ```py >>> dataset.features {'audio': Audio(sampling_rate=44100, mono=True, id=None)} >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> dataset.features {'audio': Audio(sampling_rate=16000, mono=True, id=None)} ``` ### Flatten Sometimes a column can be a nested structure of several types. 
Take a look at the nested structure below from the SQuAD dataset: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("squad", split="train") >>> dataset.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` The `answers` field contains two subfields: `text` and `answer_start`. Use the [`~Dataset.flatten`] function to extract the subfields into their own separate columns: ```py >>> flat_dataset = dataset.flatten() >>> flat_dataset Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) ``` Notice how the subfields are now their own independent columns: `answers.text` and `answers.answer_start`. ## Map Some of the more powerful applications of 🤗 Datasets come from using the [`~Dataset.map`] function. The primary purpose of [`~Dataset.map`] is to speed up processing functions. It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns. In the following example, prefix each `sentence1` value in the dataset with `'My sentence: '`. Start by creating a function that adds `'My sentence: '` to the beginning of each sentence. The function needs to accept and output a `dict`: ```py >>> def add_prefix(example): ... example["sentence1"] = 'My sentence: ' + example["sentence1"] ... return example ``` Now use [`~Dataset.map`] to apply the `add_prefix` function to the entire dataset: ```py >>> updated_dataset = small_dataset.map(add_prefix) >>> updated_dataset["sentence1"][:5] ['My sentence: Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', "My sentence: Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .", 'My sentence: They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .', 'My sentence: Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .', ] ``` Let's take a look at another example, except this time, you'll remove a column with [`~Dataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed. Specify the column to remove with the `remove_columns` parameter in [`~Dataset.map`]: ```py >>> updated_dataset = dataset.map(lambda example: {"new_sentence": example["sentence1"]}, remove_columns=["sentence1"]) >>> updated_dataset.column_names ['sentence2', 'label', 'idx', 'new_sentence'] ``` <Tip> 🤗 Datasets also has a [`~Dataset.remove_columns`] function which is faster because it doesn't copy the data of the remaining columns. </Tip> You can also use [`~Dataset.map`] with indices if you set `with_indices=True`. 
The example below adds the index to the beginning of each sentence: ```py >>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True) >>> updated_dataset["sentence2"][:5] ['0: Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .', "1: Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .", "2: On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .", '3: Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .', '4: PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .' ] ``` ### Multiprocessing Multiprocessing significantly speeds up processing by parallelizing processes on the CPU. Set the `num_proc` parameter in [`~Dataset.map`] to set the number of processes to use: ```py >>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True, num_proc=4) ``` The [`~Dataset.map`] also works with the rank of the process if you set `with_rank=True`. This is analogous to the `with_indices` parameter. The `with_rank` parameter in the mapped function goes after the `index` one if it is already present. ```py >>> import torch >>> from multiprocess import set_start_method >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> from datasets import load_dataset >>> >>> # Get an example dataset >>> dataset = load_dataset("fka/awesome-chatgpt-prompts", split="train") >>> >>> # Get an example model and its tokenizer >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat").eval() >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat") >>> >>> def gpu_computation(batch, rank): ... # Move the model on the right GPU if it's not there already ... device = f"cuda:{(rank or 0) % torch.cuda.device_count()}" ... model.to(device) ... ... # Your big GPU call goes here, for example: ... chats = [[ ... {"role": "system", "content": "You are a helpful assistant."}, ... {"role": "user", "content": prompt} ... ] for prompt in batch["prompt"]] ... texts = [tokenizer.apply_chat_template( ... chat, ... tokenize=False, ... add_generation_prompt=True ... ) for chat in chats] ... model_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(device) ... with torch.no_grad(): ... outputs = model.generate(**model_inputs, max_new_tokens=512) ... batch["output"] = tokenizer.batch_decode(outputs, skip_special_tokens=True) ... return batch >>> >>> if __name__ == "__main__": ... set_start_method("spawn") ... updated_dataset = dataset.map( ... gpu_computation, ... batched=True, ... batch_size=16, ... with_rank=True, ... num_proc=torch.cuda.device_count(), # one process per GPU ... ) ``` The main use-case for rank is to parallelize computation across several GPUs. This requires setting `multiprocess.set_start_method("spawn")`. If you don't you'll receive the following CUDA error: ```bash RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method. ``` ### Batch processing The [`~Dataset.map`] function supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` parameter. 
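As a minimal sketch of the batched calling convention, the mapped function receives and returns a dictionary of lists (one list per column) instead of a single example:

```py
>>> def uppercase_batch(batch):
...     return {"sentence1": [sentence.upper() for sentence in batch["sentence1"]]}
>>> uppercased_dataset = dataset.map(uppercase_batch, batched=True, batch_size=500)
```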
Batch processing enables interesting applications such as splitting long sentences into shorter chunks and data augmentation. #### Split long examples When examples are too long, you may want to split them into several smaller chunks. Begin by creating a function that: 1. Splits the `sentence1` field into chunks of 50 characters. 2. Stacks all the chunks together to create the new dataset. ```py >>> def chunk_examples(examples): ... chunks = [] ... for sentence in examples["sentence1"]: ... chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)] ... return {"chunks": chunks} ``` Apply the function with [`~Dataset.map`]: ```py >>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=dataset.column_names) >>> chunked_dataset[:10] {'chunks': ['Amrozi accused his brother , whom he called " the ', 'witness " , of deliberately distorting his evidenc', 'e .', "Yucaipa owned Dominick 's before selling the chain", ' to Safeway in 1998 for $ 2.5 billion .', 'They had published an advertisement on the Interne', 't on June 10 , offering the cargo for sale , he ad', 'ded .', 'Around 0335 GMT , Tab shares were up 19 cents , or', ' 4.4 % , at A $ 4.56 , having earlier set a record']} ``` Notice how the sentences are split into shorter chunks now, and there are more rows in the dataset. ```py >>> dataset Dataset({ features: ['sentence1', 'sentence2', 'label', 'idx'], num_rows: 3668 }) >>> chunked_dataset Dataset({ features: ['chunks'], num_rows: 10470 }) ``` #### Data augmentation The [`~Dataset.map`] function can also be used for data augmentation. The following example generates additional words for a masked token in a sentence. Load and use the [RoBERTa](https://huggingface.co/roberta-base) model in 🤗 Transformers' [FillMaskPipeline](https://huggingface.co/transformers/main_classes/pipelines#transformers.FillMaskPipeline): ```py >>> from random import randint >>> from transformers import pipeline >>> fillmask = pipeline("fill-mask", model="roberta-base") >>> mask_token = fillmask.tokenizer.mask_token >>> smaller_dataset = dataset.filter(lambda e, i: i<100, with_indices=True) ``` Create a function to randomly select a word to mask in the sentence. The function should also return the original sentence and the top three replacements generated by RoBERTa. ```py >>> def augment_data(examples): ... outputs = [] ... for sentence in examples["sentence1"]: ... words = sentence.split(' ') ... K = randint(1, len(words)-1) ... masked_sentence = " ".join(words[:K] + [mask_token] + words[K+1:]) ... predictions = fillmask(masked_sentence) ... augmented_sequences = [predictions[i]["sequence"] for i in range(3)] ... outputs += [sentence] + augmented_sequences ... ... 
return {"data": outputs} ``` Use [`~Dataset.map`] to apply the function over the whole dataset: ```py >>> augmented_dataset = smaller_dataset.map(augment_data, batched=True, remove_columns=dataset.column_names, batch_size=8) >>> augmented_dataset[:9]["data"] ['Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', 'Amrozi accused his brother, whom he called " the witness ", of deliberately withholding his evidence.', 'Amrozi accused his brother, whom he called " the witness ", of deliberately suppressing his evidence.', 'Amrozi accused his brother, whom he called " the witness ", of deliberately destroying his evidence.', "Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .", 'Yucaipa owned Dominick Stores before selling the chain to Safeway in 1998 for $ 2.5 billion.', "Yucaipa owned Dominick's before selling the chain to Safeway in 1998 for $ 2.5 billion.", 'Yucaipa owned Dominick Pizza before selling the chain to Safeway in 1998 for $ 2.5 billion.' ] ``` For each original sentence, RoBERTA augmented a random word with three alternatives. The original word `distorting` is supplemented by `withholding`, `suppressing`, and `destroying`. ### Process multiple splits Many datasets have splits that can be processed simultaneously with [`DatasetDict.map`]. For example, tokenize the `sentence1` field in the train and test split by: ```py >>> from datasets import load_dataset # load all the splits >>> dataset = load_dataset('glue', 'mrpc') >>> encoded_dataset = dataset.map(lambda examples: tokenizer(examples["sentence1"]), batched=True) >>> encoded_dataset["train"][0] {'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', 'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .', 'label': 1, 'idx': 0, 'input_ids': [ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] } ``` ### Distributed usage When you use [`~Dataset.map`] in a distributed setting, you should also use [torch.distributed.barrier](https://pytorch.org/docs/stable/distributed?highlight=barrier#torch.distributed.barrier). This ensures the main process performs the mapping, while the other processes load the results, thereby avoiding duplicate work. The following example shows how you can use `torch.distributed.barrier` to synchronize the processes: ```py >>> from datasets import Dataset >>> import torch.distributed >>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> if training_args.local_rank > 0: ... print("Waiting for main process to perform the mapping") ... torch.distributed.barrier() >>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1}) >>> if training_args.local_rank == 0: ... print("Loading results from main process") ... torch.distributed.barrier() ``` ## Batch The [`~Dataset.batch`] method allows you to group samples from the dataset into batches. This is particularly useful when you want to create batches of data for training or evaluation, especially when working with deep learning models. 
Here's an example of how to use the `batch()` method: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes", split="train") >>> batched_dataset = dataset.batch(batch_size=4) >>> batched_dataset[0] {'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic', 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'], 'label': [1, 1, 1, 1]} ``` The `batch()` method accepts the following parameters: - `batch_size` (`int`): The number of samples in each batch. - `drop_last_batch` (`bool`, defaults to `False`): Whether to drop the last incomplete batch if the dataset size is not divisible by the batch size. - `num_proc` (`int`, optional, defaults to `None`): The number of processes to use for multiprocessing. If None, no multiprocessing is used. This can significantly speed up batching for large datasets. Note that `Dataset.batch()` returns a new [`Dataset`] where each item is a batch of multiple samples from the original dataset. If you want to process data in batches, you should use a batched [`~Dataset.map`] directly, which applies a function to batches but the output dataset is unbatched. ## Concatenate Separate datasets can be concatenated if they share the same column types. Concatenate datasets with [`concatenate_datasets`]: ```py >>> from datasets import concatenate_datasets, load_dataset >>> bookcorpus = load_dataset("bookcorpus", split="train") >>> wiki = load_dataset("wikipedia", "20220301.en", split="train") >>> wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"]) # only keep the 'text' column >>> assert bookcorpus.features.type == wiki.features.type >>> bert_dataset = concatenate_datasets([bookcorpus, wiki]) ``` You can also concatenate two datasets horizontally by setting `axis=1` as long as the datasets have the same number of rows: ```py >>> from datasets import Dataset >>> bookcorpus_ids = Dataset.from_dict({"ids": list(range(len(bookcorpus)))}) >>> bookcorpus_with_ids = concatenate_datasets([bookcorpus, bookcorpus_ids], axis=1) ``` ### Interleave You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as *interleaving*, which is enabled by the [`interleave_datasets`] function. Both [`interleave_datasets`] and [`concatenate_datasets`] work with regular [`Dataset`] and [`IterableDataset`] objects. Refer to the [Stream](./stream#interleave) guide for an example of how to interleave [`IterableDataset`] objects. You can define sampling probabilities for each of the original datasets to specify how to interleave the datasets. In this case, the new dataset is constructed by getting examples one by one from a random dataset until one of the datasets runs out of samples. 
```py >>> from datasets import Dataset, interleave_datasets >>> seed = 42 >>> probabilities = [0.3, 0.5, 0.2] >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed) >>> dataset["a"] [10, 11, 20, 12, 0, 21, 13] ``` You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples. You can specify `stopping_strategy="all_exhausted"` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it will return to the beginning of this dataset until the stop criterion has been reached. Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples. ```py >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") >>> dataset["a"] [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20] ``` ## Format The [`~Dataset.set_format`] function changes the format of a column to be compatible with some common data formats. Specify the output you'd like in the `type` parameter and the columns you want to format. Formatting is applied on-the-fly. For example, create PyTorch tensors by setting `type="torch"`: ```py >>> import torch >>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) ``` The [`~Dataset.with_format`] function also changes the format of a column, except it returns a new [`Dataset`] object: ```py >>> dataset = dataset.with_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) ``` <Tip> 🤗 Datasets also provides support for other common data formats such as NumPy, Pandas, and JAX. Check out the [Using Datasets with TensorFlow](https://huggingface.co/docs/datasets/master/en/use_with_tensorflow#using-totfdataset) guide for more details on how to efficiently create a TensorFlow dataset. </Tip> If you need to reset the dataset to its original format, use the [`~Dataset.reset_format`] function: ```py >>> dataset.format {'type': 'torch', 'format_kwargs': {}, 'columns': ['label'], 'output_all_columns': False} >>> dataset.reset_format() >>> dataset.format {'type': 'python', 'format_kwargs': {}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False} ``` ### Format transform The [`~Dataset.set_transform`] function applies a custom formatting transform on-the-fly. This function replaces any previously specified format. For example, you can use this function to tokenize and pad tokens on-the-fly. Tokenization is only applied when examples are accessed: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") >>> def encode(batch): ... 
return tokenizer(batch["sentence1"], batch["sentence2"], padding="longest", truncation=True, max_length=512, return_tensors="pt") >>> dataset.set_transform(encode) >>> dataset.format {'type': 'custom', 'format_kwargs': {'transform': <function __main__.encode(batch)>}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False} ``` You can also use the [`~Dataset.set_transform`] function to decode formats not supported by [`Features`]. For example, the [`Audio`] feature uses [`soundfile`](https://python-soundfile.readthedocs.io/en/0.11.0/) - a fast and simple library to install - but it does not provide support for less common audio formats. Here is where you can use [`~Dataset.set_transform`] to apply a custom decoding transform on the fly. You're free to use any library you like to decode the audio files. The example below uses the [`pydub`](http://pydub.com/) package to open an audio format not supported by `soundfile`: ```py >>> import numpy as np >>> from pydub import AudioSegment >>> audio_dataset_amr = Dataset.from_dict({"audio": ["audio_samples/audio.amr"]}) >>> def decode_audio_with_pydub(batch, sampling_rate=16_000): ... def pydub_decode_file(audio_path): ... sound = AudioSegment.from_file(audio_path) ... if sound.frame_rate != sampling_rate: ... sound = sound.set_frame_rate(sampling_rate) ... channel_sounds = sound.split_to_mono() ... samples = [s.get_array_of_samples() for s in channel_sounds] ... fp_arr = np.array(samples).T.astype(np.float32) ... fp_arr /= np.iinfo(samples[0].typecode).max ... return fp_arr ... ... batch["audio"] = [pydub_decode_file(audio_path) for audio_path in batch["audio"]] ... return batch >>> audio_dataset_amr.set_transform(decode_audio_with_pydub) ``` ## Save Once you are done processing your dataset, you can save and reuse it later with [`~Dataset.save_to_disk`]. Save your dataset by providing the path to the directory you wish to save it to: ```py >>> encoded_dataset.save_to_disk("path/of/my/dataset/directory") ``` Use the [`load_from_disk`] function to reload the dataset: ```py >>> from datasets import load_from_disk >>> reloaded_dataset = load_from_disk("path/of/my/dataset/directory") ``` <Tip> Want to save your dataset to a cloud storage provider? Read our [Cloud Storage](./filesystems) guide to learn how to save your dataset to AWS or Google Cloud Storage. </Tip> ## Export 🤗 Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to: | File type | Export method | |-------------------------|----------------------------------------------------------------| | CSV | [`Dataset.to_csv`] | | JSON | [`Dataset.to_json`] | | Parquet | [`Dataset.to_parquet`] | | SQL | [`Dataset.to_sql`] | | In-memory Python object | [`Dataset.to_pandas`] or [`Dataset.to_dict`] | For example, export your dataset to a CSV file like this: ```py >>> encoded_dataset.to_csv("path/of/my/dataset.csv") ```
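If you want to keep the feature types, Parquet is usually a better fit than CSV; as a quick sketch (the paths are placeholders), the exported file can also be reloaded as a regular dataset:

```py
>>> encoded_dataset.to_parquet("path/of/my/dataset.parquet")
>>> from datasets import load_dataset
>>> reloaded_dataset = load_dataset("parquet", data_files="path/of/my/dataset.parquet", split="train")
```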
datasets/docs/source/process.mdx/0
{ "file_path": "datasets/docs/source/process.mdx", "repo_id": "datasets", "token_count": 10961 }
82
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 🤗 Datasets Notebooks You can find here a list of the official notebooks provided by Hugging Face. Also, we would like to list here interesting content created by the community. If you wrote some notebook(s) leveraging 🤗 Datasets and would like it to be listed here, please open a Pull Request so it can be included under the Community notebooks. ## Hugging Face's notebooks 🤗 ### Documentation notebooks You can open any page of the documentation as a notebook in Colab (there is a button directly on said pages) but they are also listed here if you need them: | Notebook | Description | | | |:----------|:-------------|:-------------|------:| | [Quickstart](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb) | A quick presentation on integrating Datasets into a model training workflow |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)|
datasets/notebooks/README.md/0
{ "file_path": "datasets/notebooks/README.md", "repo_id": "datasets", "token_count": 534 }
83
import importlib import importlib.metadata import logging import os import platform from pathlib import Path from typing import Optional from huggingface_hub import constants from packaging import version logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging # Datasets S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets" CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets" REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}" # Hub HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}" HUB_DEFAULT_VERSION = "main" PY_VERSION = version.parse(platform.python_version()) # General environment variables accepted values for booleans ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"}) # Imports DILL_VERSION = version.parse(importlib.metadata.version("dill")) FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow")) HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub")) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() TORCH_VERSION = "N/A" TORCH_AVAILABLE = False if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None if TORCH_AVAILABLE: try: TORCH_VERSION = version.parse(importlib.metadata.version("torch")) logger.info(f"PyTorch version {TORCH_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling PyTorch because USE_TF is set") POLARS_VERSION = "N/A" POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None if POLARS_AVAILABLE: try: POLARS_VERSION = version.parse(importlib.metadata.version("polars")) logger.info(f"Polars version {POLARS_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass TF_VERSION = "N/A" TF_AVAILABLE = False if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None if TF_AVAILABLE: # For the metadata, we have to look for both tensorflow and tensorflow-cpu for package in [ "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "intel-tensorflow", "tensorflow-rocm", "tensorflow-macos", ]: try: TF_VERSION = version.parse(importlib.metadata.version(package)) except importlib.metadata.PackageNotFoundError: continue else: break else: TF_AVAILABLE = False if TF_AVAILABLE: if TF_VERSION.major < 2: logger.info(f"TensorFlow found but with version {TF_VERSION}. 
`datasets` requires version 2 minimum.") TF_AVAILABLE = False else: logger.info(f"TensorFlow version {TF_VERSION} available.") else: logger.info("Disabling Tensorflow because USE_TORCH is set") JAX_VERSION = "N/A" JAX_AVAILABLE = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None if JAX_AVAILABLE: try: JAX_VERSION = version.parse(importlib.metadata.version("jax")) logger.info(f"JAX version {JAX_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling JAX because USE_JAX is set to False") # Optional tools for data loading SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None # Optional tools for feature decoding PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.0.31") IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.1.0") # Optional compression tools RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None # Cache location DEFAULT_XDG_CACHE_HOME = "~/.cache" XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets") HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE)) DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) DOWNLOADED_DATASETS_DIR = "downloads" DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR) DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH)) EXTRACTED_DATASETS_DIR = "extracted" DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR) EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH)) # Download count for the website HF_UPDATE_DOWNLOAD_COUNTS = ( os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES ) # For downloads and to check remote files metadata HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16 # Remote dataset scripts support __HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "ask") HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = ( True if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES else False if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES else None ) TIME_OUT_REMOTE_CODE = 15 # Dataset viewer API USE_PARQUET_EXPORT = True # Batch size constants. 
For more info, see: # https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations) DEFAULT_MAX_BATCH_SIZE = 1000 # Size of the preloaded record batch in `Dataset.__iter__` ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10 # Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare) MAX_SHARD_SIZE = "500MB" # Parquet configuration PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 # Offline mode _offline = os.environ.get("HF_DATASETS_OFFLINE") HF_HUB_OFFLINE = constants.HF_HUB_OFFLINE if _offline is None else _offline.upper() in ENV_VARS_TRUE_VALUES HF_DATASETS_OFFLINE = HF_HUB_OFFLINE # kept for backward-compatibility # Here, `True` will disable progress bars globally without possibility of enabling it # programmatically. `False` will enable them without possibility of disabling them. # If environment variable is not set (None), then the user is free to enable/disable # them programmatically. # TL;DR: env variable has priority over code __HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS") HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = ( __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None else None ) # In-memory DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE)) # File names DATASET_ARROW_FILENAME = "dataset.arrow" DATASET_INDICES_FILENAME = "indices.arrow" DATASET_STATE_JSON_FILENAME = "state.json" DATASET_INFO_FILENAME = "dataset_info.json" DATASETDICT_INFOS_FILENAME = "dataset_infos.json" LICENSE_FILENAME = "LICENSE" DATASETDICT_JSON_FILENAME = "dataset_dict.json" METADATA_CONFIGS_FIELD = "configs" REPOCARD_FILENAME = "README.md" REPOYAML_FILENAME = ".huggingface.yaml" MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules" MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255 # Temporary cache directory prefix TEMP_CACHE_DIR_PREFIX = "hf_datasets-" # Streaming STREAMING_READ_MAX_RETRIES = 20 STREAMING_READ_RETRY_INTERVAL = 5 # Datasets without script DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10 ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 # Progress bars PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec # Maximum number of uploaded files per commit UPLOADS_MAX_NUMBER_PER_COMMIT = 50 # Backward compatibiliy MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
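# Usage sketch (not part of this module): most of the values above can be overridden
# through environment variables before `datasets` is imported. The variable names come
# from this file; the example values are hypothetical.
#
#   export HF_DATASETS_CACHE=/mnt/storage/hf_datasets_cache
#   export HF_DATASETS_IN_MEMORY_MAX_SIZE=500000000
#   export HF_DATASETS_DISABLE_PROGRESS_BARS=1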
datasets/src/datasets/config.py/0
{ "file_path": "datasets/src/datasets/config.py", "repo_id": "datasets", "token_count": 4102 }
84
import inspect import os import random import shutil import tempfile import weakref from functools import wraps from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import xxhash from . import config from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH from .utils._dill import dumps from .utils.logging import get_logger if TYPE_CHECKING: from .arrow_dataset import Dataset logger = get_logger(__name__) # Fingerprinting allows to have one deterministic fingerprint per dataset state. # A dataset fingerprint is updated after each transform. # Re-running the same transforms on a dataset in a different session results in the same fingerprint. # This is possible thanks to a custom hashing function that works with most python objects. # Fingerprinting is the main mechanism that enables caching. # The caching mechanism allows to reload an existing cache file if it's already been computed. ################# # Caching ################# _CACHING_ENABLED = True _TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None _DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None class _TempCacheDir: """ A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files before deleting the directory itself to avoid permission errors on Windows. """ def __init__(self): self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX) self._finalizer = weakref.finalize(self, self._cleanup) def _cleanup(self): for dset in get_datasets_with_cache_file_in_temp_dir(): dset.__del__() if os.path.exists(self.name): try: shutil.rmtree(self.name) except Exception as e: raise OSError( f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually." ) from e def cleanup(self): if self._finalizer.detach(): self._cleanup() def maybe_register_dataset_for_temp_dir_deletion(dataset): """ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order to properly delete them before deleting the temporary directory. The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled. """ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: return global _DATASETS_WITH_TABLE_IN_TEMP_DIR if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None: _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet() if any( Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents for cache_file in dataset.cache_files ): _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset) def get_datasets_with_cache_file_in_temp_dir(): return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else [] def enable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. 
More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = True def disable_caching(): """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. """ global _CACHING_ENABLED _CACHING_ENABLED = False def is_caching_enabled() -> bool: """ When applying transforms on a dataset, the data are stored in cache files. The caching mechanism allows to reload an existing cache file if it's already been computed. Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated after each transform. If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. More precisely, if the caching is disabled: - cache files are always recreated - cache files are written to a temporary directory that is deleted when session closes - cache files are named using a random hash instead of the dataset fingerprint - use [`~datasets.Dataset.save_to_disk`]] to save a transformed dataset or it will be deleted when session closes - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use the `download_mode` parameter in [`~datasets.load_dataset`]. 
""" global _CACHING_ENABLED return bool(_CACHING_ENABLED) def get_temporary_cache_files_directory() -> str: """Return a directory that is deleted when session closes.""" global _TEMP_DIR_FOR_TEMP_CACHE_FILES if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir() return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name ################# # Hashing ################# class Hasher: """Hasher that accepts python objects as inputs.""" dispatch: Dict = {} def __init__(self): self.m = xxhash.xxh64() @classmethod def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str: value = [value] if isinstance(value, bytes) else value m = xxhash.xxh64() for x in value: m.update(x) return m.hexdigest() @classmethod def hash(cls, value: Any) -> str: return cls.hash_bytes(dumps(value)) def update(self, value: Any) -> None: header_for_update = f"=={type(value)}==" value_for_update = self.hash(value) self.m.update(header_for_update.encode("utf8")) self.m.update(value_for_update.encode("utf-8")) def hexdigest(self) -> str: return self.m.hexdigest() ################# # Fingerprinting ################# fingerprint_rng = random.Random() # we show a warning only once when fingerprinting fails to avoid spam fingerprint_warnings: Dict[str, bool] = {} def generate_fingerprint(dataset: "Dataset") -> str: state = dataset.__dict__ hasher = Hasher() for key in sorted(state): if key == "_fingerprint": continue hasher.update(key) hasher.update(state[key]) # hash data files last modification timestamps as well for cache_file in dataset.cache_files: hasher.update(os.path.getmtime(cache_file["filename"])) return hasher.hexdigest() def generate_random_fingerprint(nbits: int = 64) -> str: return f"{fingerprint_rng.getrandbits(nbits):0{nbits//4}x}" def update_fingerprint(fingerprint, transform, transform_args): global fingerprint_warnings hasher = Hasher() hasher.update(fingerprint) try: hasher.update(transform) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. " "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only showed once. Subsequent hashing failures won't be showed." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.") else: logger.info( f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() for key in sorted(transform_args): hasher.update(key) try: hasher.update(transform_args[key]) except: # noqa various errors might raise here from pickle or dill if _CACHING_ENABLED: if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False): logger.warning( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. " "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. 
" "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. " "This warning is only showed once. Subsequent hashing failures won't be showed." ) fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead." ) else: logger.info( f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled." ) return generate_random_fingerprint() return hasher.hexdigest() def validate_fingerprint(fingerprint: str, max_length=64): """ Make sure the fingerprint is a non-empty string that is not longer that max_length=64 by default, so that the fingerprint can be used to name cache files without issues. """ if not isinstance(fingerprint, str) or not fingerprint: raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.") for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: if invalid_char in fingerprint: raise ValueError( f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. " f"They could create issues when creating cache files." ) if len(fingerprint) > max_length: raise ValueError( f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}." "It could create issues when creating cache files." ) def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str: """ Format a transform to the format that will be used to update the fingerprint. """ transform = f"{func.__module__}.{func.__qualname__}" if version is not None: transform += f"@{version}" return transform def format_kwargs_for_fingerprint( func: Callable, args: Tuple, kwargs: Dict[str, Any], use_kwargs: Optional[List[str]] = None, ignore_kwargs: Optional[List[str]] = None, randomized_function: bool = False, ) -> Dict[str, Any]: """ Format the kwargs of a transform to the format that will be used to update the fingerprint. 
""" kwargs_for_fingerprint = kwargs.copy() if args: params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD] args = args[1:] # assume the first argument is the dataset params = params[1:] kwargs_for_fingerprint.update(zip(params, args)) else: del kwargs_for_fingerprint[ next(iter(inspect.signature(func).parameters)) ] # assume the first key is the dataset # keep the right kwargs to be hashed to generate the fingerprint if use_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs} if ignore_kwargs: kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs} if randomized_function: # randomized functions have `seed` and `generator` parameters if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None: _, seed, pos, *_ = np.random.get_state() seed = seed[pos] if pos < 624 else seed[0] kwargs_for_fingerprint["generator"] = np.random.default_rng(seed) # remove kwargs that are the default values default_values = { p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty } for default_varname, default_value in default_values.items(): if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value: kwargs_for_fingerprint.pop(default_varname) return kwargs_for_fingerprint def fingerprint_transform( inplace: bool, use_kwargs: Optional[List[str]] = None, ignore_kwargs: Optional[List[str]] = None, fingerprint_names: Optional[List[str]] = None, randomized_function: bool = False, version: Optional[str] = None, ): """ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` Args: inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of setting the fingerprint of the returned Dataset. use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account to update the fingerprint to the wrapped method that should take care of setting the fingerprint of the returned Dataset. By default all the arguments are used. ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs. fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): If the dataset transforms is not inplace and returns a DatasetDict, then it can require several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, one fingerprint named after each element of fingerprint_names is going to be passed. randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has optional parameters "seed" and "generator", then you can set randomized_function to True. This way, even if users set "seed" and "generator" to None, then the fingerprint is going to be randomly generated depending on numpy's current state. In this case, the generator is set to np.random.default_rng(np.random.get_state()[1][0]). version (:obj:`str`, optional): version of the transform. The version is taken into account when computing the fingerprint. If a datase transform changes (or at least if the output data that are cached changes), then one should increase the version. 
If the version stays the same, then old cached data could be reused that are not compatible with the new transform. It should be in the format "MAJOR.MINOR.PATCH". """ if use_kwargs is not None and not isinstance(use_kwargs, list): raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}") if ignore_kwargs is not None and not isinstance(ignore_kwargs, list): raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}") if inplace and fingerprint_names: raise ValueError("fingerprint_names are only used when inplace is False") fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"] def _fingerprint(func): if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names): raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature") if randomized_function: # randomized function have seed and generator parameters if "seed" not in func.__code__.co_varnames: raise ValueError(f"'seed' must be in {func}'s signature") if "generator" not in func.__code__.co_varnames: raise ValueError(f"'generator' must be in {func}'s signature") # this call has to be outside the wrapper or since __qualname__ changes in multiprocessing transform = format_transform_for_fingerprint(func, version=version) @wraps(func) def wrapper(*args, **kwargs): kwargs_for_fingerprint = format_kwargs_for_fingerprint( func, args, kwargs, use_kwargs=use_kwargs, ignore_kwargs=ignore_kwargs, randomized_function=randomized_function, ) if args: dataset: Dataset = args[0] args = args[1:] else: dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters))) # compute new_fingerprint and add it to the args of not in-place transforms if inplace: new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint) else: for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes if kwargs.get(fingerprint_name) is None: kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name kwargs[fingerprint_name] = update_fingerprint( dataset._fingerprint, transform, kwargs_for_fingerprint ) else: validate_fingerprint(kwargs[fingerprint_name]) # Call actual function out = func(dataset, *args, **kwargs) # Update fingerprint of in-place transforms + update in-place history of transforms if inplace: # update after calling func so that the fingerprint doesn't change if the function fails dataset._fingerprint = new_fingerprint return out wrapper._decorator_name_ = "fingerprint" return wrapper return _fingerprint
datasets/src/datasets/fingerprint.py/0
{ "file_path": "datasets/src/datasets/fingerprint.py", "repo_id": "datasets", "token_count": 7526 }
85
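A minimal usage sketch for the fingerprinting helpers in fingerprint.py above. The dataset, the add_len transform and its arguments are illustrative only; the imported names (Hasher, format_transform_for_fingerprint, update_fingerprint, generate_random_fingerprint) come directly from the module shown.

from datasets import Dataset
from datasets.fingerprint import (
    Hasher,
    format_transform_for_fingerprint,
    generate_random_fingerprint,
    update_fingerprint,
)

ds = Dataset.from_dict({"text": ["a", "b", "c"]})

# Deterministically hash any picklable object (this is what the caching layer relies on).
params_hash = Hasher.hash({"lowercase": True, "max_length": 128})
print(params_hash)  # 16-char hex digest from xxhash64

# Reproduce what Dataset.map does internally to derive the fingerprint of a transformed dataset.
def add_len(example):
    return {"length": len(example["text"])}

transform = format_transform_for_fingerprint(add_len)
new_fingerprint = update_fingerprint(ds._fingerprint, transform, {"function": add_len, "batched": False})
print(ds._fingerprint, "->", new_fingerprint)

# If a transform or one of its arguments cannot be pickled, update_fingerprint logs a
# warning and falls back to a random fingerprint like this one (so nothing is reloaded from cache).
print(generate_random_fingerprint())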
import os from typing import BinaryIO, Optional, Union import fsspec import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def get_writer_batch_size(features: Features) -> Optional[int]: """ Get the writer_batch_size that defines the maximum row group size in the parquet files. The default in `datasets` is 1,000 but we lower it to 100 for image datasets. This allows to optimize random access to parquet file, since accessing 1 row requires to read its entire row group. This can be improved to get optimized size for querying/iterating but at least it matches the dataset viewer expectations on HF. Args: ds_config_info (`datasets.info.DatasetInfo`): Dataset info from `datasets`. Returns: writer_batch_size (`Optional[int]`): Writer batch size to pass to a dataset builder. If `None`, then it will use the `datasets` default. """ batch_size = np.inf def set_batch_size(feature: FeatureType) -> None: nonlocal batch_size if isinstance(feature, Image): batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) elif isinstance(feature, Audio): batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) elif isinstance(feature, Value) and feature.dtype == "binary": batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) _visit(features, set_batch_size) return None if batch_size is np.inf else batch_size class ParquetDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} hash = _PACKAGED_DATASETS_MODULES["parquet"][1] self.builder = Parquet( cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class ParquetDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ): self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size or get_writer_batch_size(dataset.features) self.storage_options = storage_options or {} self.parquet_writer_kwargs = parquet_writer_kwargs def write(self) -> int: 
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer: written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) else: written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) return written def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: """Writes the pyarrow table as Parquet to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 _ = parquet_writer_kwargs.pop("path_or_buf", None) schema = self.dataset.features.arrow_schema writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) for offset in hf_tqdm( range(0, len(self.dataset), batch_size), unit="ba", desc="Creating parquet from Arrow format", ): batch = query_table( table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices, ) writer.write_table(batch) written += batch.nbytes writer.close() return written
datasets/src/datasets/io/parquet.py/0
{ "file_path": "datasets/src/datasets/io/parquet.py", "repo_id": "datasets", "token_count": 2585 }
86
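A short sketch of how the reader and writer in io/parquet.py above are used directly; the file name is made up, the prepared dataset lands in the default datasets cache, and the same code paths back the public Dataset.to_parquet / Dataset.from_parquet helpers.

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

ds = Dataset.from_dict({"col": list(range(10))})

# No image/audio/binary features, so the row-group size falls back to the library default.
print(get_writer_batch_size(ds.features))  # None

# Write the dataset as a single Parquet file; write() returns the number of bytes written.
num_bytes = ParquetDatasetWriter(ds, "example.parquet", batch_size=5).write()
print(num_bytes)

# Read it back as a map-style dataset.
reloaded = ParquetDatasetReader("example.parquet", split="train").read()
print(reloaded.num_rows)  # 10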
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal logger = datasets.utils.logging.get_logger(__name__) _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"] @dataclass class CsvConfig(datasets.BuilderConfig): """BuilderConfig for CSV.""" sep: str = "," delimiter: Optional[str] = None header: Optional[Union[int, List[int], str]] = "infer" names: Optional[List[str]] = None column_names: Optional[List[str]] = None index_col: Optional[Union[int, str, List[int], List[str]]] = None usecols: Optional[Union[List[int], List[str]]] = None prefix: Optional[str] = None mangle_dupe_cols: bool = True engine: Optional[Literal["c", "python", "pyarrow"]] = None converters: Dict[Union[int, str], Callable[[Any], Any]] = None true_values: Optional[list] = None false_values: Optional[list] = None skipinitialspace: bool = False skiprows: Optional[Union[int, List[int]]] = None nrows: Optional[int] = None na_values: Optional[Union[str, List[str]]] = None keep_default_na: bool = True na_filter: bool = True verbose: bool = False skip_blank_lines: bool = True thousands: Optional[str] = None decimal: str = "." lineterminator: Optional[str] = None quotechar: str = '"' quoting: int = 0 escapechar: Optional[str] = None comment: Optional[str] = None encoding: Optional[str] = None dialect: Optional[str] = None error_bad_lines: bool = True warn_bad_lines: bool = True skipfooter: int = 0 doublequote: bool = True memory_map: bool = False float_precision: Optional[str] = None chunksize: int = 10_000 features: Optional[datasets.Features] = None encoding_errors: Optional[str] = "strict" on_bad_lines: Literal["error", "warn", "skip"] = "error" date_format: Optional[str] = None def __post_init__(self): super().__post_init__() if self.delimiter is not None: self.sep = self.delimiter if self.column_names is not None: self.names = self.column_names @property def pd_read_csv_kwargs(self): pd_read_csv_kwargs = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, 
"encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.2 deprecated arguments if datasets.config.PANDAS_VERSION.release >= (2, 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class Csv(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = CsvConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self, files): schema = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str dtype = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values()) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) try: for batch_idx, df in enumerate(csv_file_reader): pa_table = pa.Table.from_pandas(df) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
datasets/src/datasets/packaged_modules/csv/csv.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/csv/csv.py", "repo_id": "datasets", "token_count": 3889 }
87
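A brief sketch of how the Csv builder above is reached in practice; the CSV path is hypothetical. Every CsvConfig field is forwarded to pandas.read_csv, and each chunk of `chunksize` rows becomes one Arrow table in _generate_tables.

from datasets import load_dataset
from datasets.packaged_modules.csv.csv import CsvConfig

# Inspect which keyword arguments a config would hand to pandas.read_csv.
cfg = CsvConfig(name="demo", sep=";", skiprows=1)
print(cfg.pd_read_csv_kwargs["sep"], cfg.pd_read_csv_kwargs["skiprows"])  # ; 1

# The same fields can be passed straight through load_dataset("csv", ...),
# which is the usual entry point that dispatches to this builder.
ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";", skiprows=1)
print(ds["train"].features)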
import sys from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast if TYPE_CHECKING: import sqlite3 import sqlalchemy logger = datasets.utils.logging.get_logger(__name__) @dataclass class SqlConfig(datasets.BuilderConfig): """BuilderConfig for SQL.""" sql: Union[str, "sqlalchemy.sql.Selectable"] = None con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None index_col: Optional[Union[str, List[str]]] = None coerce_float: bool = True params: Optional[Union[List, Tuple, Dict]] = None parse_dates: Optional[Union[List, Dict]] = None columns: Optional[List[str]] = None chunksize: Optional[int] = 10_000 features: Optional[datasets.Features] = None def __post_init__(self): super().__post_init__() if self.sql is None: raise ValueError("sql must be specified") if self.con is None: raise ValueError("con must be specified") def create_config_id( self, config_kwargs: dict, custom_features: Optional[datasets.Features] = None, ) -> str: config_kwargs = config_kwargs.copy() # We need to stringify the Selectable object to make its hash deterministic # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html sql = config_kwargs["sql"] if not isinstance(sql, str): if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: import sqlalchemy if isinstance(sql, sqlalchemy.sql.Selectable): engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") sql_str = str(sql.compile(dialect=engine.dialect)) config_kwargs["sql"] = sql_str else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) con = config_kwargs["con"] if not isinstance(con, str): config_kwargs["con"] = id(con) logger.info( f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." 
) return super().create_config_id(config_kwargs, custom_features=custom_features) @property def pd_read_sql_kwargs(self): pd_read_sql_kwargs = { "index_col": self.index_col, "columns": self.columns, "params": self.params, "coerce_float": self.coerce_float, "parse_dates": self.parse_dates, } return pd_read_sql_kwargs class Sql(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = SqlConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self): chunksize = self.config.chunksize sql_reader = pd.read_sql( self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs ) sql_reader = [sql_reader] if chunksize is None else sql_reader for chunk_idx, df in enumerate(sql_reader): pa_table = pa.Table.from_pandas(df) yield chunk_idx, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/sql/sql.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/sql/sql.py", "repo_id": "datasets", "token_count": 1979 }
88
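A sketch of the public entry point to the Sql builder above; the SQLite database and table names are placeholders. Passing `con` as a URI string keeps the config hashable (so results can be cached), whereas a live connection object is hashed by id(), as the warning in create_config_id notes.

from datasets import Dataset

# Read the result of a query into a Dataset. Rows are fetched through pandas.read_sql
# in chunks of `chunksize` (10_000 by default), each chunk converted to an Arrow table.
ds = Dataset.from_sql(
    "SELECT id, label FROM annotations",  # placeholder query
    con="sqlite:///annotations.db",       # placeholder database URI
)
print(ds.features)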
import enum import inspect import warnings from functools import wraps from typing import Callable, Optional from .logging import get_logger _emitted_deprecation_warnings = set() logger = get_logger(__name__) def deprecated(help_message: Optional[str] = None): """Decorator to mark a class or a function as deprecated. Args: help_message (:obj:`str`, optional): An optional message to guide the user on how to switch to non-deprecated usage of the library. """ def decorator(deprecated_class_or_function: Callable): global _emitted_deprecation_warnings if inspect.isclass(deprecated_class_or_function): deprecated_function = deprecated_class_or_function.__init__ name = deprecated_class_or_function.__name__ else: deprecated_function = deprecated_class_or_function name = deprecated_function.__name__ # Support deprecating __init__ class method: class name instead name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2] warning_msg = ( f"{name} is deprecated and will be removed in the next major version of datasets." + f" {help_message}" if help_message else "" ) @wraps(deprecated_function) def wrapper(*args, **kwargs): func_hash = hash(deprecated_function) if func_hash not in _emitted_deprecation_warnings: warnings.warn(warning_msg, category=FutureWarning, stacklevel=2) _emitted_deprecation_warnings.add(func_hash) return deprecated_function(*args, **kwargs) wrapper._decorator_name_ = "deprecated" if inspect.isclass(deprecated_class_or_function): deprecated_class_or_function.__init__ = wrapper return deprecated_class_or_function else: return wrapper return decorator class OnAccess(enum.EnumMeta): """ Enum metaclass that calls a user-specified function whenever a member is accessed. """ def __getattribute__(cls, name): obj = super().__getattribute__(name) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj def __getitem__(cls, name): member = super().__getitem__(name) if member._on_access: member._on_access() return member def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj class DeprecatedEnum(enum.Enum, metaclass=OnAccess): """ Enum class that calls `deprecate` method whenever a member is accessed. """ def __new__(cls, value): member = object.__new__(cls) member._value_ = value member._on_access = member.deprecate return member @property def help_message(self): return "" def deprecate(self): help_message = f" {self.help_message}" if self.help_message else "" warnings.warn( f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets." + help_message, FutureWarning, stacklevel=3, )
datasets/src/datasets/utils/deprecation_utils.py/0
{ "file_path": "datasets/src/datasets/utils/deprecation_utils.py", "repo_id": "datasets", "token_count": 1426 }
89
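A small sketch of the two helpers in deprecation_utils.py above, using made-up names (old_tokenize, OldMetric): the decorator warns once per function, and DeprecatedEnum members warn whenever they are accessed.

import warnings
from datasets.utils.deprecation_utils import DeprecatedEnum, deprecated

@deprecated(help_message="Use `new_tokenize` instead.")
def old_tokenize(text):
    return text.split()

class OldMetric(DeprecatedEnum):
    ACCURACY = "accuracy"

    @property
    def help_message(self):
        return "Use the `evaluate` library instead."

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_tokenize("hello world")  # FutureWarning, emitted only on the first call
    old_tokenize("hello again")  # no second warning: the function hash is remembered
    _ = OldMetric.ACCURACY       # member access triggers its own FutureWarning
print([str(w.message) for w in caught])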
name: "" # Filename comes here allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null # meaning it should not be checked. - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: true allow_empty_text: true subsections: null - name: "Dataset Structure" allow_empty: false allow_empty_text: true subsections: - name: "Data Instances" allow_empty: false allow_empty_text: true subsections: null - name: "Data Fields" allow_empty: false allow_empty_text: true subsections: null - name: "Data Splits" allow_empty: false allow_empty_text: true subsections: null - name: "Dataset Creation" allow_empty: false allow_empty_text: true subsections: - name: "Curation Rationale" allow_empty: true allow_empty_text: true subsections: null - name: "Source Data" allow_empty: false allow_empty_text: true subsections: - name: "Initial Data Collection and Normalization" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the source language producers?" allow_empty: true allow_empty_text: true subsections: null - name: "Annotations" allow_empty: false allow_empty_text: true subsections: - name: "Annotation process" allow_empty: true allow_empty_text: true subsections: null - name: "Who are the annotators?" allow_empty: true allow_empty_text: true subsections: null - name: "Personal and Sensitive Information" allow_empty: true allow_empty_text: true subsections: null - name: "Considerations for Using the Data" allow_empty: true allow_empty_text: true subsections: - name: "Social Impact of Dataset" allow_empty: true allow_empty_text: true subsections: null - name: "Discussion of Biases" allow_empty: true allow_empty_text: true subsections: null - name: "Other Known Limitations" allow_empty: true allow_empty_text: true subsections: null - name: "Additional Information" allow_empty: true allow_empty_text: true subsections: - name: "Dataset Curators" allow_empty: true allow_empty_text: true subsections: null - name: "Licensing Information" allow_empty: true allow_empty_text: true subsections: null - name: "Citation Information" allow_empty: false allow_empty_text: true subsections: null - name: "Contributions" allow_empty: false allow_empty_text: false subsections: null
datasets/src/datasets/utils/resources/readme_structure.yaml/0
{ "file_path": "datasets/src/datasets/utils/resources/readme_structure.yaml", "repo_id": "datasets", "token_count": 1924 }
90
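A small sketch of how the section tree defined in readme_structure.yaml above can be consumed; the file path and the PyYAML dependency are assumptions, and the real consumer is the dataset-card/README validation tooling.

import yaml  # PyYAML, assumed to be installed

def walk(section: dict, depth: int = 0) -> None:
    # Print each expected section, its nesting depth, and whether empty text is tolerated.
    name = section["name"] or "<dataset card filename>"
    print("  " * depth + f"{name} (allow_empty_text={section['allow_empty_text']})")
    for subsection in section.get("subsections") or []:  # `null` means "do not check further"
        walk(subsection, depth + 1)

with open("readme_structure.yaml", encoding="utf-8") as f:  # path is illustrative
    walk(yaml.safe_load(f))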
import pytest DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import json import os import datasets REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def dataset_loading_script_name(): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def dataset_loading_script_code(): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path): script_name = dataset_loading_script_name script_dir = tmp_path / "datasets" / script_name script_dir.mkdir(parents=True) script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(dataset_loading_script_code) return str(script_dir)
datasets/tests/commands/conftest.py/0
{ "file_path": "datasets/tests/commands/conftest.py", "repo_id": "datasets", "token_count": 1193 }
91
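A hypothetical test showing how the dataset_loading_script_dir fixture above would be consumed: pytest injects the temporary script directory, and load_dataset resolves the __dummy_dataset1__.py loading script from it. The hosted JSONL files require network access, and trust_remote_code is assumed to be required for script-based datasets in recent library versions.

from datasets import load_dataset

def test_load_dummy_dataset(dataset_loading_script_dir):
    # The directory contains __dummy_dataset1__.py, which datasets treats as a local loading script.
    ds = load_dataset(dataset_loading_script_dir, split="train", trust_remote_code=True)
    assert "tokens" in ds.column_names
    assert ds.features["ner_tags"].feature.names[0] == "O"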
from pathlib import Path import pytest from datasets import load_dataset from datasets.packaged_modules.cache.cache import Cache SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME = "hf-internal-testing/DatasetWithCapitalLetters" def test_cache(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2] cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_streaming(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_streaming" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2] cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash) reloaded = cache.as_streaming_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_auto_hash(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_auto_hash" ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) def test_cache_auto_hash_with_custom_config(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_auto_hash_with_custom_config" ds = load_dataset(str(text_dir), sample_by="paragraph", cache_dir=str(cache_dir)) another_ds = load_dataset(str(text_dir), cache_dir=str(cache_dir)) cache = Cache( cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto", sample_by="paragraph" ) another_cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto") assert cache.config_id.endswith("paragraph") assert not another_cache.config_id.endswith("paragraph") reloaded = cache.as_dataset() another_reloaded = another_cache.as_dataset() assert list(ds) == list(reloaded) assert list(ds["train"]) == list(reloaded["train"]) assert list(another_ds) == list(another_reloaded) assert list(another_ds["train"]) == list(another_reloaded["train"]) def test_cache_missing(text_dir: Path, tmp_path: Path): cache_dir = tmp_path / "test_cache_missing" load_dataset(str(text_dir), cache_dir=str(cache_dir)) Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto").download_and_prepare() with pytest.raises(ValueError): Cache(cache_dir=str(cache_dir), dataset_name="missing", version="auto", hash="auto").download_and_prepare() with pytest.raises(ValueError): Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash="missing").download_and_prepare() with pytest.raises(ValueError): Cache( cache_dir=str(cache_dir), dataset_name=text_dir.name, config_name="missing", version="auto", hash="auto" ).download_and_prepare() @pytest.mark.integration def test_cache_multi_configs(tmp_path: Path): cache_dir = tmp_path / "test_cache_multi_configs" repo_id = SAMPLE_DATASET_TWO_CONFIG_IN_METADATA dataset_name = repo_id.split("/")[-1] config_name = "v1" ds = load_dataset(repo_id, config_name, cache_dir=str(cache_dir)) cache = Cache( 
cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name=config_name, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) with pytest.raises(ValueError) as excinfo: Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name="missing", version="auto", hash="auto", ) assert config_name in str(excinfo.value) @pytest.mark.integration def test_cache_single_config(tmp_path: Path): cache_dir = tmp_path / "test_cache_single_config" repo_id = SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA dataset_name = repo_id.split("/")[-1] config_name = "custom" ds = load_dataset(repo_id, cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) cache = Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, config_name=config_name, repo_id=repo_id, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) with pytest.raises(ValueError) as excinfo: Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, config_name="missing", version="auto", hash="auto", ) assert config_name in str(excinfo.value) @pytest.mark.integration def test_cache_capital_letters(tmp_path: Path): cache_dir = tmp_path / "test_cache_capital_letters" repo_id = SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME dataset_name = repo_id.split("/")[-1] ds = load_dataset(repo_id, cache_dir=str(cache_dir)) cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto") reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"]) cache = Cache( cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto", ) reloaded = cache.as_dataset() assert list(ds) == list(reloaded) assert len(ds["train"]) == len(reloaded["train"])
datasets/tests/packaged_modules/test_cache.py/0
{ "file_path": "datasets/tests/packaged_modules/test_cache.py", "repo_id": "datasets", "token_count": 2721 }
92
import os import tempfile from unittest import TestCase import numpy as np import pandas as pd import pytest from datasets import load_from_disk from datasets.arrow_dataset import Dataset from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.features import ClassLabel, Features, Sequence, Value from datasets.iterable_dataset import IterableDataset from datasets.splits import NamedSplit from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_numpy1_on_windows, require_polars, require_tf, require_torch, ) class DatasetDictTest(TestCase): def _create_dummy_dataset(self, multiple_columns=False): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} dset = Dataset.from_dict(data) else: dset = Dataset.from_dict( {"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]} ) return dset def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict: return DatasetDict( { "train": self._create_dummy_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_dataset(multiple_columns=multiple_columns), } ) def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset: def gen(): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} for v1, v2 in zip(data["col_1"], data["col_2"]): yield {"col_1": v1, "col_2": v2} else: for x in range(30): yield {"filename": "my_name-train" + "_" + f"{x:03d}"} return IterableDataset.from_generator(gen) def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict: return IterableDatasetDict( { "train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), } ) def test_flatten(self): dset_split = Dataset.from_dict( {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), ) dset = DatasetDict({"train": dset_split, "test": dset_split}) dset = dset.flatten() self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]}) self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"]) self.assertDictEqual( dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) ) del dset def test_set_format_numpy(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="numpy", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.reset_format() with dset.formatted_as(type="numpy", columns=["col_1"]): for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], None) self.assertEqual(dset_split.format["format_kwargs"], {}) self.assertEqual(dset_split.format["columns"], dset_split.column_names) self.assertEqual(dset_split.format["output_all_columns"], False) dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="numpy", 
columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], np.str_) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset @require_numpy1_on_windows @require_torch def test_set_format_torch(self): import torch dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="torch", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="torch") for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") del dset @require_tf def test_set_format_tf(self): import tensorflow as tf dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="tensorflow", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3) dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a") del dset def test_set_format_pandas(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="pandas", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 1) self.assertIsInstance(dset_split[0], pd.DataFrame) self.assertListEqual(list(dset_split[0].shape), [1, 1]) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="pandas", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 2) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset @require_polars def test_set_format_polars(self): import polars as pl dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="polars", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 1) self.assertIsInstance(dset_split[0], pl.DataFrame) self.assertEqual(dset_split[0].shape, (1, 1)) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="polars", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 2) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset def test_set_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = 
self._create_dummy_dataset_dict(multiple_columns=True) dset.set_transform(transform=transform, columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], "custom") self.assertEqual(len(dset_split[0].keys()), 1) self.assertEqual(dset_split[0]["col_1"], "3") self.assertEqual(dset_split[:2]["col_1"], ["3", "2"]) self.assertEqual(dset_split["col_1"][:2], ["3", "2"]) prev_format = dset[list(dset.keys())[0]].format for dset_split in dset.values(): dset_split.set_format(**dset_split.format) self.assertEqual(prev_format, dset_split.format) dset.set_transform(transform=transform, columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].keys()), 2) self.assertEqual(dset_split[0]["col_2"], "A") del dset def test_with_format(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_format("numpy", columns=["col_1"]) dset.set_format("numpy", columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_with_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_transform(transform, columns=["col_1"]) dset.set_transform(transform, columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_cast(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) features = dset["train"].features features["col_1"] = Value("float64") dset = dset.cast(features) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertEqual(dset_split.features["col_1"], Value("float64")) self.assertIsInstance(dset_split[0]["col_1"], float) del dset def test_remove_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.remove_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertListEqual(dset_split._format_columns, ["col_2"]) self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) del dset def test_rename_column(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"]) del dset def test_select_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=[]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) 
self.assertListEqual(list(dset_split.column_names), ["col_1"]) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.select_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_1"]) self.assertListEqual(dset_split._format_columns, ["col_1"]) def test_map(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys())) self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"]) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } mapped_dsets_2: DatasetDict = mapped_dsets_1.map( lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys())) self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"])) del dsets, mapped_dsets_1, mapped_dsets_2 def test_iterable_map(self): dsets = self._create_dummy_iterable_dataset_dict() fn_kwargs = {"n": 3} mapped_dsets: IterableDatasetDict = dsets.map( lambda x, n: {"foo": [n] * len(x["filename"])}, batched=True, fn_kwargs=fn_kwargs, ) mapped_example = next(iter(mapped_dsets["train"])) self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"])) self.assertLessEqual(mapped_example["foo"], 3) del dsets, mapped_dsets def test_filter(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys())) self.assertEqual(len(filtered_dsets_1["train"]), 10) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } filtered_dsets_2: DatasetDict = filtered_dsets_1.filter( lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys())) self.assertEqual(len(filtered_dsets_2["train"]), 5) filtered_dsets_3: DatasetDict = dsets.filter( lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys())) self.assertEqual(len(filtered_dsets_3["train"]), 10) del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3 def test_iterable_filter(self): dsets = self._create_dummy_iterable_dataset_dict() example = next(iter(dsets["train"])) fn_kwargs = {"n": 3} filtered_dsets: IterableDatasetDict = dsets.filter( lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs ) filtered_example = next(iter(filtered_dsets["train"])) self.assertListEqual(list(example.keys()), list(filtered_example.keys())) self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4) # id starts from 3 del dsets, filtered_dsets def test_sort(self): with tempfile.TemporaryDirectory() as 
tmp_dir: dsets = self._create_dummy_dataset_dict() sorted_dsets_1: DatasetDict = dsets.sort("filename") self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]], sorted(f"{x:03d}" for x in range(30)), ) indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } sorted_dsets_2: DatasetDict = sorted_dsets_1.sort( "filename", indices_cache_file_names=indices_cache_file_names, reverse=True ) self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]], sorted((f"{x:03d}" for x in range(30)), reverse=True), ) del dsets, sorted_dsets_1, sorted_dsets_2 def test_shuffle(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } seeds = { "train": 1234, "test": 1234, } dsets_shuffled = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"]) self.assertEqual(len(dsets_shuffled["train"]), 30) self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028") self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010") self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")})) self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")})) # Reproducibility indices_cache_file_names_2 = { "train": os.path.join(tmp_dir, "train_2.arrow"), "test": os.path.join(tmp_dir, "test_2.arrow"), } dsets_shuffled_2 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"]) seeds = { "train": 1234, "test": 1, } indices_cache_file_names_3 = { "train": os.path.join(tmp_dir, "train_3.arrow"), "test": os.path.join(tmp_dir, "test_3.arrow"), } dsets_shuffled_3 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False ) self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"]) # other input types dsets_shuffled_int = dsets.shuffle(42) dsets_shuffled_alias = dsets.shuffle(seed=42) dsets_shuffled_none = dsets.shuffle() self.assertEqual(len(dsets_shuffled_int["train"]), 30) self.assertEqual(len(dsets_shuffled_alias["train"]), 30) self.assertEqual(len(dsets_shuffled_none["train"]), 30) del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3 del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none def test_flatten_indices(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } dsets_shuffled = dsets.shuffle( seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertIsNotNone(dsets_shuffled["train"]._indices) self.assertIsNotNone(dsets_shuffled["test"]._indices) dsets_flat = dsets_shuffled.flatten_indices() self.assertIsNone(dsets_flat["train"]._indices) self.assertIsNone(dsets_flat["test"]._indices) del dsets, dsets_shuffled, dsets_flat 
def test_check_values_type(self): dsets = self._create_dummy_dataset_dict() dsets["bad_split"] = None self.assertRaises(TypeError, dsets.map, lambda x: x) self.assertRaises(TypeError, dsets.filter, lambda x: True) self.assertRaises(TypeError, dsets.shuffle) self.assertRaises(TypeError, dsets.sort, "filename") del dsets def test_serialization(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) del reloaded_dsets del dsets["test"] dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) del dsets, reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2}) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 3) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_proc=2) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 2) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets def test_load_from_disk(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) del dsets dsets = load_from_disk(tmp_dir) self.assertListEqual(sorted(dsets), ["test", "train"]) self.assertEqual(len(dsets["train"]), 30) self.assertListEqual(dsets["train"].column_names, ["filename"]) self.assertEqual(len(dsets["test"]), 30) self.assertListEqual(dsets["test"].column_names, ["filename"]) del dsets def test_align_labels_with_mapping(self): train_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), } ) test_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]), } ) train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]} test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]} label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1} id2label = {v: k for k, v in label2id.items()} train_expected_labels = [2, 2, 1, 1, 0, 0] test_expected_labels = [2, 2, 0, 0, 
1, 1] train_expected_label_names = [id2label[idx] for idx in train_expected_labels] test_expected_label_names = [id2label[idx] for idx in test_expected_labels] dsets = DatasetDict( { "train": Dataset.from_dict(train_data, features=train_features), "test": Dataset.from_dict(test_data, features=test_features), } ) dsets = dsets.align_labels_with_mapping(label2id, "input_labels") self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"]) self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"]) train_aligned_label_names = [ dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"] ] test_aligned_label_names = [ dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"] ] self.assertListEqual(train_expected_label_names, train_aligned_label_names) self.assertListEqual(test_expected_label_names, test_aligned_label_names) def test_dummy_datasetdict_serialize_fs(mockfs): dataset_dict = DatasetDict( { "train": Dataset.from_dict({"a": range(30)}), "test": Dataset.from_dict({"a": range(10)}), } ) dataset_path = "mock://my_dataset" dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options) assert mockfs.isdir(dataset_path) assert mockfs.glob(dataset_path + "/*") reloaded = DatasetDict.load_from_disk(dataset_path, storage_options=mockfs.storage_options) assert list(reloaded) == list(dataset_dict) for k in dataset_dict: assert reloaded[k].features == dataset_dict[k].features assert reloaded[k].to_dict() == dataset_dict[k].to_dict() def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_csv_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_csv_split(split, csv_path, tmp_path): if split: path = 
{split: csv_path} else: split = "train" path = {"train": csv_path, "test": csv_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_csv(path, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_json(path, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_parquet({"train": parquet_path}, 
cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path): if split: path = {split: parquet_path} else: split = "train" path = {"train": parquet_path, "test": parquet_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ], ) def test_datasetdict_from_text_features(features, text_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"text": "string"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_text_split(split, text_path, tmp_path): if split: path = {split: text_path} else: split = "train" path = {"train": text_path, "test": text_path} cache_dir = tmp_path / "cache" expected_features = {"text": "string"} dataset = DatasetDict.from_text(path, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys())
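# Illustrative sketch, not part of the original suite: the split-dict constructors
# accept any number of split names at once. This assumes the `text_path` fixture
# and the `_check_text_datasetdict` helper above behave the same for an extra
# "validation" key.
def test_datasetdict_from_text_multiple_splits_sketch(text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    dataset = DatasetDict.from_text({"train": text_path, "validation": text_path}, cache_dir=cache_dir)
    _check_text_datasetdict(dataset, {"text": "string"}, splits=["train", "validation"])
    assert all(dataset[split].split == split for split in ["train", "validation"])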
datasets/tests/test_dataset_dict.py/0
{ "file_path": "datasets/tests/test_dataset_dict.py", "repo_id": "datasets", "token_count": 17837 }
93
import pickle from copy import deepcopy from itertools import chain, cycle, islice import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pytest from datasets import Dataset, load_dataset from datasets.combine import concatenate_datasets, interleave_datasets from datasets.distributed import split_dataset_by_node from datasets.features import ( ClassLabel, Features, Image, Value, ) from datasets.formatting import get_format_type_from_alias from datasets.info import DatasetInfo from datasets.iterable_dataset import ( ArrowExamplesIterable, BufferShuffledExamplesIterable, CyclingMultiSourcesExamplesIterable, ExamplesIterable, FilteredExamplesIterable, FormattingConfig, HorizontallyConcatenatedMultiSourcesExamplesIterable, IterableDataset, MappedExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable, RebatchedArrowExamplesIterable, SelectColumnsIterable, ShuffledDataSourcesArrowExamplesIterable, ShuffledDataSourcesExamplesIterable, ShufflingConfig, SkipExamplesIterable, StepExamplesIterable, TakeExamplesIterable, TypedExamplesIterable, VerticallyConcatenatedMultiSourcesExamplesIterable, _BaseExamplesIterable, _batch_to_examples, _convert_to_arrow, _examples_to_batch, ) from .utils import ( assert_arrow_memory_doesnt_increase, is_rng_equal, require_dill_gt_0_3_2, require_not_windows, require_numpy1_on_windows, require_pyspark, require_tf, require_torch, require_torchdata_stateful_dataloader, ) DEFAULT_N_EXAMPLES = 20 DEFAULT_BATCH_SIZE = 4 DEFAULT_FILEPATH = "file.txt" SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script def generate_examples_fn(**kwargs): kwargs = kwargs.copy() n = kwargs.pop("n", DEFAULT_N_EXAMPLES) filepaths = kwargs.pop("filepaths", None) for filepath in filepaths or [DEFAULT_FILEPATH]: if filepaths is not None: kwargs["filepath"] = filepath for i in range(n): yield f"{filepath}_{i}", {"id": i, **kwargs} def generate_tables_fn(**kwargs): kwargs = kwargs.copy() n = kwargs.pop("n", DEFAULT_N_EXAMPLES) batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE) filepaths = kwargs.pop("filepaths", None) for filepath in filepaths or [DEFAULT_FILEPATH]: buffer = [] batch_idx = 0 if filepaths is not None: kwargs["filepath"] = filepath for i in range(n): buffer.append({"id": i, **kwargs}) if len(buffer) == batch_size: yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer) buffer = [] batch_idx += 1 yield batch_idx, pa.Table.from_pylist(buffer) @pytest.fixture def dataset(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") @pytest.fixture def dataset_with_several_columns(): ex_iterable = ExamplesIterable( generate_examples_fn, {"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}}, ) return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") @pytest.fixture def arrow_file(tmp_path_factory, dataset: IterableDataset): filename = str(tmp_path_factory.mktemp("data") / "file.arrow") Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename) return filename def assert_load_state_dict_resumes_iteration(ex_iterable: _BaseExamplesIterable): ex_iterable._init_state_dict() state_dicts = [ex_iterable.state_dict()] examples = [] for _, example in ex_iterable: state_dicts.append(ex_iterable.state_dict()) examples.append(example) for i, state_dict in enumerate(state_dicts): ex_iterable.load_state_dict(state_dict) 
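        # After restoring the state captured right before example i, iterating the
        # examples iterable again should yield exactly the remaining examples[i:].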
examples_after_resuming = [example for _, example in ex_iterable] assert examples_after_resuming == examples[i:], f"resuming from idx {i} with {state_dict=}" def assert_load_state_dict_resumes_arrow_iteration(ex_iterable: _BaseExamplesIterable): assert ex_iterable.iter_arrow is not None ex_iterable._init_state_dict() state_dicts = [ex_iterable.state_dict()] examples = [] indices = [0] for _, pa_table in ex_iterable.iter_arrow(): state_dicts.append(ex_iterable.state_dict()) examples.extend(pa_table.to_pylist()) indices.append(indices[-1] + len(pa_table)) for i, state_dict in zip(indices, state_dicts): ex_iterable.load_state_dict(state_dict) examples_after_resuming = [ example for _, pa_table in ex_iterable.iter_arrow() for example in pa_table.to_pylist() ] assert examples_after_resuming == examples[i:], f"resuming from idx {i} with {state_dict=}" ################################ # # Utilities tests # ################################ @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_convert_to_arrow(batch_size, drop_last_batch): examples = [{"foo": i} for i in range(10)] full_table = pa.Table.from_pylist(examples) num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size subtables = list( _convert_to_arrow( list(enumerate(examples)), batch_size=batch_size, drop_last_batch=drop_last_batch, ) ) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for _, subtable in subtables) else: assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) assert len(subtables[-1][1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables([subtable for _, subtable in subtables]) assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() ################################ # # _BaseExampleIterable tests # ################################ def test_examples_iterable(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) expected = list(generate_examples_fn()) assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert ex_iterable.iter_arrow is None assert_load_state_dict_resumes_iteration(ex_iterable) def test_examples_iterable_with_kwargs(): ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train")) assert list(ex_iterable) == expected assert all("split" in ex for _, ex in ex_iterable) assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"] assert_load_state_dict_resumes_iteration(ex_iterable) def test_examples_iterable_shuffle_data_sources(): ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}) ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths assert list(ex_iterable) == expected assert_load_state_dict_resumes_iteration(ex_iterable) def test_examples_iterable_shuffle_shards_and_metadata(): def gen(filepaths, all_metadata): for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)): yield i, {"filepath": filepath, "metadata": metadata} ex_iterable = ExamplesIterable( gen, { "filepaths": [f"{i}.txt" for i in range(100)], "all_metadata": [{"id": str(i)} for i in range(100)], }, ) 
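    # Shuffling the data sources must apply the same permutation to `filepaths` and
    # `all_metadata`, otherwise shards would be decoupled from their metadata.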
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42)) out = list(ex_iterable) filepaths_ids = [x["filepath"].split(".")[0] for _, x in out] metadata_ids = [x["metadata"]["id"] for _, x in out] assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way" assert_load_state_dict_resumes_iteration(ex_iterable) def test_arrow_examples_iterable(): ex_iterable = ArrowExamplesIterable(generate_tables_fn, {}) expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], []) assert next(iter(ex_iterable))[1] == expected[0] assert [example for _, example in ex_iterable] == expected expected = list(generate_tables_fn()) assert list(ex_iterable.iter_arrow()) == expected assert_load_state_dict_resumes_iteration(ex_iterable) def test_arrow_examples_iterable_with_kwargs(): ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) expected = sum( [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], [] ) assert [example for _, example in ex_iterable] == expected assert all("split" in ex for _, ex in ex_iterable) assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"] expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")) assert list(ex_iterable.iter_arrow()) == expected assert_load_state_dict_resumes_iteration(ex_iterable) def test_arrow_examples_iterable_shuffle_data_sources(): ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]}) ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) expected = sum( [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], [] ) # shuffle the filepaths assert [example for _, example in ex_iterable] == expected expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"])) assert list(ex_iterable.iter_arrow()) == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "tables", [ [pa.table({"foo": range(10)})], [pa.table({"foo": range(5 * i, 5 * (i + 1))}) for i in range(2)], [pa.table({"foo": range(5 * i, 5 * (i + 1))}) for i in range(7)], [pa.table({"foo": [i]}) for i in range(10)], ], ) @pytest.mark.parametrize("batch_size", [1, 2, 3, 7, 9, 10, 11, 13, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_rebatched_arrow_examples_iterable(tables, batch_size, drop_last_batch): full_table = pa.concat_tables(tables) num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size def gen(tables): for i, table in enumerate(tables): yield str(i), table ex_iterable = ArrowExamplesIterable(gen, {"tables": tables}) ex_iterable = RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) subtables = list(ex_iterable.iter_arrow()) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for _, subtable in subtables) else: assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) assert len(subtables[-1][1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables([subtable for _, subtable in subtables]) assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() assert_load_state_dict_resumes_iteration(ex_iterable) 
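    # Resumption must also hold at the Arrow-batch level, not only per example.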
assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize("seed", [42, 1337, 101010, 123456]) def test_buffer_shuffled_examples_iterable(seed): n, buffer_size = 100, 30 generator = np.random.default_rng(seed) base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator) rng = deepcopy(generator) expected_indices_used_for_shuffling = list( islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size) ) # indices to pick in the shuffle buffer should all be in the right range assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling) # it should be random indices assert expected_indices_used_for_shuffling != list(range(buffer_size)) # The final order of examples is the result of a shuffle buffer. all_examples = list(generate_examples_fn(n=n)) # We create a buffer and we pick random examples from it. buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:] expected = [] for i, index_to_pick in enumerate(expected_indices_used_for_shuffling): expected.append(buffer[index_to_pick]) # The picked examples are directly replaced by the next examples from the iterable. buffer[index_to_pick] = rest.pop(0) # Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples. rng.shuffle(buffer) expected += buffer assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert sorted(ex_iterable) == sorted(all_examples) def test_cycling_multi_sources_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar")))) # The cycling stops as soon as one iterable is out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unecessary expected = expected[:-1] assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable)) assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)]) def test_randomly_cycling_multi_sources_examples_iterable(probabilities): seed = 42 generator = np.random.default_rng(seed) ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable( [ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities ) # The source used randomly changes at each example. It stops when one of the iterators is empty. 
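    # Rebuild the expected interleaving with a copy of the same RNG: draw source
    # indices with the given probabilities and take the next example from the
    # chosen source until one of the sources runs out.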
rng = deepcopy(generator) iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar")) indices_iterator = cycle(rng.choice(len(iterators), size=1000, p=probabilities)) expected = [] lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))] for i in indices_iterator: if lengths[0] == 0 or lengths[1] == 0: break for key, example in iterators[i]: expected.append((key, example)) lengths[i] -= 1 break else: break assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)]) @pytest.mark.parametrize("stopping_strategy", ["first_exhausted", "all_exhausted"]) @pytest.mark.parametrize("step", [-1, 0, 5, 20, 30, 300]) def test_randomly_cycling_multi_sources_examples_iterable_state(probabilities, stopping_strategy, step): seed = 42 generator = np.random.default_rng(seed) ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable( [ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy, ) step = min(step, len(list(ex_iterable)) - 1) ex_iterable._init_state_dict() state_dict = ex_iterable.state_dict() examples = [] for i, x in enumerate(ex_iterable): examples.append(x) if i == step: state_dict = ex_iterable.state_dict() ex_iterable.load_state_dict(state_dict) assert examples[step + 1 :] == list(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [{**x, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] is_empty = False if batched is False: # `drop_last_batch` has no effect here expected = [{**x, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] if len(examples) < batch_size: # ignore last batch break batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size] if all_examples: expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) else: is_empty = True if not is_empty: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected else: with pytest.raises(StopIteration): next(iter(ex_iterable)) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id ( 25, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, 10, ), # add the index to the id (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0 ], ) def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) indices = list(range(batch_offset, batch_offset + len(examples))) transformed_batch = func(batch, indices) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, remove_columns", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10 ( 50, lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)}, True, 8, ["extra_column", "id"], ), # make a duplicate of each example (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns] if batched is False: expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove} expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, fn_kwargs", [ (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None), (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}), (25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}), (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0 ], ) def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs ) all_examples = [x for _, x in generate_examples_fn(n=n)] if fn_kwargs is None: fn_kwargs = {} if batched is False: expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch, **fn_kwargs) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id (25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10 (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(*[batch[col] for col in columns_to_input]) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_arrow_format_from_arrow_examples_iterable(n, func, batched, batch_size): base_ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"n": n}) 
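    # Rebatch the underlying Arrow tables so the Arrow-formatted map function
    # receives tables of `batch_size` rows (single-row tables when not batched).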
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] is_empty = False if batched is False: # `drop_last_batch` has no effect here expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] else: all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] if len(examples) < batch_size: # ignore last batch break batch = pa.Table.from_pylist(examples) out = func(batch) all_transformed_examples.extend( out.to_pylist() ) # we don't merge with input since they're arrow tables and not dictionaries all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size] if all_examples: expected = all_transformed_examples else: is_empty = True if not is_empty: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected else: with pytest.raises(StopIteration): next(iter(ex_iterable)) @pytest.mark.parametrize( "n, func, batched, batch_size", [ ( 3, lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)), False, None, ), # add the index to the id ( 25, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, 10, ), # add the index 
to the id (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0 ], ) def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, remove_columns", [ ( 3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None, ["extra_column"], ), # just add 1 to the id (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10 ( 50, lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}), True, 8, ["extra_column", "id"], ), # make a duplicate of each example (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns] if batched is False: expected = [ {**{k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}} for x in all_examples ] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend( [{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()] ) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected 
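    # Removing columns under the Arrow format should not affect state-dict resumption.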
assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, fn_kwargs", [ (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None), (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}), (25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}), (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0 ], ) def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if fn_kwargs is None: fn_kwargs = {} if batched is False: expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch, **fn_kwargs).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id (25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10 (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = [ func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples ] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist()) 
assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: x["id"] % 2 == 0, False, None), # keep even number (3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1 (25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10 (5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None (5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0 (3, lambda x: False, False, None), # return 0 examples (3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10 ], ) def test_filtered_examples_iterable(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [x for x in all_examples if func(x)] else: # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) mask = func(batch) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) if expected: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x, index: index % 2 == 0, False, None), # keep even number (25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10 (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0 ], ) def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [x for idx, x in enumerate(all_examples) if func(x, idx)] else: # For batched filter we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) indices = list(range(batch_offset, batch_offset + len(examples))) mask = func(batch, indices) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even number (25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10 (3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None (3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None ], ) def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])] else: # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) mask = func(*[batch[col] for col in columns_to_input]) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected assert_load_state_dict_resumes_iteration(ex_iterable) def test_skip_examples_iterable(): total, count = 10, 2 base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total}) skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count) expected = list(generate_examples_fn(n=total))[count:] assert list(skip_ex_iterable) == expected assert ( skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable ), "skip examples makes the shards order fixed" assert_load_state_dict_resumes_iteration(skip_ex_iterable) def test_take_examples_iterable(): total, count = 10, 2 base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total}) take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count) expected = list(generate_examples_fn(n=total))[:count] assert list(take_ex_iterable) == expected assert ( take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable ), "skip examples makes the shards order fixed" assert_load_state_dict_resumes_iteration(take_ex_iterable) def test_vertically_concatenated_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, 
{"label": 5}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2] assert [x for _, x in concatenated_ex_iterable] == expected assert_load_state_dict_resumes_iteration(concatenated_ex_iterable) def test_vertically_concatenated_examples_iterable_with_different_columns(): # having different columns is supported # Though iterable datasets fill the missing data with nulls ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2] assert [x for _, x in concatenated_ex_iterable] == expected assert_load_state_dict_resumes_iteration(concatenated_ex_iterable) def test_vertically_concatenated_examples_iterable_shuffle_data_sources(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) rng = np.random.default_rng(42) shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng) # make sure the list of examples iterables is shuffled, and each examples iterable is shuffled expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [ x for _, x in ex_iterable1.shuffle_data_sources(rng) ] assert [x for _, x in shuffled_ex_iterable] == expected assert_load_state_dict_resumes_iteration(shuffled_ex_iterable) def test_horizontally_concatenated_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) with pytest.raises(ValueError): # column "id" is duplicated -> raise an error list(concatenated_ex_iterable) ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"]) concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)] assert [x for _, x in concatenated_ex_iterable] == expected assert ( concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable ), "horizontally concatenated examples makes the shards order fixed" assert_load_state_dict_resumes_iteration(concatenated_ex_iterable) @pytest.mark.parametrize( "ex_iterable", [ ExamplesIterable(generate_examples_fn, {}), ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)), SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]), StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0), CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), RandomlyCyclingMultiSourcesExamplesIterable( [ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42) ), MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x), 
MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x), FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True), FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True), BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)), SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), TypedExamplesIterable( ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} ), ], ) def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable): assert ex_iterable.iter_arrow is None if not isinstance(ex_iterable, BufferShuffledExamplesIterable): assert_load_state_dict_resumes_iteration(ex_iterable) @pytest.mark.parametrize( "ex_iterable", [ ArrowExamplesIterable(generate_tables_fn, {}), ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)), SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]), # StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented # CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented # RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented MappedExamplesIterable( RebatchedArrowExamplesIterable(ExamplesIterable(generate_examples_fn, {}), batch_size=1), lambda t: t, formatting=FormattingConfig(format_type="arrow"), ), MappedExamplesIterable( RebatchedArrowExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), batch_size=1), lambda t: t, formatting=FormattingConfig(format_type="arrow"), ), FilteredExamplesIterable( RebatchedArrowExamplesIterable(ExamplesIterable(generate_examples_fn, {}), batch_size=1), lambda t: True, formatting=FormattingConfig(format_type="arrow"), ), FilteredExamplesIterable( RebatchedArrowExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), batch_size=1), lambda t: True, formatting=FormattingConfig(format_type="arrow"), ), # BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented # SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented # TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented TypedExamplesIterable( ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} ), ], ) def test_iter_arrow(ex_iterable: _BaseExamplesIterable): assert ex_iterable.iter_arrow is not None key, pa_table = next(ex_iterable.iter_arrow()) assert isinstance(pa_table, pa.Table) assert_load_state_dict_resumes_arrow_iteration(ex_iterable) ############################ # # IterableDataset tests # ############################ def test_iterable_dataset(): dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {})) expected = [x for _, x in generate_examples_fn()] assert next(iter(dataset)) == expected[0] assert list(dataset) == expected def test_iterable_dataset_from_generator(): data = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, 
{"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] def gen(): yield from data dataset = IterableDataset.from_generator(gen) assert isinstance(dataset, IterableDataset) assert list(dataset) == data def test_iterable_dataset_from_generator_with_shards(): def gen(shard_names): for shard_name in shard_names: for i in range(10): yield {"shard_name": shard_name, "i": i} shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)] dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names}) assert isinstance(dataset, IterableDataset) assert dataset.n_shards == len(shard_names) @require_numpy1_on_windows def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str): with assert_arrow_memory_doesnt_increase(): dataset_from_file = IterableDataset.from_file(arrow_file) expected_features = dataset._resolve_features().features assert dataset_from_file.features.type == expected_features.type assert dataset_from_file.features == expected_features assert isinstance(dataset_from_file, IterableDataset) assert list(dataset_from_file) == list(dataset) @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_streaming(): import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [ ("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0), ] df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float") dataset = IterableDataset.from_spark(df) assert isinstance(dataset, IterableDataset) results = [] for ex in dataset: results.append(ex) assert results == [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_streaming_features(): import PIL.Image import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())] df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") features = Features({"idx": Value("int64"), "image": Image()}) dataset = IterableDataset.from_spark( df, features=features, ) assert isinstance(dataset, IterableDataset) results = [] for ex in dataset: results.append(ex) assert len(results) == 1 isinstance(results[0]["image"], PIL.Image.Image) @require_torch def test_iterable_dataset_torch_integration(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) import torch.utils.data assert isinstance(dataset, torch.utils.data.IterableDataset) assert isinstance(dataset, IterableDataset) @require_torch def test_iterable_dataset_torch_picklable(): import pickle ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch")) reloaded_dataset = pickle.loads(pickle.dumps(dataset)) import torch.utils.data assert isinstance(reloaded_dataset, IterableDataset) assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset) assert reloaded_dataset._formatting.format_type == "torch" assert len(list(dataset)) == len(list(reloaded_dataset)) @require_torch def test_iterable_dataset_with_format_torch(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) from torch.utils.data import DataLoader dataloader = DataLoader(dataset) assert len(list(dataloader)) == 
len(list(ex_iterable)) @require_torch def test_iterable_dataset_torch_dataloader_parallel(): from torch.utils.data import DataLoader ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) dataloader = DataLoader(dataset, num_workers=2, batch_size=None) result = list(dataloader) expected = [example for _, example in ex_iterable] assert len(result) == len(expected) assert {str(x) for x in result} == {str(x) for x in expected} @require_torch @pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning") @pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)]) def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers): from torch.utils.data import DataLoader ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(n_shards)]}) dataset = IterableDataset(ex_iterable) dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers) result = list(dataloader) expected = [example for _, example in ex_iterable] assert len(result) == len(expected) assert {str(x) for x in result} == {str(x) for x in expected} @require_torch @pytest.mark.integration @pytest.mark.parametrize("num_workers", [1, 2]) def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path): from torch.utils.data import DataLoader dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train") dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers) result = list(dataloader) assert len(result) == 2 @pytest.mark.parametrize("batch_size", [4, 5]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_iterable_dataset_iter_batch(batch_size, drop_last_batch): n = 25 dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n})) all_examples = [ex for _, ex in generate_examples_fn(n=n)] expected = [] for i in range(0, len(all_examples), batch_size): if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch: continue expected.append(_examples_to_batch(all_examples[i : i + batch_size])) assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0] assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected def test_iterable_dataset_info(): info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42) ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable, info=info) assert dataset.info == info assert dataset.description == info.description assert dataset.citation == info.citation assert dataset.size_in_bytes == info.size_in_bytes def test_iterable_dataset_set_epoch(dataset: IterableDataset): assert dataset._epoch == 0 dataset.set_epoch(42) assert dataset._epoch == 42 @pytest.mark.parametrize("seed", [None, 42, 1337]) @pytest.mark.parametrize("epoch", [None, 0, 1, 10]) def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch): buffer_size = 10 shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size) base_generator = shuffled_dataset._shuffling.generator if epoch is not None: shuffled_dataset.set_epoch(epoch) effective_generator = shuffled_dataset._effective_generator() assert effective_generator is not None if epoch is None or epoch == 0: assert is_rng_equal(base_generator, shuffled_dataset._effective_generator()) else: assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator()) effective_seed = 
deepcopy(base_generator).integers(0, 1 << 63) - epoch assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator()) def test_iterable_dataset_map( dataset: IterableDataset, ): func = lambda x: {"id+1": x["id"] + 1} # noqa: E731 mapped_dataset = dataset.map(func) assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable) assert mapped_dataset._ex_iterable.function is func assert mapped_dataset._ex_iterable.batched is False assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])} def test_iterable_dataset_map_batched( dataset: IterableDataset, ): func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731 batch_size = 3 dataset = dataset.map(func, batched=True, batch_size=batch_size) assert isinstance(dataset._ex_iterable, MappedExamplesIterable) assert dataset._ex_iterable.function is func assert dataset._ex_iterable.batch_size == batch_size assert next(iter(dataset)) == {"id": 0, "id+1": 1} def test_iterable_dataset_map_complex_features( dataset: IterableDataset, ): # https://github.com/huggingface/datasets/issues/3505 ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"}) features = Features( { "id": Value("int64"), "label": Value("string"), } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"])) dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) assert isinstance(dataset._ex_iterable, MappedExamplesIterable) features["label"] = ClassLabel(names=["negative", "positive"]) assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [ features.encode_example(ex) for _, ex in ex_iterable ] def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None: # https://github.com/huggingface/datasets/issues/3888 ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"}) features_before_map = Features( { "id": Value("int64"), "label": Value("string"), } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map)) assert dataset.info.features is not None assert dataset.info.features == features_before_map features_after_map = Features( { "id": Value("int64"), "label": Value("string"), "target": Value("string"), } ) dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map) assert dataset.info.features is not None assert dataset.info.features == features_after_map def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None: fn_kwargs = {"y": 1} mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs) assert mapped_dataset._ex_iterable.batched is False assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1} batch_size = 3 mapped_dataset = dataset.map( lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs ) assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable) assert mapped_dataset._ex_iterable.batch_size == batch_size assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1} def test_iterable_dataset_filter(dataset: IterableDataset) -> None: fn_kwargs = {"y": 1} filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs) assert filtered_dataset._ex_iterable.batched is False assert next(iter(filtered_dataset)) == {"id": 1} @pytest.mark.parametrize("seed", [42, 1337, 101010, 123456]) @pytest.mark.parametrize("epoch", [None, 0, 
1]) def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch): buffer_size = 3 dataset = deepcopy(dataset) dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"] dataset = dataset.shuffle(seed, buffer_size=buffer_size) assert isinstance(dataset._shuffling, ShufflingConfig) assert isinstance(dataset._shuffling.generator, np.random.Generator) assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed)) # Effective seed is sum of seed and epoch if epoch is None or epoch == 0: effective_seed = seed else: dataset.set_epoch(epoch) effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch # Shuffling adds a shuffle buffer expected_first_example_index = next( iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size)) ) assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable) # It also shuffles the underlying examples iterable expected_ex_iterable = ExamplesIterable( generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]} ).shuffle_data_sources(np.random.default_rng(effective_seed)) assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable) assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1] @pytest.mark.parametrize( "features", [ None, Features( { "id": Value("int64"), "label": Value("int64"), } ), Features( { "id": Value("int64"), "label": ClassLabel(names=["negative", "positive"]), } ), ], ) def test_iterable_dataset_features(features): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) if features: expected = [features.encode_example(x) for _, x in ex_iterable] else: expected = [x for _, x in ex_iterable] assert list(dataset) == expected def test_iterable_dataset_features_cast_to_python(): ex_iterable = ExamplesIterable( generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1} ) features = Features( { "id": Value("int64"), "timestamp": Value("timestamp[us]"), "array": [Value("int64")], } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}] @pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"]) def test_iterable_dataset_with_format(dataset: IterableDataset, format_type): formatted_dataset = dataset.with_format(format_type) assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type) @require_torch def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset): from torch.utils.data import DataLoader, _DatasetKind dataloader = DataLoader(dataset) assert dataloader._dataset_kind == _DatasetKind.Iterable out = list(dataloader) assert len(out) == DEFAULT_N_EXAMPLES @require_torch def test_iterable_dataset_persists_epoch_in_torch_workers(dataset: IterableDataset): from torch.utils.data import DataLoader dataset = dataset.shuffle(seed=42) dataloader = DataLoader(dataset, num_workers=1, persistent_workers=True) epoch0 = list(dataloader) assert list(dataloader) == epoch0 dataset.set_epoch(1) assert list(dataloader) != epoch0 # Make sure pickle works even with torch objects in shared memory dataset_copy: IterableDataset = pickle.loads(pickle.dumps(dataset)) dataloader = DataLoader(dataset_copy, num_workers=1, persistent_workers=True) epoch1 = list(dataloader) assert 
list(dataloader) == epoch1 dataset.set_epoch(2) # this should not affect the copy assert list(dataloader) == epoch1 dataset_copy.set_epoch(2) assert list(dataloader) != epoch1 @pytest.mark.parametrize("n", [0, 2, int(1e10)]) def test_iterable_dataset_skip(dataset: IterableDataset, n): skip_dataset = dataset.skip(n) assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable) assert skip_dataset._ex_iterable.n == n assert list(skip_dataset) == list(dataset)[n:] @pytest.mark.parametrize("n", [0, 2, int(1e10)]) def test_iterable_dataset_take(dataset: IterableDataset, n): take_dataset = dataset.take(n) assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable) assert take_dataset._ex_iterable.n == n assert list(take_dataset) == list(dataset)[:n] @pytest.mark.parametrize("method", ["skip", "take"]) @pytest.mark.parametrize("after_shuffle", [False, True]) @pytest.mark.parametrize("count", [2, 5, 11]) def test_iterable_dataset_skip_or_take_after_shuffle(method, after_shuffle, count): seed = 42 n, n_shards = 3, 10 ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]}) dataset = IterableDataset(ex_iterable) shuffled_dataset = dataset if after_shuffle: shuffled_dataset = shuffled_dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES) shuffled_dataset = shuffled_dataset.skip(count) if method == "skip" else shuffled_dataset.take(count) # skip/take a shuffled dataset should not keep the same examples and shuffle the shards key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731 assert (len(list(dataset)) - count if method == "skip" else count) == len(list(shuffled_dataset)) assert sorted(list(dataset)[count:] if method == "skip" else list(dataset)[:count], key=key) != sorted( shuffled_dataset, key=key ) else: shuffled_dataset = shuffled_dataset.skip(count) if method == "skip" else shuffled_dataset.take(count) shuffled_dataset = shuffled_dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES) # shuffling a skip/take dataset should keep the same examples and don't shuffle the shards key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731 assert (len(list(dataset)) - count if method == "skip" else count) == len(list(shuffled_dataset)) assert sorted(list(dataset)[count:] if method == "skip" else list(dataset)[:count], key=key) == sorted( shuffled_dataset, key=key ) @pytest.mark.parametrize("method", ["skip", "take"]) @pytest.mark.parametrize("after_split_by_node", [False, True]) @pytest.mark.parametrize("count", [2, 5, 11]) def test_iterable_dataset_skip_or_take_after_split_by_node(method, after_split_by_node, count): n, n_shards = 3, 10 rank, world_size = 1, 2 ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]}) dataset = IterableDataset(ex_iterable) distributed_dataset = dataset true_distributed_dataset = split_dataset_by_node(dataset, rank=rank, world_size=world_size) if after_split_by_node: distributed_dataset = split_dataset_by_node(distributed_dataset, rank=rank, world_size=world_size) distributed_dataset = distributed_dataset.skip(count) if method == "skip" else distributed_dataset.take(count) assert ( list(true_distributed_dataset)[count:] if method == "skip" else list(true_distributed_dataset)[:count] == list(distributed_dataset) ) else: distributed_dataset = distributed_dataset.skip(count) if method == "skip" else distributed_dataset.take(count) distributed_dataset = split_dataset_by_node(distributed_dataset, rank=rank, world_size=world_size) assert len( 
list(true_distributed_dataset)[count // world_size :] if method == "skip" else list(true_distributed_dataset)[: count // world_size] ) == len(list(distributed_dataset)) def test_iterable_dataset_add_column(dataset_with_several_columns: IterableDataset): new_column = list(range(3 * DEFAULT_N_EXAMPLES)) new_dataset = dataset_with_several_columns.add_column("new_column", new_column) assert list(new_dataset) == [ {**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns) ] new_dataset = new_dataset._resolve_features() assert "new_column" in new_dataset.column_names def test_iterable_dataset_rename_column(dataset_with_several_columns: IterableDataset): new_dataset = dataset_with_several_columns.rename_column("id", "new_id") assert list(new_dataset) == [ {("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # rename the column if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id") assert new_dataset.features is not None assert new_dataset.column_names is not None assert "id" not in new_dataset.column_names assert "new_id" in new_dataset.column_names def test_iterable_dataset_rename_columns(dataset_with_several_columns: IterableDataset): column_mapping = {"id": "new_id", "filepath": "filename"} new_dataset = dataset_with_several_columns.rename_columns(column_mapping) assert list(new_dataset) == [ {column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # rename the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) assert all(c in new_dataset.column_names for c in ["new_id", "filename"]) def test_iterable_dataset_remove_columns(dataset_with_several_columns: IterableDataset): new_dataset = dataset_with_several_columns.remove_columns("id") assert list(new_dataset) == [ {k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns ] assert new_dataset.features is None new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"]) assert list(new_dataset) == [ {k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # remove the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"]) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c not in new_dataset.features for c in ["id", "filepath"]) assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) def test_iterable_dataset_select_columns(dataset_with_several_columns: IterableDataset): new_dataset = dataset_with_several_columns.select_columns("id") assert list(new_dataset) == [ {k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns ] assert new_dataset.features is None new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"]) assert list(new_dataset) == [ {k: v for k, v in example.items() if k in ("id", 
"filepath")} for example in dataset_with_several_columns ] assert new_dataset.features is None # select the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"]) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c in new_dataset.features for c in ["id", "filepath"]) assert all(c in new_dataset.column_names for c in ["id", "filepath"]) def test_iterable_dataset_cast_column(): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) features = Features({"id": Value("int64"), "label": Value("int64")}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) casted_dataset = dataset.cast_column("label", Value("bool")) casted_features = features.copy() casted_features["label"] = Value("bool") assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable] def test_iterable_dataset_cast(): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) features = Features({"id": Value("int64"), "label": Value("int64")}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) new_features = Features({"id": Value("int64"), "label": Value("bool")}) casted_dataset = dataset.cast(new_features) assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable] def test_iterable_dataset_resolve_features(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) assert dataset.features is None assert dataset.column_names is None dataset = dataset._resolve_features() assert dataset.features == Features( { "id": Value("int64"), } ) assert dataset.column_names == ["id"] def test_iterable_dataset_resolve_features_keep_order(): def gen(): yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}]) ex_iterable = ExamplesIterable(gen, {}) dataset = IterableDataset(ex_iterable)._resolve_features() # columns appear in order of appearance in the dataset assert list(dataset.features) == ["a", "c", "b"] assert dataset.column_names == ["a", "c", "b"] def test_iterable_dataset_with_features_fill_with_none(): def gen(): yield from zip(range(2), [{"a": 1}, {"b": 1}]) ex_iterable = ExamplesIterable(gen, {}) info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")})) dataset = IterableDataset(ex_iterable, info=info) assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}] def test_concatenate_datasets(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) dataset2 = IterableDataset(ex_iterable2) concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert list(concatenated_dataset) == list(dataset1) + list(dataset2) def test_concatenate_datasets_resolves_features(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) dataset2 = IterableDataset(ex_iterable2) concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert concatenated_dataset.features is not None assert sorted(concatenated_dataset.features) == ["id", "label"] def test_concatenate_datasets_with_different_columns(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {}) dataset2 = 
IterableDataset(ex_iterable2) # missing column "label" -> it should be replaced with nulls extended_dataset2_list = [{"label": None, **x} for x in dataset2] concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list # change order concatenated_dataset = concatenate_datasets([dataset2, dataset1]) assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1) def test_concatenate_datasets_axis_1(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) dataset2 = IterableDataset(ex_iterable2) with pytest.raises(ValueError): # column "id" is duplicated -> raise an error concatenate_datasets([dataset1, dataset2], axis=1) concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)] def test_concatenate_datasets_axis_1_resolves_features(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) dataset2 = IterableDataset(ex_iterable2).remove_columns("id") concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) assert concatenated_dataset.features is not None assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"] def test_concatenate_datasets_axis_1_with_different_lengths(): n1 = 10 ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1}) dataset1 = IterableDataset(ex_iterable1) n2 = 5 ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2}) dataset2 = IterableDataset(ex_iterable2).remove_columns("id") # missing rows -> they should be replaced with nulls extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2) concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)] # change order concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)] @pytest.mark.parametrize( "probas, seed, expected_length, stopping_strategy", [ (None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"), ([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), ([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), ([0.2, 0.5, 0.3], 42, None, "first_exhausted"), ([0.1, 0.1, 0.8], 1337, None, "first_exhausted"), ([0.5, 0.2, 0.3], 101010, None, "first_exhausted"), (None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"), ([0.2, 0.5, 0.3], 42, None, "all_exhausted"), ([0.1, 0.1, 0.8], 1337, None, "all_exhausted"), ([0.5, 0.2, 0.3], 101010, None, "all_exhausted"), ], ) def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy): d1 = dataset d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) d3 = dataset.with_format("python") datasets = [d1, d2, d3] merged_dataset = interleave_datasets( datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy ) def fill_default(example): return {"id": None, "id+1": None, **example} # Check the examples iterable assert isinstance( merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, 
RandomlyCyclingMultiSourcesExamplesIterable) ) # Check that it is deterministic if seed is not None: merged_dataset2 = interleave_datasets( [d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy ) assert list(merged_dataset) == list(merged_dataset2) # Check features assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")}) # Check first example if seed is not None: rng = np.random.default_rng(seed) i = next(iter(cycle(rng.choice(len(datasets), size=1000, p=probas)))) assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i]))) else: assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets) # Compute length it case it's random if expected_length is None: expected_length = 0 counts = np.array([len(list(d)) for d in datasets]) bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any rng = np.random.default_rng(seed) for i in cycle(rng.choice(len(datasets), size=1000, p=probas)): counts[i] -= 1 expected_length += 1 if bool_strategy_func(counts <= 0): break # Check length assert len(list(merged_dataset)) == expected_length def test_interleave_datasets_with_features( dataset: IterableDataset, ): features = Features( { "id": Value("int64"), "label": ClassLabel(names=["negative", "positive"]), } ) ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0}) dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) merged_dataset = interleave_datasets([dataset, dataset_with_features]) assert merged_dataset.features == features def test_interleave_datasets_with_oversampling(): # Test hardcoded results d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {})) d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {})) d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {})) expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] # Check oversampling strategy without probabilities assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values # Check oversampling strategy with probabilities expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13] values = [ x["a"] for x in interleave_datasets( [d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted" ) ] assert values == expected_values @require_torch def test_with_format_torch(dataset_with_several_columns: IterableDataset): import torch dset = dataset_with_several_columns.with_format(type="torch") example = next(iter(dset)) batch = next(iter(dset.iter(batch_size=3))) assert len(example) == 3 assert isinstance(example["id"], torch.Tensor) assert list(example["id"].shape) == [] assert example["id"].item() == 0 assert isinstance(batch["id"], torch.Tensor) assert isinstance(example["filepath"], list) assert isinstance(example["filepath"][0], str) assert example["filepath"][0] == "data0.txt" assert isinstance(batch["filepath"], list) assert isinstance(example["metadata"], dict) assert isinstance(example["metadata"]["sources"], list) assert isinstance(example["metadata"]["sources"][0], str) assert isinstance(batch["metadata"], list) @require_tf def test_with_format_tf(dataset_with_several_columns: IterableDataset): import tensorflow as tf dset = dataset_with_several_columns.with_format(type="tensorflow") 
example = next(iter(dset)) batch = next(iter(dset.iter(batch_size=3))) assert isinstance(example["id"], tf.Tensor) assert list(example["id"].shape) == [] assert example["id"].numpy().item() == 0 assert isinstance(batch["id"], tf.Tensor) assert isinstance(example["filepath"], tf.Tensor) assert example["filepath"][0] == b"data0.txt" assert isinstance(batch["filepath"], tf.Tensor) assert isinstance(example["metadata"], dict) assert isinstance(example["metadata"]["sources"], tf.Tensor) assert isinstance(batch["metadata"], list) def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset): def func(example): return {"array": np.array([1, 2, 3])} dset_test = dataset.map(func) example = next(iter(dset_test)) # not aligned with Dataset.map because we don't convert back to lists after map() assert isinstance(example["array"], np.ndarray) def test_formatted_map(dataset: IterableDataset): dataset = dataset.with_format("np") assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) dataset = dataset.with_format(None) assert isinstance(next(dataset.iter(batch_size=3))["id"], list) def add_one_numpy(example): assert isinstance(example["id"], np.ndarray) return {"id": example["id"] + 1} dataset = dataset.with_format("np") dataset = dataset.map(add_one_numpy, batched=True) assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) dataset = dataset.with_format(None) assert isinstance(next(dataset.iter(batch_size=3))["id"], list) @pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)]) def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers): from torch.utils.data import DataLoader ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]}) dataset1 = IterableDataset(ex_iterable1).with_format("torch") ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]}) dataset2 = IterableDataset(ex_iterable2).with_format("torch") dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted") assert dataset_merged.n_shards == min(n_shards1, n_shards2) dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers) result = list(dataloader) expected_length = 2 * min( len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2]) ) # some samples may be missing because the stopping strategy is applied per process assert expected_length - num_workers <= len(result) <= expected_length assert len(result) == len({str(x) for x in result}) def filter_func(batch): return batch["id"] == 4 def map_func(batch): batch["id"] *= 2 return batch def test_pickle_after_many_transforms(dataset_with_several_columns): dataset = dataset_with_several_columns dataset = dataset.remove_columns(["filepath"]) dataset = dataset.take(5) dataset = dataset.map(map_func) dataset = dataset.shuffle() dataset = dataset.skip(1) dataset = dataset.filter(filter_func) dataset = dataset.add_column("additional_col", ["something"]) dataset = dataset.rename_column("metadata", "metadata1") dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"}) dataset = dataset.select_columns(["id1", "additional_col"]) unpickled_dataset = pickle.loads(pickle.dumps(dataset)) assert list(unpickled_dataset) == list(dataset) @require_torchdata_stateful_dataloader def test_resume_dataloader(dataset: IterableDataset): from torchdata.stateful_dataloader import StatefulDataLoader 
dl = StatefulDataLoader(dataset) remaining = [] for i, x in enumerate(dl): if i == 2: state_dict = dl.state_dict() elif i > 2: remaining.append(x) dl = StatefulDataLoader(dataset) dl.load_state_dict(state_dict) assert remaining == list(dl) def test_iterable_dataset_batch(): # Create a simple IterableDataset data = [{"id": i, "text": f"Text {i}"} for i in range(10)] ds = IterableDataset.from_generator(lambda: (x for x in data)) # Test with batch_size=3, drop_last_batch=False batched_ds = ds.batch(batch_size=3, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 4 # 3 full batches and 1 partial batch for i, batch in enumerate(batches[:3]): # Check full batches assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3*i}", f"Text {3*i+1}", f"Text {3*i+2}"] # Check last partial batch assert len(batches[3]["id"]) == 1 assert len(batches[3]["text"]) == 1 assert batches[3]["id"] == [9] assert batches[3]["text"] == ["Text 9"] # Test with batch_size=3, drop_last_batch=True batched_ds = ds.batch(batch_size=3, drop_last_batch=True) batches = list(batched_ds) assert len(batches) == 3 # Only full batches for i, batch in enumerate(batches): assert len(batch["id"]) == 3 assert len(batch["text"]) == 3 assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2] assert batch["text"] == [f"Text {3*i}", f"Text {3*i+1}", f"Text {3*i+2}"] # Test with batch_size=4 (doesn't evenly divide dataset size) batched_ds = ds.batch(batch_size=4, drop_last_batch=False) batches = list(batched_ds) assert len(batches) == 3 # 2 full batches and 1 partial batch for i, batch in enumerate(batches[:2]): # Check full batches assert len(batch["id"]) == 4 assert len(batch["text"]) == 4 assert batch["id"] == [4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3] assert batch["text"] == [f"Text {4*i}", f"Text {4*i+1}", f"Text {4*i+2}", f"Text {4*i+3}"] # Check last partial batch assert len(batches[2]["id"]) == 2 assert len(batches[2]["text"]) == 2 assert batches[2]["id"] == [8, 9] assert batches[2]["text"] == ["Text 8", "Text 9"]
datasets/tests/test_iterable_dataset.py/0
{ "file_path": "datasets/tests/test_iterable_dataset.py", "repo_id": "datasets", "token_count": 42237 }
94
<jupyter_start><jupyter_text>Unit 8 Part 2: Advanced Deep Reinforcement Learning. Using Sample Factory to play Doom from pixelsIn this notebook, we will learn how to train a Deep Neural Network to collect objects in a 3D environment based on the game of Doom, a video of the resulting policy is shown below. We train this policy using [Sample Factory](https://www.samplefactory.dev/), an asynchronous implementation of the PPO algorithm.Please note the following points:* [Sample Factory](https://www.samplefactory.dev/) is an advanced RL framework and **only functions on Linux and Mac** (not Windows).* The framework performs best on a **GPU machine with many CPU cores**, where it can achieve speeds of 100k interactions per second. The resources available on a standard Colab notebook **limit the performance of this library**. So the speed in this setting **does not reflect the real-world performance**.* Benchmarks for Sample Factory are available in a number of settings, check out the [examples](https://github.com/alex-petrenko/sample-factory/tree/master/sf_examples) if you want to find out more.<jupyter_code>from IPython.display import HTML HTML('''<video width="640" height="480" controls> <source src="https://huggingface.co/edbeeching/doom_health_gathering_supreme_3333/resolve/main/replay.mp4" type="video/mp4">Your browser does not support the video tag.</video>''' )<jupyter_output><empty_output><jupyter_text>To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push one model:- `doom_health_gathering_supreme` get a result of >= 5.To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward**If you don't find your model, **go to the bottom of the page and click on the refresh button**For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Before starting to train our agent, let's **study the library and environments we're going to use**. Sample Factory[Sample Factory](https://www.samplefactory.dev/) is one of the **fastest RL libraries focused on very efficient synchronous and asynchronous implementations of policy gradients (PPO)**.Sample Factory is thoroughly **tested, used by many researchers and practitioners**, and is actively maintained. Our implementation is known to **reach SOTA performance in a variety of domains while minimizing RL experiment training time and hardware requirements**. 
Key features- Highly optimized algorithm [architecture](https://www.samplefactory.dev/06-architecture/overview/) for maximum learning throughput- [Synchronous and asynchronous](https://www.samplefactory.dev/07-advanced-topics/sync-async/) training regimes- [Serial (single-process) mode](https://www.samplefactory.dev/07-advanced-topics/serial-mode/) for easy debugging- Optimal performance in both CPU-based and [GPU-accelerated environments](https://www.samplefactory.dev/09-environment-integrations/isaacgym/)- Single- & multi-agent training, self-play, supports [training multiple policies](https://www.samplefactory.dev/07-advanced-topics/multi-policy-training/) at once on one or many GPUs- Population-Based Training ([PBT](https://www.samplefactory.dev/07-advanced-topics/pbt/))- Discrete, continuous, hybrid action spaces- Vector-based, image-based, dictionary observation spaces- Automatically creates a model architecture by parsing action/observation space specification. Supports [custom model architectures](https://www.samplefactory.dev/03-customization/custom-models/)- Designed to be imported into other projects, [custom environments](https://www.samplefactory.dev/03-customization/custom-environments/) are first-class citizens- Detailed [WandB and Tensorboard summaries](https://www.samplefactory.dev/05-monitoring/metrics-reference/), [custom metrics](https://www.samplefactory.dev/05-monitoring/custom-metrics/)- [HuggingFace 🤗 integration](https://www.samplefactory.dev/10-huggingface/huggingface/) (upload trained models and metrics to the Hub)- [Multiple](https://www.samplefactory.dev/09-environment-integrations/mujoco/) [example](https://www.samplefactory.dev/09-environment-integrations/atari/) [environment](https://www.samplefactory.dev/09-environment-integrations/vizdoom/) [integrations](https://www.samplefactory.dev/09-environment-integrations/dmlab/) with tuned parameters and trained modelsAll of the above policies are available on the 🤗 hub. Search for the tag [sample-factory](https://huggingface.co/models?library=sample-factory&sort=downloads) How sample-factory worksSample-factory is one of the **most highly optimized RL implementations available to the community**.It works by **spawning multiple processes that run rollout workers, inference workers and a learner worker**.The *workers* **communicate through shared memory, which lowers the communication cost between processes**.The *rollout workers* interact with the environment and send observations to the *inference workers*.The *inference workers* query a fixed version of the policy and **send actions back to the rollout worker**.After *k* steps the rollout workers send a trajectory of experience to the learner worker, **which it uses to update the agent’s policy network**. Actor Critic models in Sample-factoryActor Critic models in Sample Factory are composed of three components:- **Encoder** - Processes input observations (images, vectors) and maps them to a vector. This is the part of the model you will most likely want to customize.- **Core** - Integrates vectors from one or more encoders and can optionally include a single- or multi-layer LSTM/GRU in a memory-based agent.- **Decoder** - Applies additional layers to the output of the model core before computing the policy and value outputs.The library has been designed to automatically support any observation and action spaces. Users can easily add their own custom models, as sketched below. 
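To make the customization story concrete, here is a rough, illustrative sketch of what registering a custom encoder could look like. It is not taken from this notebook: the `CustomEncoder` class and `make_custom_encoder` factory are hypothetical names, and the `Encoder` import path is assumed from the Sample Factory 2.x examples, so treat it as a starting point and check the documentation linked below for the exact interface.

```python
import torch
import torch.nn as nn

from sample_factory.algo.utils.context import global_model_factory
from sample_factory.model.encoder import Encoder  # assumed import path, see the Sample Factory docs


class CustomEncoder(Encoder):
    """Hypothetical encoder: maps image observations to a flat feature vector."""

    def __init__(self, cfg, obs_space):
        super().__init__(cfg)
        in_channels = obs_space["obs"].shape[0]  # assumes channel-first image observations
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Flatten(),
        )
        # run a dummy observation through the stack once to infer the output size
        with torch.no_grad():
            dummy = torch.zeros(1, *obs_space["obs"].shape)
            self._out_size = self.conv(dummy).shape[1]

    def forward(self, obs_dict):
        # Sample Factory passes observations as a dict; "obs" holds the main tensor
        return self.conv(obs_dict["obs"].float())

    def get_out_size(self) -> int:
        return self._out_size


def make_custom_encoder(cfg, obs_space) -> Encoder:
    return CustomEncoder(cfg, obs_space)


# Same registration call this notebook uses later for the ViZDoom encoder
global_model_factory().register_encoder_factory(make_custom_encoder)
```
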
You can find out more in the [documentation](https://www.samplefactory.dev/03-customization/custom-models/actor-critic-models-in-sample-factory). ViZDoom[ViZDoom](https://vizdoom.cs.put.edu.pl/) is an **open-source python interface for the Doom Engine**.The library was created in 2016 by Marek Wydmuch and Michal Kempka at the Institute of Computing Science, Poznan University of Technology, Poland.The library enables the **training of agents directly from the screen pixels in a number of scenarios**, including team deathmatch, shown in the video below. Because the ViZDoom environment is based on a game that was created in the 90s, it can be run on modern hardware at accelerated speeds, **allowing us to learn complex AI behaviors fairly quickly**.The library includes features such as:- Multi-platform (Linux, macOS, Windows),- API for Python and C++,- [OpenAI Gym](https://www.gymlibrary.dev/) environment wrappers,- Easy-to-create custom scenarios (visual editors, scripting language, and examples available),- Async and sync single-player and multiplayer modes,- Lightweight (few MBs) and fast (up to 7000 fps in sync mode, single-threaded),- Customizable resolution and rendering parameters,- Access to the depth buffer (3D vision),- Automatic labeling of game objects visible in the frame,- Access to the audio buffer,- Access to the list of actors/objects and map geometry,- Off-screen rendering and episode recording,- Time scaling in async mode. We first need to install some dependencies that are required for the ViZDoom environmentNow that our Colab runtime is set up, we can start by installing the dependencies required to run ViZDoom on Linux.If you are following along on your own machine or on a Mac, you will want to follow the installation instructions on the [github page](https://github.com/Farama-Foundation/ViZDoom/blob/master/doc/Quickstart.md-quickstart-for-macos-and-anaconda3-python-36).<jupyter_code>%%capture %%bash # Install ViZDoom deps from # https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux apt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \ nasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \ libopenal-dev timidity libwildmidi-dev unzip ffmpeg # Boost libraries apt-get install libboost-all-dev # Lua binding dependencies apt-get install liblua5.1-dev<jupyter_output><empty_output><jupyter_text>Then we can install Sample Factory and ViZDoom- This can take 7min<jupyter_code># install python libraries # thanks toinsson !pip install faster-fifo==1.4.2 !pip install vizdoom !pip install sample-factory==2.0.2<jupyter_output><empty_output><jupyter_text>Setting up the Doom Environment in sample-factory<jupyter_code>import functools from sample_factory.algo.utils.context import global_model_factory from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args from sample_factory.envs.env_utils import register_env from sample_factory.train import run_rl from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec # Registers all the ViZDoom environments def register_vizdoom_envs(): for env_spec in DOOM_ENVS: make_env_func = functools.partial(make_doom_env_from_spec, env_spec) register_env(env_spec.name, make_env_func) # Sample Factory allows the registration of a custom Neural Network architecture # See 
https://github.com/alex-petrenko/sample-factory/blob/master/sf_examples/vizdoom/doom/doom_model.py for more details def register_vizdoom_models(): global_model_factory().register_encoder_factory(make_vizdoom_encoder) def register_vizdoom_components(): register_vizdoom_envs() register_vizdoom_models() # parse the command line args and create a config def parse_vizdoom_cfg(argv=None, evaluation=False): parser, _ = parse_sf_args(argv=argv, evaluation=evaluation) # parameters specific to Doom envs add_doom_env_args(parser) # override Doom default values for algo parameters doom_override_defaults(parser) # second parsing pass yields the final configuration final_cfg = parse_full_cfg(parser, argv) return final_cfg<jupyter_output><empty_output><jupyter_text>Now that the setup is complete, we can train the agent. We have chosen here to learn a ViZDoom task called `Health Gathering Supreme`. The scenario: Health Gathering SupremeThe objective of this scenario is to **teach the agent how to survive without knowing what makes it survive**. The agent knows only that **life is precious** and death is bad, so **it must learn what prolongs its existence and that its health is connected with it**.The map is a rectangle containing walls, with a green, acidic floor which **hurts the player periodically**. Initially there are some medkits spread uniformly over the map. A new medkit falls from the skies every now and then. **Medkits heal some portion of the player's health** - to survive, the agent needs to pick them up. The episode finishes after the player's death or on timeout.Further configuration:- Living_reward = 1- 3 available buttons: turn left, turn right, move forward- 1 available game variable: HEALTH- death penalty = 100You can find out more about the scenarios available in ViZDoom [here](https://github.com/Farama-Foundation/ViZDoom/tree/master/scenarios).There are also a number of more complex scenarios that have been created for ViZDoom, such as the ones detailed on [this github page](https://github.com/edbeeching/3d_control_deep_rl). Training the agent- We're going to train the agent for 4,000,000 steps; it will take approximately 20 minutes<jupyter_code>## Start the training, this should take around 15 minutes register_vizdoom_components() # The scenario we train on today is health gathering # other scenarios include "doom_basic", "doom_two_colors_easy", "doom_dm", "doom_dwango5", "doom_my_way_home", "doom_deadly_corridor", "doom_defend_the_center", "doom_defend_the_line" env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=8", "--num_envs_per_worker=4", "--train_for_env_steps=4000000"]) status = run_rl(cfg)<jupyter_output><empty_output><jupyter_text>Let's take a look at the performance of the trained policy and output a video of the agent.<jupyter_code>from sample_factory.enjoy import enjoy cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10"], evaluation=True) status = enjoy(cfg)<jupyter_output><empty_output><jupyter_text>Now let's visualize the performance of the agent<jupyter_code>from base64 import b64encode from IPython.display import HTML mp4 = open('/content/train_dir/default_experiment/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output><jupyter_text>The agent has learned something, but its performance could be better. 
We would clearly need to train for longer. But let's upload this model to the Hub. Now lets upload your checkpoint and video to the Hugging Face Hub To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**- Copy the token- Run the cell below and paste the token If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`<jupyter_code>from huggingface_hub import notebook_login notebook_login() !git config --global credential.helper store from sample_factory.enjoy import enjoy hf_username = "ThomasSimonini" # insert your HuggingFace username here cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--max_num_frames=100000", "--push_to_hub", f"--hf_repository={hf_username}/rl_course_vizdoom_health_gathering_supreme"], evaluation=True) status = enjoy(cfg)<jupyter_output><empty_output><jupyter_text>Let's load another model This agent's performance was good, but can do better! Let's download and visualize an agent trained for 10B timesteps from the hub.<jupyter_code>#download the agent from the hub !python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_health_gathering_supreme_2222 -d ./train_dir !ls train_dir/doom_health_gathering_supreme_2222 env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--experiment=doom_health_gathering_supreme_2222", "--train_dir=train_dir"], evaluation=True) status = enjoy(cfg) mp4 = open('/content/train_dir/doom_health_gathering_supreme_2222/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output><jupyter_text>Some additional challenges 🏆: Doom DeathmatchTraining an agent to play a Doom deathmatch **takes many hours on a more beefy machine than is available in Colab**.Fortunately, we have have **already trained an agent in this scenario and it is available in the 🤗 Hub!** Let’s download the model and visualize the agent’s performance.<jupyter_code># Download the agent from the hub !python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_deathmatch_bots_2222 -d ./train_dir<jupyter_output><empty_output><jupyter_text>Given the agent plays for a long time the video generation can take **10 minutes**.<jupyter_code>from sample_factory.enjoy import enjoy register_vizdoom_components() env = "doom_deathmatch_bots" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=1", "--experiment=doom_deathmatch_bots_2222", "--train_dir=train_dir"], evaluation=True) status = enjoy(cfg) mp4 = open('/content/train_dir/doom_deathmatch_bots_2222/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output>
deep-rl-class/notebooks/unit8/unit8_part2.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit8/unit8_part2.ipynb", "repo_id": "deep-rl-class", "token_count": 4950 }
95
# The Reinforcement Learning Framework [[the-reinforcement-learning-framework]] ## The RL Process [[the-rl-process]] <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> To understand the RL process, let’s imagine an agent learning to play a platform game: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. - The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/sars.jpg" alt="State, Action, Reward, Next State" width="100%"> The agent's goal is to _maximize_ its cumulative reward, **called the expected return.** ## The reward hypothesis: the central idea of Reinforcement Learning [[reward-hypothesis]] ⇒ Why is the goal of the agent to maximize the expected return? Because RL is based on the **reward hypothesis**, which is that all goals can be described as the **maximization of the expected return** (expected cumulative reward). That’s why in Reinforcement Learning, **to have the best behavior,** we aim to learn to take actions that **maximize the expected cumulative reward.** ## Markov Property [[markov-property]] In papers, you’ll see that the RL process is called a **Markov Decision Process** (MDP). We’ll talk again about the Markov Property in the following units. But if you need to remember something today about it, it's this: the Markov Property implies that our agent needs **only the current state to decide** what action to take and **not the history of all the states and actions** they took before. ## Observations/States Space [[obs-space]] Observations/States are the **information our agent gets from the environment.** In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock, etc. There is a differentiation to make between *observation* and *state*, however: - *State s*: is **a complete description of the state of the world** (there is no hidden information). In a fully observed environment. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/chess.jpg" alt="Chess"> <figcaption>In chess game, we receive a state from the environment since we have access to the whole check board information.</figcaption> </figure> In a chess game, we have access to the whole board information, so we receive a state from the environment. In other words, the environment is fully observed. - *Observation o*: is a **partial description of the state.** In a partially observed environment. 
<figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation.</figcaption> </figure> In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation. In Super Mario Bros, we are in a partially observed environment. We receive an observation **since we only see a part of the level.** <Tip> In this course, we use the term "state" to denote both state and observation, but we will make the distinction in implementations. </Tip> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/obs_space_recap.jpg" alt="Obs space recap" width="100%"> ## Action Space [[action-space]] The Action space is the set of **all possible actions in an environment.** The actions can come from a *discrete* or *continuous space*: - *Discrete space*: the number of possible actions is **finite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>In Super Mario Bros, we have only 4 possible actions: left, right, up (jumping) and down (crouching).</figcaption> </figure> Again, in Super Mario Bros, we have a finite set of actions since we have only 4 directions. - *Continuous space*: the number of possible actions is **infinite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/self_driving_car.jpg" alt="Self Driving Car"> <figcaption>A Self Driving Car agent has an infinite number of possible actions since it can turn left 20°, 21,1°, 21,2°, honk, turn right 20°… </figcaption> </figure> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/action_space.jpg" alt="Action space recap" width="100%"> Taking this information into consideration is crucial because it will **have importance when choosing the RL algorithm in the future.** ## Rewards and the discounting [[rewards]] The reward is fundamental in RL because it’s **the only feedback** for the agent. Thanks to it, our agent knows **if the action taken was good or not.** The cumulative reward at each time step **t** can be written as: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_1.jpg" alt="Rewards"> <figcaption>The cumulative reward equals the sum of all rewards in the sequence. </figcaption> </figure> Which is equivalent to: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_2.jpg" alt="Rewards"> <figcaption>The cumulative reward = rt+1 (rt+k+1 = rt+0+1 = rt+1)+ rt+2 (rt+k+1 = rt+1+1 = rt+2) + ... </figcaption> </figure> However, in reality, **we can’t just add them like that.** The rewards that come sooner (at the beginning of the game) **are more likely to happen** since they are more predictable than the long-term future reward. Let’s say your agent is this tiny mouse that can move one tile each time step, and your opponent is the cat (that can move too). 
The mouse's goal is **to eat the maximum amount of cheese before being eaten by the cat.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_3.jpg" alt="Rewards" width="100%"> As we can see in the diagram, **it’s more probable to eat the cheese near us than the cheese close to the cat** (the closer we are to the cat, the more dangerous it is). Consequently, **the reward near the cat, even if it is bigger (more cheese), will be more discounted** since we’re not really sure we’ll be able to eat it. To discount the rewards, we proceed like this: 1. We define a discount rate called gamma. **It must be between 0 and 1.** Most of the time between **0.95 and 0.99**. - The larger the gamma, the smaller the discount. This means our agent **cares more about the long-term reward.** - On the other hand, the smaller the gamma, the bigger the discount. This means our **agent cares more about the short term reward (the nearest cheese).** 2. Then, each reward will be discounted by gamma to the exponent of the time step. As the time step increases, the cat gets closer to us, **so the future reward is less and less likely to happen.** Our discounted expected cumulative reward is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_4.jpg" alt="Rewards" width="100%">
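To make the discounting concrete, here is a small Python sketch (illustrative only: the reward values and the gamma values are made up for the example, they are not part of the course code) that computes a discounted return for a sequence of rewards:

```python
# Minimal sketch: discounted return G_t = r_{t+1} + gamma * r_{t+2} + gamma^2 * r_{t+3} + ...
def discounted_return(rewards, gamma=0.99):
    """Sum of rewards, each discounted by gamma raised to its time step."""
    return sum((gamma**k) * r for k, r in enumerate(rewards))

# Example: three nearby cheeses, then a bigger cheese close to the cat
rewards = [1.0, 1.0, 1.0, 10.0]
print(discounted_return(rewards, gamma=0.95))  # later rewards still count a lot
print(discounted_return(rewards, gamma=0.50))  # with a small gamma, later rewards count much less
```

Playing with gamma in this sketch shows exactly the trade-off described above: a large gamma keeps the far-away cheese attractive, a small gamma makes the agent focus on the cheese right in front of it.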
deep-rl-class/units/en/unit1/rl-framework.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/rl-framework.mdx", "repo_id": "deep-rl-class", "token_count": 2504 }
96
# Introducing Q-Learning [[q-learning]] ## What is Q-Learning? [[what-is-q-learning]] Q-Learning is an **off-policy value-based method that uses a TD approach to train its action-value function:** - *Off-policy*: we'll talk about that at the end of this unit. - *Value-based method*: finds the optimal policy indirectly by training a value or action-value function that will tell us **the value of each state or each state-action pair.** - *TD approach:* **updates its action-value function at each step instead of at the end of the episode.** **Q-Learning is the algorithm we use to train our Q-function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/> <figcaption>Given a state and action, our Q Function outputs a state-action value (also called Q-value)</figcaption> </figure> The **Q comes from "the Quality" (the value) of that action at that state.** Let's recap the difference between value and reward: - The *value of a state*, or a *state-action pair* is the expected cumulative reward our agent gets if it starts at this state (or state-action pair) and then acts accordingly to its policy. - The *reward* is the **feedback I get from the environment** after performing an action at a state. Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.** Let's go through an example of a maze. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-1.jpg" alt="Maze example"/> The Q-table is initialized. That's why all values are = 0. This table **contains, for each state and action, the corresponding state-action values.** For this simple example, the state is only defined by the position of the mouse. Therefore, we have 2*3 rows in our Q-table, one row for each possible position of the mouse. In more complex scenarios, the state could contain more information than the position of the actor. 
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-2.jpg" alt="Maze example"/> Here we see that the **state-action value of the initial state and going up is 0:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-3.jpg" alt="Maze example"/> So: the Q-function uses a Q-table **that has the value of each state-action pair.** Given a state and action, **our Q-function will search inside its Q-table to output the value.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q-function"/> </figure> If we recap, *Q-Learning* **is the RL algorithm that:** - Trains a *Q-function* (an **action-value function**), which internally is a **Q-table that contains all the state-action pair values.** - Given a state and action, our Q-function **will search its Q-table for the corresponding value.** - When the training is done, **we have an optimal Q-function, which means we have optimal Q-table.** - And if we **have an optimal Q-function**, we **have an optimal policy** since we **know the best action to take at each state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy"/> In the beginning, **our Q-table is useless since it gives arbitrary values for each state-action pair** (most of the time, we initialize the Q-table to 0). As the agent **explores the environment and we update the Q-table, it will give us a better and better approximation** to the optimal policy. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-1.jpg" alt="Q-learning"/> <figcaption>We see here that with the training, our Q-table is better since, thanks to it, we can know the value of each state-action pair.</figcaption> </figure> Now that we understand what Q-Learning, Q-functions, and Q-tables are, **let's dive deeper into the Q-Learning algorithm**. ## The Q-Learning algorithm [[q-learning-algo]] This is the Q-Learning pseudocode; let's study each part and **see how it works with a simple example before implementing it.** Don't be intimidated by it, it's simpler than it looks! We'll go over each step. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-learning"/> ### Step 1: We initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-3.jpg" alt="Q-learning"/> We need to initialize the Q-table for each state-action pair. **Most of the time, we initialize with values of 0.** ### Step 2: Choose an action using the epsilon-greedy strategy [[step2]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Q-learning"/> The epsilon-greedy strategy is a policy that handles the exploration/exploitation trade-off. The idea is that, with an initial value of ɛ = 1.0: - *With probability 1 — ɛ* : we do **exploitation** (aka our agent selects the action with the highest state-action pair value). - With probability ɛ: **we do exploration** (trying random action). 
At the beginning of the training, **the probability of doing exploration will be huge since ɛ is very high, so most of the time, we'll explore.** But as the training goes on, and consequently our **Q-table gets better and better in its estimations, we progressively reduce the epsilon value** since we will need less and less exploration and more exploitation. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-5.jpg" alt="Q-learning"/> ### Step 3: Perform action At, get reward Rt+1 and next state St+1 [[step3]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-6.jpg" alt="Q-learning"/> ### Step 4: Update Q(St, At) [[step4]] Remember that in TD Learning, we update our policy or value function (depending on the RL method we choose) **after one step of the interaction.** To produce our TD target, **we used the immediate reward \\(R_{t+1}\\) plus the discounted value of the next state**, computed by finding the action that maximizes the current Q-function at the next state. (We call that bootstrap). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-7.jpg" alt="Q-learning"/> Therefore, our \\(Q(S_t, A_t)\\) **update formula goes like this:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-8.jpg" alt="Q-learning"/> This means that to update our \\(Q(S_t, A_t)\\): - We need \\(S_t, A_t, R_{t+1}, S_{t+1}\\). - To update our Q-value at a given state-action pair, we use the TD target. How do we form the TD target? 1. We obtain the reward \\(R_{t+1}\\) after taking the action \\(A_t\\). 2. To get the **best state-action pair value** for the next state, we use a greedy policy to select the next best action. Note that this is not an epsilon-greedy policy, this will always take the action with the highest state-action value. 
Then when the update of this Q-value is done, we start in a new state and select our action **using an epsilon-greedy policy again.**

**This is why we say that Q-Learning is an off-policy algorithm.**

## Off-policy vs On-policy [[off-vs-on]]

The difference is subtle:

- *Off-policy*: using **a different policy for acting (inference) and updating (training).**

For instance, with Q-Learning, the epsilon-greedy policy (acting policy) is different from the greedy policy that is **used to select the best next-state action value to update our Q-value (updating policy).**

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-1.jpg" alt="Off-on policy"/>
<figcaption>Acting Policy</figcaption>
</figure>

Is different from the policy we use during the training part:

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-2.jpg" alt="Off-on policy"/>
<figcaption>Updating policy</figcaption>
</figure>

- *On-policy:* using the **same policy for acting and updating.**

For instance, with Sarsa, another value-based algorithm, **the epsilon-greedy policy selects the next state-action pair, not a greedy policy.**

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-3.jpg" alt="Off-on policy"/>
  <figcaption>Sarsa</figcaption>
</figure>

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="Off-on policy"/>
</figure>
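To tie the four steps together, here is a minimal, illustrative sketch of tabular Q-Learning. It is not the course's official implementation: the environment `env` is assumed to follow the Gymnasium API, and the state/action counts and hyperparameter values are placeholders chosen for the example.

```python
import numpy as np

# Assumed sizes and hyperparameters for the sketch
n_states, n_actions = 16, 4
alpha, gamma, epsilon = 0.1, 0.99, 1.0

Q = np.zeros((n_states, n_actions))  # Step 1: initialize the Q-table to 0

for episode in range(1000):
    state, _ = env.reset()  # `env` is assumed to be a Gymnasium-style environment
    done = False
    while not done:
        # Step 2: epsilon-greedy action selection (acting policy)
        if np.random.rand() < epsilon:
            action = np.random.randint(n_actions)   # explore
        else:
            action = int(np.argmax(Q[state]))       # exploit

        # Step 3: perform the action, observe reward and next state
        next_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated

        # Step 4: TD update; note the greedy max over the next state (updating policy)
        td_target = reward + gamma * np.max(Q[next_state]) * (not terminated)
        Q[state, action] += alpha * (td_target - Q[state, action])
        state = next_state

    epsilon = max(0.05, epsilon * 0.995)  # progressively reduce exploration
```

The `np.max(Q[next_state])` in the update is the greedy "updating policy", while the epsilon-greedy branch above it is the "acting policy", which is exactly the off-policy split described in this section.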
deep-rl-class/units/en/unit2/q-learning.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning.mdx", "repo_id": "deep-rl-class", "token_count": 2955 }
97
# Glossary This is a community-created glossary. Contributions are welcome! - **Deep Q-Learning:** A value-based deep reinforcement learning algorithm that uses a deep neural network to approximate Q-values for actions in a given state. The goal of Deep Q-learning is to find the optimal policy that maximizes the expected cumulative reward by learning the action-values. - **Value-based methods:** Reinforcement Learning methods that estimate a value function as an intermediate step towards finding an optimal policy. - **Policy-based methods:** Reinforcement Learning methods that directly learn to approximate the optimal policy without learning a value function. In practice they output a probability distribution over actions. The benefits of using policy-gradient methods over value-based methods include: - simplicity of integration: no need to store action values; - ability to learn a stochastic policy: the agent explores the state space without always taking the same trajectory, and avoids the problem of perceptual aliasing; - effectiveness in high-dimensional and continuous action spaces; and - improved convergence properties. - **Policy Gradient:** A subset of policy-based methods where the objective is to maximize the performance of a parameterized policy using gradient ascent. The goal of a policy-gradient is to control the probability distribution of actions by tuning the policy such that good actions (that maximize the return) are sampled more frequently in the future. - **Monte Carlo Reinforce:** A policy-gradient algorithm that uses an estimated return from an entire episode to update the policy parameter. If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls) This glossary was made possible thanks to: - [Diego Carpintero](https://github.com/dcarpintero)
deep-rl-class/units/en/unit4/glossary.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/glossary.mdx", "repo_id": "deep-rl-class", "token_count": 421 }
98
# Additional Readings [[additional-readings]] ## Bias-variance tradeoff in Reinforcement Learning If you want to dive deeper into the question of variance and bias tradeoff in Deep Reinforcement Learning, you can check out these two articles: - [Making Sense of the Bias / Variance Trade-off in (Deep) Reinforcement Learning](https://blog.mlreview.com/making-sense-of-the-bias-variance-trade-off-in-deep-reinforcement-learning-79cf1e83d565) - [Bias-variance Tradeoff in Reinforcement Learning](https://www.endtoend.ai/blog/bias-variance-tradeoff-in-reinforcement-learning/) ## Advantage Functions - [Advantage Functions, SpinningUp RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html?highlight=advantage%20functio#advantage-functions) ## Actor Critic - [Foundations of Deep RL Series, L3 Policy Gradients and Advantage Estimation by Pieter Abbeel](https://www.youtube.com/watch?v=AKbX1Zvo7r8) - [A2C Paper: Asynchronous Methods for Deep Reinforcement Learning](https://arxiv.org/abs/1602.01783v2)
deep-rl-class/units/en/unit6/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 321 }
99
# Introducing the Clipped Surrogate Objective Function ## Recap: The Policy Objective Function Let’s remember what the objective is to optimize in Reinforce: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/lpg.jpg" alt="Reinforce"/> The idea was that by taking a gradient ascent step on this function (equivalent to taking gradient descent of the negative of this function), we would **push our agent to take actions that lead to higher rewards and avoid harmful actions.** However, the problem comes from the step size: - Too small, **the training process was too slow** - Too high, **there was too much variability in the training** With PPO, the idea is to constrain our policy update with a new objective function called the *Clipped surrogate objective function* that **will constrain the policy change in a small range using a clip.** This new function **is designed to avoid destructively large weights updates** : <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ppo-surrogate.jpg" alt="PPO surrogate function"/> Let’s study each part to understand how it works. ## The Ratio Function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio1.jpg" alt="Ratio"/> This ratio is calculated as follows: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio2.jpg" alt="Ratio"/> It’s the probability of taking action \\( a_t \\) at state \\( s_t \\) in the current policy, divided by the same for the previous policy. As we can see, \\( r_t(\theta) \\) denotes the probability ratio between the current and old policy: - If \\( r_t(\theta) > 1 \\), the **action \\( a_t \\) at state \\( s_t \\) is more likely in the current policy than the old policy.** - If \\( r_t(\theta) \\) is between 0 and 1, the **action is less likely for the current policy than for the old one**. So this probability ratio is an **easy way to estimate the divergence between old and current policy.** ## The unclipped part of the Clipped Surrogate Objective function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped1.jpg" alt="PPO"/> This ratio **can replace the log probability we use in the policy objective function**. This gives us the left part of the new objective function: multiplying the ratio by the advantage. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped2.jpg" alt="PPO"/> <figcaption><a href="https://arxiv.org/pdf/1707.06347.pdf">Proximal Policy Optimization Algorithms</a></figcaption> </figure> However, without a constraint, if the action taken is much more probable in our current policy than in our former, **this would lead to a significant policy gradient step** and, therefore, an **excessive policy update.** ## The clipped Part of the Clipped Surrogate Objective function <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/> Consequently, we need to constrain this objective function by penalizing changes that lead to a ratio far away from 1 (in the paper, the ratio can only vary from 0.8 to 1.2). 
**By clipping the ratio, we ensure that we do not have too large a policy update because the current policy can't be too different from the older one.**

To do that, we have two solutions:

- *TRPO (Trust Region Policy Optimization)* uses KL divergence constraints outside the objective function to constrain the policy update. But this method **is complicated to implement and takes more computation time.**
- *PPO* clips the probability ratio directly in the objective function with its **Clipped surrogate objective function.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/>

This clipped part is a version where \\( r_t(\theta) \\) is clipped between \\( [1 - \epsilon, 1 + \epsilon] \\).

With the Clipped Surrogate Objective function, we have two probability ratios, one non-clipped and one clipped in a range between \\( [1 - \epsilon, 1 + \epsilon] \\); epsilon is a hyperparameter that helps us define this clip range (in the paper, \\( \epsilon = 0.2 \\)).

Then, we take the minimum of the clipped and non-clipped objective, **so the final objective is a lower bound (pessimistic bound) of the unclipped objective.**

Taking the minimum of the clipped and non-clipped objective means **we'll select either the clipped or the non-clipped objective based on the ratio and advantage situation**.
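To make the formula concrete, here is a minimal PyTorch sketch of the clipped surrogate objective. It is an illustration rather than a reference PPO implementation: the tensor arguments (per-action log-probabilities and advantages) and the epsilon value are assumptions for the example.

```python
import torch

def clipped_surrogate_loss(log_probs_new, log_probs_old, advantages, clip_epsilon=0.2):
    """Negative clipped surrogate objective (a loss we can minimize with gradient descent)."""
    # Probability ratio r_t(theta) between the current and the old policy
    ratio = torch.exp(log_probs_new - log_probs_old)

    # Unclipped and clipped terms, both weighted by the advantage
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages

    # Take the minimum (pessimistic bound), average over the batch, and negate
    return -torch.min(unclipped, clipped).mean()
```

Minimizing this loss is equivalent to maximizing the clipped surrogate objective described above: whenever the ratio drifts outside \\( [1 - \epsilon, 1 + \epsilon] \\) in the direction favored by the advantage, the gradient through the clipped term vanishes and the update is capped.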
deep-rl-class/units/en/unit8/clipped-surrogate-objective.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/clipped-surrogate-objective.mdx", "repo_id": "deep-rl-class", "token_count": 1386 }
100
# Optuna Tutorial [[optuna]]

The content below comes from [Antonin Raffin's ICRA 2022 presentations](https://araffin.github.io/tools-for-robotic-rl-icra2022/); he's one of the founders of Stable-Baselines and RL-Baselines3-Zoo.

## The theory behind Hyperparameter tuning

<Youtube id="AidFTOdGNFQ" />

## Optuna Tutorial

<Youtube id="ihP7E76KGOI" />

The notebook 👉 [here](https://colab.research.google.com/github/araffin/tools-for-robotic-rl-icra2022/blob/main/notebooks/optuna_lab.ipynb)
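As a rough preview of what the tutorial covers, here is a minimal Optuna sketch. It is not taken from the notebook: the `evaluate_agent` function is a placeholder for "train an agent with these hyperparameters and return its mean episodic reward", and the search ranges are illustrative.

```python
import optuna

def objective(trial):
    # Sample candidate hyperparameters (ranges here are illustrative)
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    gamma = trial.suggest_float("gamma", 0.9, 0.9999)

    # Placeholder: train and evaluate an agent with these values,
    # returning a score Optuna should maximize.
    return evaluate_agent(learning_rate=learning_rate, gamma=gamma)

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)
print(study.best_params)
```

The videos and notebook above go further, covering pruning of bad trials and good practices for tuning RL hyperparameters specifically.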
deep-rl-class/units/en/unitbonus2/optuna.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/optuna.mdx", "repo_id": "deep-rl-class", "token_count": 182 }
101
# Getting started: To get started, download the project from [here](https://huggingface.co/ivan267/imitation-learning-tutorial-godot-project/tree/main) (click on the download icon next to `GDRL-IL-Project.zip`). The zip file features both the “Starter” and “Complete” projects. The game code is already implemented in the starter project and the nodes are configured. We will focus on: - Implementing the code for the AIController node, - Recording expert demonstrations, - Training the agent and exporting an .onnx file which we can use for inference in Godot. ### Open the starter project in Godot Extract the zip file, open Godot, click “Import” and navigate to the `Starter\Godot` folder of the extracted archive. ### Open the robot scene <Tip> You can search for “robot” in the FileSystem search. </Tip> This scene contains a couple of different nodes, including the `robot` node, which contains the visual shape of the robot, `CameraXRotation` node which is used to rotate the camera “up-down” using the mouse in human control modes. The AI agent does not control this node since it is not necessary for learning the task. `RaycastSensors` node contains two Raycast sensors that help the agent to “sense” parts of the game world, including walls, floors, etc. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit13/open-robot-scene.jpg" alt="open robot scene"/> ### Click on the scroll next to AIController3D to open the script for editing <Tip> You might have to collapse the “robot” branch to find it more easily, or you can type `aicontroller` in the Filter box above the `Robot` node. </Tip> ### Replace the `get_obs()` and `get_reward()` methods with the implementation below: ```python func get_obs() -> Dictionary: var observations: Array[float] = [] for raycast_sensor in raycast_sensors: observations.append_array(raycast_sensor.get_observation()) var level_size = 16.0 var chest_local = to_local(chest.global_position) var chest_direction = chest_local.normalized() var chest_distance = clampf(chest_local.length(), 0.0, level_size) var lever_local = to_local(lever.global_position) var lever_direction = lever_local.normalized() var lever_distance = clampf(lever_local.length(), 0.0, level_size) var key_local = to_local(key.global_position) var key_direction = key_local.normalized() var key_distance = clampf(key_local.length(), 0.0, level_size) var raft_local = to_local(raft.global_position) var raft_direction = raft_local.normalized() var raft_distance = clampf(raft_local.length(), 0.0, level_size) var player_speed = player.global_basis.inverse() * player.velocity.limit_length(5.0) / 5.0 ( observations .append_array( [ chest_direction.x, chest_direction.y, chest_direction.z, chest_distance, lever_direction.x, lever_direction.y, lever_direction.z, lever_distance, key_direction.x, key_direction.y, key_direction.z, key_distance, raft_direction.x, raft_direction.y, raft_direction.z, raft_distance, raft.movement_direction_multiplier, float(player._is_lever_pulled), float(player._is_chest_opened), float(player._is_key_collected), float(player.is_on_floor()), player_speed.x, player_speed.y, player_speed.z, ] ) ) return {"obs": observations} func get_reward() -> float: return reward ``` In `get_obs()`, we first get the obs from the two Raycast sensors added to the `AIController3D` node in the inspector, and add them to the obs, then we get the relative position vectors to chest, lever, key, and raft, which we separate into directions and distances, and then we add 
them to the obs as well. We also add other game state info to the obs: - has the lever has been pulled, - was the key collected, - was the chest opened, - is the player on floor (also determines whether the player can jump), - the normalized local velocity of the player. We convert boolean values such as `_is_lever_pulled` to floats (0 or 1). In `get_reward()`, we only need to return the current reward. ### Replace the `_physics_process()` and `reset()` methods with the implementation below: ```python func _physics_process(delta: float) -> void: # Reset on timeout, this is implemented in parent class to set needs_reset to true, # we are re-implementing here to call player.game_over() that handles the game reset. n_steps += 1 if n_steps > reset_after: player.game_over() # In training or onnx inference modes, this method will be called by sync node with actions provided, # For expert demo recording mode, it will be called without any actions (as we set the actions based on human input), # For human control mode the method will not be called, so we call it here without any actions provided. if control_mode == ControlModes.HUMAN: set_action() # Reset the game faster if the lever is not pulled. steps_without_lever_pulled += 1 if steps_without_lever_pulled > 200 and (not player._is_lever_pulled): player.game_over() func reset(): super.reset() steps_without_lever_pulled = 0 ``` ### **Replace the `get_action_space()`, `get_action()`, and `set_action()` methods with the implementation below:** ```python # Defines the actions for the AI agent ("size": 2 means 2 floats for this action) func get_action_space() -> Dictionary: return { "movement": {"size": 2, "action_type": "continuous"}, "rotation": {"size": 1, "action_type": "continuous"}, "jump": {"size": 1, "action_type": "continuous"}, "use_action": {"size": 1, "action_type": "continuous"} } # We return the action values in the same order as defined in get_action_space() (important), but all in one array # For actions of size 1, we return 1 float in the array, for size 2, 2 floats in the array, etc. 
# set_action is called just before get_action by the sync node, so we can read the newly set values func get_action(): return [ # "movement" action values player.requested_movement.x, player.requested_movement.y, # "rotation" action value player.requested_rotation.x, # "jump" action value (-1 if not requested, 1 if requested) -1.0 + 2.0 * float(player.jump_requested), # "use_action" action value (-1 if not requested, 1 if requested) -1.0 + 2.0 * float(player.use_action_requested) ] # Here we set human control and AI control actions to the robot func set_action(action = null) -> void: # If there's no action provided, it means that AI is not controlling the robot (human control), if not action: # Only rotate if the mouse has moved since the last set_action call if previous_mouse_movement == mouse_movement: mouse_movement = Vector2.ZERO player.requested_movement = Input.get_vector( "move_left", "move_right", "move_forward", "move_back" ) player.requested_rotation = mouse_movement var use_action = Input.is_action_pressed("requested_action") var jump = Input.is_action_pressed("requested_jump") player.use_action_requested = use_action player.jump_requested = jump previous_mouse_movement = mouse_movement else: # If there is action provided, we set the actions received from the AI agent player.requested_movement = Vector2(action.movement[0], action.movement[1]) # The agent only rotates the robot along the Y axis, no need to rotate the camera along X axis player.requested_rotation = Vector2(action.rotation[0], 0.0) player.jump_requested = bool(action.jump[0] > 0) player.use_action_requested = bool(action.use_action[0] > 0) ``` For `get_action()` (only needed if using the demo record mode), we need to provide the actions that we want the agent to send when it encounters the same state. It is important for the values to be in the correct range (`-1.0 to 1.0`), which is why we have the `-1 + 2 * variable` for boolean states, and in the correct order, as defined in `get_action_space()`. In demo record mode, `set_action()` is called without providing actions, as we need to set the action values based on human input. In training/inference modes, the method is called with an `action` argument containing values for all of the actions provided by the RL model, so we have an `if/else` to handle both cases. More info is included in the code comments. ### Replace the `_input` method with the implementation below: ```python # Record mouse movement for human and demo_record modes # We don't directly rotate in input to allow for frame skipping (action_repeat setting) which # will also be applied to the AI agent in training/inference modes. func _input(event): if not (heuristic == "human" or heuristic == "demo_record"): return if event is InputEventMouseMotion: var movement_scale: float = 0.005 mouse_movement.y = clampf(event.relative.y * movement_scale, -1.0, 1.0) mouse_movement.x = clampf(event.relative.x * movement_scale, -1.0, 1.0) ``` This code part records mouse movement in case of human control and demo record modes. **Finally, save the script. We are ready for the next step.** ### Open the demo record scene, and click on AIController3D node <Tip> You can search for “demo” in the FileSystem search, and you can search for “aicontroller” in the scene's filter box. 
</Tip> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit13/demo_record_scene.jpg" alt="open robot scene"/> You don’t need to make any changes as everything is preset, but let’s go over the things you would need to set in your own env: The scene contains modified `Level > Robot > AIController3D` node settings: - `Control Mode` is set to `Record Expert Demos` - `Expert Demo Save Path` is filled out - `Action Repeat` is set to the same value as is set for the `Sync` node in `training_scene` and `onnx_inference_scene`. This means that every action set by the agent is repeated for 3 physics frames. The setting in `AIController` adds the same action repeat to the human input (which introduces some lag) to match the same behavior. This is a fairly low value which doesn’t introduce much lag. If you change this value, make sure to change it in all 3 places. - `Remove Last Episode` key allows us to set a key that can be used to remove a failed episode during recording, without having to restart the entire session. E.g. if the robot falls in the water and the game resets, we can use this key to remove the previously recorded episode while recording the next one. It is set to `R`, but you can change it to any key by clicking on it, and then clicking on the `Configure` button. Another way to make episode recording easier in challenging environments is to slow down the environment during recording. This can easily be done by clicking on the `Sync` node in the scene, and adjusting the `Speed Up` property (set to 1 by default). ### Let’s record some demos: <Tip> Note that the demos will only be saved if we have recorded at least one complete episode and closed the game window by clicking on "X" or pressing ALT+F4. Using the stop button in Godot editor will not save the demos. It’s best to try recording just one episode first, then check if you see "expert_demos.json" in the filesystem or in the Godot project folder. </Tip> Make sure that you are still in the `demo_record_scene`, `press F6` and the demo recording will start. Controls: - mouse controls the camera (if you need to adjust mouse sensitivity, open the `robot` scene, click on the `Robot` node and adjust the `Rotation Speed`, keep it the same value for recording demos, training and inference), - `WASD` controls the player movement, - `SPACE` jumps, - `E` activates the lever and opens the chest You can take a few practice first to get familiar with the env. If you wish to skip recording demos, you can also find the pre-recorded demos in the completed project and use the `expert_demos.json` file from there. The recorded demos should include at least 22-24 complete successful episodes. Multiple demo files can also be used in the training stage, so you don’t have to record all demos in one go (you can change the file name using the `Expert Demo Save Path` property mentioned before). Recording 23 episodes took me ~10 minutes (as the key has 2 alternating spawning positions, 22 or 24 would provide an equal distribution of key positions in the demos, but it is fairly close). When approaching the lever or chest, I pressed and held the `E` key slightly longer to ensure the action is recorded for multiple steps when near those objects. I also removed a couple of episodes that I didn’t complete successfully by pressing the `R` key during the following episode. 
Here’s a sped-up video of the demo recording process: <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit13/demo_record.mp4" type="video/mp4" controls autoplay loop mute /> ### Export the game for training: You can export the game from Godot using `Project > Export`.
deep-rl-class/units/en/unitbonus5/getting-started.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus5/getting-started.mdx", "repo_id": "deep-rl-class", "token_count": 3919 }
102
import argparse import sys sys.path.append(".") from base_classes import LCMLoRATextToImageBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="stabilityai/stable-diffusion-xl-base-1.0", ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=4) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = LCMLoRATextToImageBenchmark(args) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_t2i_lcm_lora.py/0
{ "file_path": "diffusers/benchmarks/benchmark_t2i_lcm_lora.py", "repo_id": "diffusers", "token_count": 273 }
103
# docstyle-ignore INSTALL_CONTENT = """ # Diffusers installation ! pip install diffusers transformers datasets accelerate # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/diffusers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
diffusers/docs/source/_config.py/0
{ "file_path": "diffusers/docs/source/_config.py", "repo_id": "diffusers", "token_count": 102 }
104
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DDIMScheduler [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon. The abstract from the paper is: *Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.* The original codebase of this paper can be found at [ermongroup/ddim](https://github.com/ermongroup/ddim), and you can contact the author on [tsong.me](https://tsong.me/). ## Tips The paper [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) claims that a mismatch between the training and inference settings leads to suboptimal inference generation results for Stable Diffusion. To fix this, the authors propose: <Tip warning={true}> 🧪 This is an experimental feature! </Tip> 1. rescale the noise schedule to enforce zero terminal signal-to-noise ratio (SNR) ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, rescale_betas_zero_snr=True) ``` 2. train a model with `v_prediction` (add the following argument to the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts) ```bash --prediction_type="v_prediction" ``` 3. change the sampler to always start from the last timestep ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") ``` 4. 
rescale classifier-free guidance to prevent over-exposure ```py image = pipe(prompt, guidance_rescale=0.7).images[0] ``` For example: ```py from diffusers import DiffusionPipeline, DDIMScheduler import torch pipe = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config( pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing" ) pipe.to("cuda") prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipe(prompt, guidance_rescale=0.7).images[0] image ``` ## DDIMScheduler [[autodoc]] DDIMScheduler ## DDIMSchedulerOutput [[autodoc]] schedulers.scheduling_ddim.DDIMSchedulerOutput
diffusers/docs/source/en/api/schedulers/ddim.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/ddim.md", "repo_id": "diffusers", "token_count": 1122 }
105
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LMSDiscreteScheduler `LMSDiscreteScheduler` is a linear multistep scheduler for discrete beta schedules. The scheduler is ported from and created by [Katherine Crowson](https://github.com/crowsonkb/), and the original implementation can be found at [crowsonkb/k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181). ## LMSDiscreteScheduler [[autodoc]] LMSDiscreteScheduler ## LMSDiscreteSchedulerOutput [[autodoc]] schedulers.scheduling_lms_discrete.LMSDiscreteSchedulerOutput
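As a quick illustration (not part of the API reference above), the scheduler can be swapped into an existing pipeline with `from_config`; the checkpoint and prompt below are placeholders chosen for the example.

```py
import torch
from diffusers import DiffusionPipeline, LMSDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

image = pipe("A photo of an astronaut riding a horse on Mars").images[0]
```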
diffusers/docs/source/en/api/schedulers/lms_discrete.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/lms_discrete.md", "repo_id": "diffusers", "token_count": 335 }
106
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to contribute to Diffusers 🧨 We ❤️ contributions from the open-source community! Everyone is welcome, and all types of participation –not just code– are valued and appreciated. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid and get involved if you're up for it! Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://Discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a> Whichever way you choose to contribute, we strive to be part of an open, welcoming, and kind community. Please, read our [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and be mindful to respect it during your interactions. We also recommend you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and ask you to adhere to the same principles of transparency and responsibility. We enormously value feedback from the community, so please do not be afraid to speak up if you believe you have valuable feedback that can help improve the library - every message, comment, issue, and pull request (PR) is read and considered. ## Overview You can contribute in many ways ranging from answering questions on issues and discussions to adding new diffusion models to the core library. In the following, we give an overview of different ways to contribute, ranked by difficulty in ascending order. All of them are valuable to the community. * 1. Asking and answering questions on [the Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR). * 2. Opening new issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose) or new discussions on [the GitHub Discussions tab](https://github.com/huggingface/diffusers/discussions/new/choose). * 3. Answering issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues) or discussions on [the GitHub Discussions tab](https://github.com/huggingface/diffusers/discussions). * 4. Fix a simple issue, marked by the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). * 5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source). * 6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples). * 7. 
Contribute to the [examples](https://github.com/huggingface/diffusers/tree/main/examples). * 8. Fix a more difficult issue, marked by the "Good second issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22). * 9. Add a new pipeline, model, or scheduler, see ["New Pipeline/Model"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) and ["New scheduler"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) issues. For this contribution, please have a look at [Design Philosophy](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md). As said before, **all contributions are valuable to the community**. In the following, we will explain each contribution a bit more in detail. For all contributions 4 - 9, you will need to open a PR. It is explained in detail how to do so in [Opening a pull request](#how-to-open-a-pr). ### 1. Asking and answering questions on the Diffusers discussion forum or on the Diffusers Discord Any question or comment related to the Diffusers library can be asked on the [discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/) or on [Discord](https://discord.gg/G7tWnz98XR). Such questions and comments include (but are not limited to): - Reports of training or inference experiments in an attempt to share knowledge - Presentation of personal projects - Questions to non-official training examples - Project proposals - General feedback - Paper summaries - Asking for help on personal projects that build on top of the Diffusers library - General questions - Ethical questions regarding diffusion models - ... Every question that is asked on the forum or on Discord actively encourages the community to publicly share knowledge and might very well help a beginner in the future who has the same question you're having. Please do pose any questions you might have. In the same spirit, you are of immense help to the community by answering such questions because this way you are publicly documenting knowledge for everybody to learn from. **Please** keep in mind that the more effort you put into asking or answering a question, the higher the quality of the publicly documented knowledge. In the same way, well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, while badly posed questions or answers reduce the overall quality of the public knowledge database. In short, a high quality question or answer is *precise*, *concise*, *relevant*, *easy-to-understand*, *accessible*, and *well-formatted/well-posed*. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section. **NOTE about channels**: [*The forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines, such as Google. Posts are ranked by popularity rather than chronologically. Hence, it's easier to look up questions and answers that we posted some time ago. In addition, questions and answers posted in the forum can easily be linked to. In contrast, *Discord* has a chat-like format that invites fast back-and-forth communication. While it will most likely take less time for you to get an answer to your question on Discord, your question won't be visible anymore over time. 
Also, it's much harder to find information that was posted a while back on Discord. We therefore strongly recommend using the forum for high-quality questions and answers in an attempt to create long-lasting knowledge for the community. If discussions on Discord lead to very interesting answers and conclusions, we recommend posting the results on the forum to make the information more available for future readers. ### 2. Opening new issues on the GitHub issues tab The 🧨 Diffusers library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue. Remember, GitHub issues are reserved for technical questions directly related to the Diffusers library, bug reports, feature requests, or feedback on the library design. In a nutshell, this means that everything that is **not** related to the **code of the Diffusers library** (including the documentation) should **not** be asked on GitHub, but rather on either the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). **Please consider the following guidelines when opening a new issue**: - Make sure you have searched whether your issue has already been asked before (use the search bar on GitHub under Issues). - Please never report a new issue on another (related) issue. If another issue is highly related, please open a new issue nevertheless and link to the related issue. - Make sure your issue is written in English. Please use one of the great, free online translation services, such as [DeepL](https://www.deepl.com/translator) to translate from your native language to English if you are not comfortable in English. - Check whether your issue might be solved by updating to the newest Diffusers version. Before posting your issue, please make sure that `python -c "import diffusers; print(diffusers.__version__)"` is higher or matches the latest Diffusers version. - Remember that the more effort you put into opening a new issue, the higher the quality of your answer will be and the better the overall quality of the Diffusers issues. New issues usually include the following. #### 2.1. Reproducible, minimal bug reports A bug report should always have a reproducible code snippet and be as minimal and concise as possible. This means in more detail: - Narrow the bug down as much as you can, **do not just dump your whole code file**. - Format your code. - Do not include any external libraries except for Diffusers depending on them. - **Always** provide all necessary information about your environment; for this, you can run: `diffusers-cli env` in your shell and copy-paste the displayed information to the issue. - Explain the issue. If the reader doesn't know what the issue is and why it is an issue, (s)he cannot solve it. - **Always** make sure the reader can reproduce your issue with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell. - If in order to reproduce your issue a model and/or dataset is required, make sure the reader has access to that model or dataset. You can always upload your model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. 
Try to keep your model and dataset as small as possible, to make the reproduction of your issue as effortless as possible. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section. You can open a bug report [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&projects=&template=bug-report.yml). #### 2.2. Feature requests A world-class feature request addresses the following points: 1. Motivation first: * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best. * Is it related to something you would need for a project? We'd love to hear about it! * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you. 2. Write a *full paragraph* describing the feature; 3. Provide a **code snippet** that demonstrates its future use; 4. In case this is related to a paper, please attach a link; 5. Attach any additional information (drawings, screenshots, etc.) you think may help. You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=). #### 2.3 Feedback Feedback about the library design and why it is good or not good helps the core maintainers immensely to build a user-friendly library. To understand the philosophy behind the current design philosophy, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel like a certain design choice does not fit with the current design philosophy, please explain why and how it should be changed. If a certain design choice follows the design philosophy too much, hence restricting use cases, explain why and how it should be changed. If a certain design choice is very useful for you, please also leave a note as this is great feedback for future design decisions. You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=). #### 2.4 Technical questions Technical questions are mainly about why certain code of the library was written in a certain way, or what a certain part of the code does. Please make sure to link to the code in question and please provide details on why this part of the code is difficult to understand. You can open an issue about a technical question [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml). #### 2.5 Proposal to add a new model, scheduler, or pipeline If the diffusion model community released a new model, pipeline, or scheduler that you would like to see in the Diffusers library, please provide the following information: * Short description of the diffusion pipeline, model, or scheduler and link to the paper or public release. * Link to any of its open-source implementation(s). * Link to the model weights if they are available. If you are willing to contribute to the model yourself, let us know so we can best guide you. Also, don't forget to tag the original author of the component (model, scheduler, pipeline, etc.) by GitHub handle if you can find it. You can open a request for a model/pipeline/scheduler [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml). ### 3. 
Answering issues on the GitHub issues tab Answering issues on GitHub might require some technical knowledge of Diffusers, but we encourage everybody to give it a try even if you are not 100% certain that your answer is correct. Some tips to give a high-quality answer to an issue: - Be as concise and minimal as possible. - Stay on topic. An answer to the issue should concern the issue and only the issue. - Provide links to code, papers, or other sources that prove or encourage your point. - Answer in code. If a simple code snippet is the answer to the issue or shows how the issue can be solved, please provide a fully reproducible code snippet. Also, many issues tend to be simply off-topic, duplicates of other issues, or irrelevant. It is of great help to the maintainers if you can answer such issues, encouraging the author of the issue to be more precise, provide the link to a duplicated issue or redirect them to [the forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). If you have verified that the issued bug report is correct and requires a correction in the source code, please have a look at the next sections. For all of the following contributions, you will need to open a PR. It is explained in detail how to do so in the [Opening a pull request](#how-to-open-a-pr) section. ### 4. Fixing a "Good first issue" *Good first issues* are marked by the [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) label. Usually, the issue already explains how a potential solution should look so that it is easier to fix. If the issue hasn't been closed and you would like to try to fix this issue, you can just leave a message "I would like to try this issue.". There are usually three scenarios: - a.) The issue description already proposes a fix. In this case and if the solution makes sense to you, you can open a PR or draft PR to fix it. - b.) The issue description does not propose a fix. In this case, you can ask what a proposed fix could look like and someone from the Diffusers team should answer shortly. If you have a good idea of how to fix it, feel free to directly open a PR. - c.) There is already an open PR to fix the issue, but the issue hasn't been closed yet. If the PR has gone stale, you can simply open a new PR and link to the stale PR. PRs often go stale if the original contributor who wanted to fix the issue suddenly cannot find the time anymore to proceed. This often happens in open-source and is very normal. In this case, the community will be very happy if you give it a new try and leverage the knowledge of the existing PR. If there is already a PR and it is active, you can help the author by giving suggestions, reviewing the PR or even asking whether you can contribute to the PR. ### 5. Contribute to the documentation A good library **always** has good documentation! The official documentation is often one of the first points of contact for new users of the library, and therefore contributing to the documentation is a **highly valuable contribution**. Contributing to the library can have many forms: - Correcting spelling or grammatical errors. - Correct incorrect formatting of the docstring. If you see that the official documentation is weirdly displayed or a link is broken, we would be very happy if you take some time to correct it. - Correct the shape or dimensions of a docstring input or output tensor. 
- Clarify documentation that is hard to understand or incorrect. - Update outdated code examples. - Translating the documentation to another language. Anything displayed on [the official Diffusers doc page](https://huggingface.co/docs/diffusers/index) is part of the official documentation and can be corrected, adjusted in the respective [documentation source](https://github.com/huggingface/diffusers/tree/main/docs/source). Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) on how to verify changes made to the documentation locally. ### 6. Contribute a community pipeline > [!TIP] > Read the [Community pipelines](../using-diffusers/custom_pipeline_overview#community-pipelines) guide to learn more about the difference between a GitHub and Hugging Face Hub community pipeline. If you're interested in why we have community pipelines, take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) (basically, we can't maintain all the possible ways diffusion models can be used for inference but we also don't want to prevent the community from building them). Contributing a community pipeline is a great way to share your creativity and work with the community. It lets you build on top of the [`DiffusionPipeline`] so that anyone can load and use it by setting the `custom_pipeline` parameter. This section will walk you through how to create a simple pipeline where the UNet only does a single forward pass and calls the scheduler once (a "one-step" pipeline). 1. Create a one_step_unet.py file for your community pipeline. This file can contain whatever package you want to use as long as it's installed by the user. Make sure you only have one pipeline class that inherits from [`DiffusionPipeline`] to load model weights and the scheduler configuration from the Hub. Add a UNet and scheduler to the `__init__` function. You should also add the `register_modules` function to ensure your pipeline and its components can be saved with [`~DiffusionPipeline.save_pretrained`]. ```py from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) ``` 1. In the forward pass (which we recommend defining as `__call__`), you can add any feature you'd like. For the "one-step" pipeline, create a random image and call the UNet and scheduler once by setting `timestep=1`. ```py from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) def __call__(self): image = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), ) timestep = 1 model_output = self.unet(image, timestep).sample scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample return scheduler_output ``` Now you can run the pipeline by passing a UNet and scheduler to it or load pretrained weights if the pipeline structure is identical. 
```py from diffusers import DDPMScheduler, UNet2DModel scheduler = DDPMScheduler() unet = UNet2DModel() pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler) output = pipeline() # load pretrained weights pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) output = pipeline() ``` You can either share your pipeline as a GitHub community pipeline or Hub community pipeline. <hfoptions id="pipeline type"> <hfoption id="GitHub pipeline"> Share your GitHub pipeline by opening a pull request on the Diffusers [repository](https://github.com/huggingface/diffusers) and add the one_step_unet.py file to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder. </hfoption> <hfoption id="Hub pipeline"> Share your Hub pipeline by creating a model repository on the Hub and uploading the one_step_unet.py file to it. </hfoption> </hfoptions> ### 7. Contribute to training examples Diffusers examples are a collection of training scripts that reside in [examples](https://github.com/huggingface/diffusers/tree/main/examples). We support two types of training examples: - Official training examples - Research training examples Research training examples are located in [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) whereas official training examples include all folders under [examples](https://github.com/huggingface/diffusers/tree/main/examples) except the `research_projects` and `community` folders. The official training examples are maintained by the Diffusers' core maintainers whereas the research training examples are maintained by the community. This is because of the same reasons put forward in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: It is not feasible for the core maintainers to maintain all possible training methods for diffusion models. If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author. Both official training and research examples consist of a directory that contains one or more training scripts, a `requirements.txt` file, and a `README.md` file. In order for the user to make use of the training examples, it is required to clone the repository: ```bash git clone https://github.com/huggingface/diffusers ``` as well as to install all additional dependencies required for training: ```bash cd diffusers pip install -r examples/<your-example-folder>/requirements.txt ``` Therefore when adding an example, the `requirements.txt` file shall define all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt). Training examples of the Diffusers library should adhere to the following philosophy: - All the code necessary to run the examples should be found in a single Python file. - One should be able to run the example from the command line with `python <your-example>.py --args`. - Examples should be kept simple and serve as **an example** on how to use Diffusers for training. 
The purpose of example scripts is **not** to create state-of-the-art diffusion models, but rather to reproduce known training schemes without adding too much custom logic. As a byproduct of this point, our examples also strive to serve as good educational materials. To contribute an example, it is highly recommended to look at already existing examples such as [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) to get an idea of how they should look like. We strongly advise contributors to make use of the [Accelerate library](https://github.com/huggingface/accelerate) as it's tightly integrated with Diffusers. Once an example script works, please make sure to add a comprehensive `README.md` that states how to use the example exactly. This README should include: - An example command on how to run the example script as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch). - A link to some training results (logs, models, etc.) that show what the user can expect as shown [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5). - If you are adding a non-official/research training example, **please don't forget** to add a sentence that you are maintaining this training example which includes your git handle as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations). If you are contributing to the official training examples, please also make sure to add a test to its folder such as [examples/dreambooth/test_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/test_dreambooth.py). This is not necessary for non-official training examples. ### 8. Fixing a "Good second issue" *Good second issues* are marked by the [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) label. Good second issues are usually more complicated to solve than [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). The issue description usually gives less guidance on how to fix the issue and requires a decent understanding of the library by the interested contributor. If you are interested in tackling a good second issue, feel free to open a PR to fix it and link the PR to the issue. If you see that a PR has already been opened for this issue but did not get merged, have a look to understand why it wasn't merged and try to open an improved PR. Good second issues are usually more difficult to get merged compared to good first issues, so don't hesitate to ask for help from the core maintainers. If your PR is almost finished the core maintainers can also jump into your PR and commit to it in order to get it merged. ### 9. Adding pipelines, models, schedulers Pipelines, models, and schedulers are the most important pieces of the Diffusers library. They provide easy access to state-of-the-art diffusion technologies and thus allow the community to build powerful generative AI applications. By adding a new model, pipeline, or scheduler you might enable a new powerful use case for any of the user interfaces relying on Diffusers which can be of immense value for the whole generative AI ecosystem. 
Diffusers has a couple of open feature requests for all three components - feel free to gloss over them if you don't know yet what specific component you would like to add: - [Model or pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) - [Scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Before adding any of the three components, it is strongly recommended that you give the [Philosophy guide](philosophy) a read to better understand the design of any of the three components. Please be aware that we cannot merge model, scheduler, or pipeline additions that strongly diverge from our design philosophy as it will lead to API inconsistencies. If you fundamentally disagree with a design choice, please open a [Feedback issue](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) instead so that it can be discussed whether a certain design pattern/design choice shall be changed everywhere in the library and whether we shall update our design philosophy. Consistency across the library is very important for us. Please make sure to add links to the original codebase/paper to the PR and ideally also ping the original author directly on the PR so that they can follow the progress and potentially help with questions. If you are unsure or stuck in the PR, don't hesitate to leave a message to ask for a first review or help. #### Copied from mechanism A unique and important feature to understand when adding any pipeline, model or scheduler code is the `# Copied from` mechanism. You'll see this all over the Diffusers codebase, and the reason we use it is to keep the codebase easy to understand and maintain. Marking code with the `# Copied from` mechanism forces the marked code to be identical to the code it was copied from. This makes it easy to update and propagate changes across many files whenever you run `make fix-copies`. For example, in the code example below, [`~diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is the original code and `AltDiffusionPipelineOutput` uses the `# Copied from` mechanism to copy it. The only difference is changing the class prefix from `Stable` to `Alt`. ```py # Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt class AltDiffusionPipelineOutput(BaseOutput): """ Output class for Alt Diffusion pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`List[bool]`) List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. """ ``` To learn more, read this section of the [~Don't~ Repeat Yourself*](https://huggingface.co/blog/transformers-design-philosophy#4-machine-learning-models-are-static) blog post. ## How to write a good issue **The better your issue is written, the higher the chances that it will be quickly resolved.** 1. Make sure that you've used the correct template for your issue. You can pick between *Bug Report*, *Feature Request*, *Feedback about API Design*, *New model/pipeline/scheduler addition*, *Forum*, or a blank issue. Make sure to pick the correct one when opening [a new issue](https://github.com/huggingface/diffusers/issues/new/choose). 2. 
**Be precise**: Give your issue a fitting title. Try to formulate your issue description as simple as possible. The more precise you are when submitting an issue, the less time it takes to understand the issue and potentially solve it. Make sure to open an issue for one issue only and not for multiple issues. If you found multiple issues, simply open multiple issues. If your issue is a bug, try to be as precise as possible about what bug it is - you should not just write "Error in diffusers". 3. **Reproducibility**: No reproducible code snippet == no solution. If you encounter a bug, maintainers **have to be able to reproduce** it. Make sure that you include a code snippet that can be copy-pasted into a Python interpreter to reproduce the issue. Make sure that your code snippet works, *i.e.* that there are no missing imports or missing links to images, ... Your issue should contain an error message **and** a code snippet that can be copy-pasted without any changes to reproduce the exact same error message. If your issue is using local model weights or local data that cannot be accessed by the reader, the issue cannot be solved. If you cannot share your data or model, try to make a dummy model or dummy data. 4. **Minimalistic**: Try to help the reader as much as you can to understand the issue as quickly as possible by staying as concise as possible. Remove all code / all information that is irrelevant to the issue. If you have found a bug, try to create the easiest code example you can to demonstrate your issue, do not just dump your whole workflow into the issue as soon as you have found a bug. E.g., if you train a model and get an error at some point during the training, you should first try to understand what part of the training code is responsible for the error and try to reproduce it with a couple of lines. Try to use dummy data instead of full datasets. 5. Add links. If you are referring to a certain naming, method, or model make sure to provide a link so that the reader can better understand what you mean. If you are referring to a specific PR or issue, make sure to link it to your issue. Do not assume that the reader knows what you are talking about. The more links you add to your issue the better. 6. Formatting. Make sure to nicely format your issue by formatting code into Python code syntax, and error messages into normal code syntax. See the [official GitHub formatting docs](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for more information. 7. Think of your issue not as a ticket to be solved, but rather as a beautiful entry to a well-written encyclopedia. Every added issue is a contribution to publicly available knowledge. By adding a nicely written issue you not only make it easier for maintainers to solve your issue, but you are helping the whole community to better understand a certain aspect of the library. ## How to write a good PR 1. Be a chameleon. Understand existing design patterns and syntax and make sure your code additions flow seamlessly into the existing code base. Pull requests that significantly diverge from existing design patterns or user interfaces will not be merged. 2. Be laser focused. A pull request should solve one problem and one problem only. Make sure to not fall into the trap of "also fixing another problem while we're adding it". It is much more difficult to review pull requests that solve multiple, unrelated problems at once. 3. 
If helpful, try to add a code snippet that displays an example of how your addition can be used. 4. The title of your pull request should be a summary of its contribution. 5. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it); 6. To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged; 7. Try to formulate and format your text as explained in [How to write a good issue](#how-to-write-a-good-issue). 8. Make sure existing tests pass; 9. Add high-coverage tests. No quality testing = no merge. - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI does not run the slow tests, but GitHub Actions does every night! 10. All public methods must have informative docstrings that work nicely with markdown. See [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py) for an example. 11. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) or [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images) to place these files. If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset. ## How to open a PR Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback. You will need basic `git` proficiency to be able to contribute to 🧨 Diffusers. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference. Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/83bc6c94eaeb6f7704a2a428931cf2d9ad973ae9/setup.py#L270)): 1. Fork the [repository](https://github.com/huggingface/diffusers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone git@github.com:<your GitHub handle>/diffusers.git $ cd diffusers $ git remote add upstream https://github.com/huggingface/diffusers.git ``` 3. Create a new branch to hold your development changes: ```bash $ git checkout -b a-descriptive-name-for-my-changes ``` **Do not** work on the `main` branch. 4. Set up a development environment by running the following command in a virtual environment: ```bash $ pip install -e ".[dev]" ``` If you have already cloned the repo, you might need to `git pull` to get the most recent changes in the library. 5. Develop the features on your branch. As you work on the features, you should make sure that the test suite passes. 
You should run the tests impacted by your changes like this: ```bash $ pytest tests/<TEST_TO_RUN>.py ``` Before you run the tests, please make sure you install the dependencies required for testing. You can do so with this command: ```bash $ pip install -e ".[test]" ``` You can also run the full test suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Diffusers has grown a lot. Here is the command for it: ```bash $ make test ``` 🧨 Diffusers relies on `black` and `isort` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with: ```bash $ make style ``` 🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI, however, you can also run the same checks with: ```bash $ make quality ``` Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally: ```bash $ git add modified_file.py $ git commit -m "A descriptive message about your changes." ``` It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes: ```bash $ git pull upstream main ``` Push the changes to your account using: ```bash $ git push -u origin a-descriptive-name-for-my-changes ``` 6. Once you are satisfied, go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review. 7. It's OK if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Tests An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests). We like `pytest` and `pytest-xdist` because it's faster. From the root of the repository, here's how to run tests with `pytest` for the library: ```bash $ python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` In fact, that's how `make test` is implemented! You can specify a smaller set of tests in order to test only the feature you're working on. By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models — make sure you have enough disk space and a good Internet connection, or a lot of patience! ```bash $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` `unittest` is fully supported, here's how to run tests with it: ```bash $ python -m unittest discover -s tests -t . -v $ python -m unittest discover -s examples -t examples -v ``` ### Syncing forked main with upstream (HuggingFace) main To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs, when syncing the main branch of a forked repository, please, follow these steps: 1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. 2. 
If a PR is absolutely necessary, use the following steps after checking out your branch: ```bash $ git checkout -b your-branch-for-syncing $ git pull --squash --no-commit upstream main $ git commit -m '<your message without GitHub references>' $ git push --set-upstream origin your-branch-for-syncing ``` ### Style guide For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
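For illustration, here is a minimal sketch of a Google-style docstring. The function, arguments, and defaults are made up purely for this example and do not correspond to any real Diffusers API:

```py
def rescale_noise(noise, scale_factor=1.0):
    """Rescale a noise tensor by a constant factor.

    This is a hypothetical helper used only to illustrate the docstring layout.

    Args:
        noise (`torch.Tensor`):
            The noise tensor to rescale.
        scale_factor (`float`, *optional*, defaults to `1.0`):
            Multiplicative factor applied to the noise.

    Returns:
        `torch.Tensor`: The rescaled noise tensor.
    """
    return noise * scale_factor
```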
diffusers/docs/source/en/conceptual/contribution.md/0
{ "file_path": "diffusers/docs/source/en/conceptual/contribution.md", "repo_id": "diffusers", "token_count": 10990 }
107
# T-GATE

[T-GATE](https://github.com/HaozheLiu-ST/T-GATE/tree/main) accelerates inference for [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [PixArt](../api/pipelines/pixart), and [Latent Consistency Model](../api/pipelines/latent_consistency_models) pipelines by skipping the cross-attention calculation once it converges. This method doesn't require any additional training and it can speed up inference by 10-50%. T-GATE is also compatible with other optimization methods like [DeepCache](./deepcache).

Before you begin, make sure you install T-GATE.

```bash
pip install tgate
pip install -U torch diffusers transformers accelerate DeepCache
```

To use T-GATE with a pipeline, you need to use its corresponding loader.

| Pipeline | T-GATE Loader |
|---|---|
| PixArt | TgatePixArtLoader |
| Stable Diffusion XL | TgateSDXLLoader |
| Stable Diffusion XL + DeepCache | TgateSDXLDeepCacheLoader |
| Stable Diffusion | TgateSDLoader |
| Stable Diffusion + DeepCache | TgateSDDeepCacheLoader |

Next, create a `TgateLoader` with a pipeline, the gate step (the time step to stop calculating the cross-attention), and the number of inference steps. Then call the `tgate` method on the pipeline with a prompt, gate step, and the number of inference steps.

Let's see how to enable this for several different pipelines.

<hfoptions id="pipelines">
<hfoption id="PixArt">

Accelerate `PixArtAlphaPipeline` with T-GATE:

```py
import torch
from diffusers import PixArtAlphaPipeline
from tgate import TgatePixArtLoader

pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16)

gate_step = 8
inference_step = 25
pipe = TgatePixArtLoader(
    pipe,
    gate_step=gate_step,
    num_inference_steps=inference_step,
).to("cuda")

image = pipe.tgate(
    "An alpaca made of colorful building blocks, cyberpunk.",
    gate_step=gate_step,
    num_inference_steps=inference_step,
).images[0]
```

</hfoption>
<hfoption id="Stable Diffusion XL">

Accelerate `StableDiffusionXLPipeline` with T-GATE:

```py
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers import DPMSolverMultistepScheduler
from tgate import TgateSDXLLoader

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

gate_step = 10
inference_step = 25
pipe = TgateSDXLLoader(
    pipe,
    gate_step=gate_step,
    num_inference_steps=inference_step,
).to("cuda")

image = pipe.tgate(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
    gate_step=gate_step,
    num_inference_steps=inference_step
).images[0]
```

</hfoption>
<hfoption id="StableDiffusionXL with DeepCache">

Accelerate `StableDiffusionXLPipeline` with [DeepCache](https://github.com/horseee/DeepCache) and T-GATE:

```py
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers import DPMSolverMultistepScheduler
from tgate import TgateSDXLDeepCacheLoader

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

gate_step = 10
inference_step = 25
pipe = TgateSDXLDeepCacheLoader(
    pipe,
    cache_interval=3,
    cache_branch_id=0,
).to("cuda")

image = pipe.tgate(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
    gate_step=gate_step,
num_inference_steps=inference_step ).images[0] ``` </hfoption> <hfoption id="Latent Consistency Model"> Accelerate `latent-consistency/lcm-sdxl` with T-GATE: ```py import torch from diffusers import StableDiffusionXLPipeline from diffusers import UNet2DConditionModel, LCMScheduler from diffusers import DPMSolverMultistepScheduler from tgate import TgateSDXLLoader unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16", ) pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16", ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) gate_step = 1 inference_step = 4 pipe = TgateSDXLLoader( pipe, gate_step=gate_step, num_inference_steps=inference_step, lcm=True ).to("cuda") image = pipe.tgate( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.", gate_step=gate_step, num_inference_steps=inference_step ).images[0] ``` </hfoption> </hfoptions> T-GATE also supports [`StableDiffusionPipeline`] and [PixArt-alpha/PixArt-LCM-XL-2-1024-MS](https://hf.co/PixArt-alpha/PixArt-LCM-XL-2-1024-MS). ## Benchmarks | Model | MACs | Param | Latency | Zero-shot 10K-FID on MS-COCO | |-----------------------|----------|-----------|---------|---------------------------| | SD-1.5 | 16.938T | 859.520M | 7.032s | 23.927 | | SD-1.5 w/ T-GATE | 9.875T | 815.557M | 4.313s | 20.789 | | SD-2.1 | 38.041T | 865.785M | 16.121s | 22.609 | | SD-2.1 w/ T-GATE | 22.208T | 815.433 M | 9.878s | 19.940 | | SD-XL | 149.438T | 2.570B | 53.187s | 24.628 | | SD-XL w/ T-GATE | 84.438T | 2.024B | 27.932s | 22.738 | | Pixart-Alpha | 107.031T | 611.350M | 61.502s | 38.669 | | Pixart-Alpha w/ T-GATE | 65.318T | 462.585M | 37.867s | 35.825 | | DeepCache (SD-XL) | 57.888T | - | 19.931s | 23.755 | | DeepCache w/ T-GATE | 43.868T | - | 14.666s | 23.999 | | LCM (SD-XL) | 11.955T | 2.570B | 3.805s | 25.044 | | LCM w/ T-GATE | 11.171T | 2.024B | 3.533s | 25.028 | | LCM (Pixart-Alpha) | 8.563T | 611.350M | 4.733s | 36.086 | | LCM w/ T-GATE | 7.623T | 462.585M | 4.543s | 37.048 | The latency is tested on an NVIDIA 1080TI, MACs and Params are calculated with [calflops](https://github.com/MrYxJ/calculate-flops.pytorch), and the FID is calculated with [PytorchFID](https://github.com/mseitzer/pytorch-fid).
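For completeness, a sketch of the same wrapping pattern applied to [`StableDiffusionPipeline`] is shown below. It assumes `TgateSDLoader` exposes the same interface as `TgateSDXLLoader` above; the checkpoint, gate step, and prompt are only illustrative, so check the T-GATE repository for the exact signature:

```py
import torch
from diffusers import StableDiffusionPipeline
from tgate import TgateSDLoader  # assumed to mirror the TgateSDXLLoader interface shown above

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

gate_step = 10
inference_step = 25
# Wrap the pipeline so the cross-attention is reused after `gate_step` steps
pipe = TgateSDLoader(
    pipe,
    gate_step=gate_step,
    num_inference_steps=inference_step,
).to("cuda")

image = pipe.tgate(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
    gate_step=gate_step,
    num_inference_steps=inference_step,
).images[0]
```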
diffusers/docs/source/en/optimization/tgate.md/0
{ "file_path": "diffusers/docs/source/en/optimization/tgate.md", "repo_id": "diffusers", "token_count": 2963 }
108
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LoRA <Tip warning={true}> This is experimental and the API may change in the future. </Tip> [LoRA (Low-Rank Adaptation of Large Language Models)](https://hf.co/papers/2106.09685) is a popular and lightweight training technique that significantly reduces the number of trainable parameters. It works by inserting a smaller number of new weights into the model and only these are trained. This makes training with LoRA much faster, memory-efficient, and produces smaller model weights (a few hundred MBs), which are easier to store and share. LoRA can also be combined with other training techniques like DreamBooth to speedup training. <Tip> LoRA is very versatile and supported for [DreamBooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py), [Kandinsky 2.2](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py), [Stable Diffusion XL](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora_sdxl.py), [text-to-image](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py), and [Wuerstchen](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py). </Tip> This guide will explore the [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Navigate to the example folder with the training script and install the required dependencies for the script you're using: <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash cd examples/text_to_image pip install -r requirements.txt ``` </hfoption> <hfoption id="Flax"> ```bash cd examples/text_to_image pip install -r requirements_flax.txt ``` </hfoption> </hfoptions> <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. 
</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training script has many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L85) function. Default values that work pretty well are provided for most parameters, but you can also set your own values in the training command if you'd like.

For example, to increase the number of epochs to train:

```bash
accelerate launch train_text_to_image_lora.py \
  --num_train_epochs=150 \
```

Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the LoRA relevant parameters:

- `--rank`: the inner dimension of the low-rank matrices to train; a higher rank means more trainable parameters
- `--learning_rate`: the default learning rate is 1e-4, but with LoRA, you can use a higher learning rate

## Training script

The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371) function, and if you need to adapt the training script, this is where you'll make your changes.

As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the LoRA relevant parts of the script.

<hfoptions id="lora">
<hfoption id="UNet">

Diffusers uses [`~peft.LoraConfig`] from the [PEFT](https://hf.co/docs/peft) library to set up the parameters of the LoRA adapter such as the rank, alpha, and which modules to insert the LoRA weights into. The adapter is added to the UNet, and only the LoRA layers are filtered for optimization in `lora_layers`.

```py
unet_lora_config = LoraConfig(
    r=args.rank,
    lora_alpha=args.rank,
    init_lora_weights="gaussian",
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)

unet.add_adapter(unet_lora_config)
lora_layers = filter(lambda p: p.requires_grad, unet.parameters())
```

</hfoption>
<hfoption id="text encoder">

Diffusers also supports finetuning the text encoder with LoRA from the [PEFT](https://hf.co/docs/peft) library when necessary, such as when finetuning Stable Diffusion XL (SDXL).
The [`~peft.LoraConfig`] is used to configure the parameters of the LoRA adapter which are then added to the text encoder, and only the LoRA layers are filtered for training. ```py text_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, init_lora_weights="gaussian", target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], ) text_encoder_one.add_adapter(text_lora_config) text_encoder_two.add_adapter(text_lora_config) text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) ``` </hfoption> </hfoptions> The [optimizer](https://github.com/huggingface/diffusers/blob/e4b8f173b97731686e290b2eb98e7f5df2b1b322/examples/text_to_image/train_text_to_image_lora.py#L529) is initialized with the `lora_layers` because these are the only weights that'll be optimized: ```py optimizer = optimizer_cls( lora_layers, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Aside from setting up the LoRA layers, the training script is more or less the same as train_text_to_image.py! ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters. Set the environment variables `MODEL_NAME` and `DATASET_NAME` to the model and dataset respectively. You should also specify where to save the model in `OUTPUT_DIR`, and the name of the model to save to on the Hub with `HUB_MODEL_ID`. The script creates and saves the following files to your repository: - saved model checkpoints - `pytorch_lora_weights.safetensors` (the trained LoRA weights) If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. <Tip warning={true}> A full training run takes ~5 hours on a 2080 Ti GPU with 11GB of VRAM. </Tip> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="/sddata/finetune/lora/naruto" export HUB_MODEL_ID="naruto-lora" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --dataloader_num_workers=8 \ --resolution=512 \ --center_crop \ --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=15000 \ --learning_rate=1e-04 \ --max_grad_norm=1 \ --lr_scheduler="cosine" \ --lr_warmup_steps=0 \ --output_dir=${OUTPUT_DIR} \ --push_to_hub \ --hub_model_id=${HUB_MODEL_ID} \ --report_to=wandb \ --checkpointing_steps=500 \ --validation_prompt="A naruto with blue eyes." \ --seed=1337 ``` Once training has been completed, you can use your model for inference: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") pipeline.load_lora_weights("path/to/lora/model", weight_name="pytorch_lora_weights.safetensors") image = pipeline("A naruto with blue eyes").images[0] ``` ## Next steps Congratulations on training a new model with LoRA! 
To learn more about how to use your new model, the following guides may be helpful: - Learn how to [load different LoRA formats](../using-diffusers/loading_adapters#LoRA) trained using community trainers like Kohya and TheLastBen. - Learn how to use and [combine multiple LoRA's](../tutorials/using_peft_for_inference) with PEFT for inference.
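As a quick preview of what the adapter-combination guide covers, a minimal sketch might look like the following. The adapter paths, weight file names, and adapter names below are placeholders for your own trained LoRAs:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load two LoRA adapters and give each one a name
pipeline.load_lora_weights("path/to/first/lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="style")
pipeline.load_lora_weights("path/to/second/lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="character")

# Blend the adapters with per-adapter weights
pipeline.set_adapters(["style", "character"], adapter_weights=[0.7, 0.8])

image = pipeline("A naruto with blue eyes").images[0]
```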
diffusers/docs/source/en/training/lora.md/0
{ "file_path": "diffusers/docs/source/en/training/lora.md", "repo_id": "diffusers", "token_count": 3340 }
109
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Controlled generation Controlling outputs generated by diffusion models has been long pursued by the community and is now an active research topic. In many popular diffusion models, subtle changes in inputs, both images and text prompts, can drastically change outputs. In an ideal world we want to be able to control how semantics are preserved and changed. Most examples of preserving semantics reduce to being able to accurately map a change in input to a change in output. I.e. adding an adjective to a subject in a prompt preserves the entire image, only modifying the changed subject. Or, image variation of a particular subject preserves the subject's pose. Additionally, there are qualities of generated images that we would like to influence beyond semantic preservation. I.e. in general, we would like our outputs to be of good quality, adhere to a particular style, or be realistic. We will document some of the techniques `diffusers` supports to control generation of diffusion models. Much is cutting edge research and can be quite nuanced. If something needs clarifying or you have a suggestion, don't hesitate to open a discussion on the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or a [GitHub issue](https://github.com/huggingface/diffusers/issues). We provide a high level explanation of how the generation can be controlled as well as a snippet of the technicals. For more in depth explanations on the technicals, the original papers which are linked from the pipelines are always the best resources. Depending on the use case, one should choose a technique accordingly. In many cases, these techniques can be combined. For example, one can combine Textual Inversion with SEGA to provide more semantic guidance to the outputs generated using Textual Inversion. Unless otherwise mentioned, these are techniques that work with existing models and don't require their own weights. 1. [InstructPix2Pix](#instruct-pix2pix) 2. [Pix2Pix Zero](#pix2pix-zero) 3. [Attend and Excite](#attend-and-excite) 4. [Semantic Guidance](#semantic-guidance-sega) 5. [Self-attention Guidance](#self-attention-guidance-sag) 6. [Depth2Image](#depth2image) 7. [MultiDiffusion Panorama](#multidiffusion-panorama) 8. [DreamBooth](#dreambooth) 9. [Textual Inversion](#textual-inversion) 10. [ControlNet](#controlnet) 11. [Prompt Weighting](#prompt-weighting) 12. [Custom Diffusion](#custom-diffusion) 13. [Model Editing](#model-editing) 14. [DiffEdit](#diffedit) 15. [T2I-Adapter](#t2i-adapter) 16. [FABRIC](#fabric) For convenience, we provide a table to denote which methods are inference-only and which require fine-tuning/training. 
| **Method** | **Inference only** | **Requires training /<br> fine-tuning** | **Comments** | | :-------------------------------------------------: | :----------------: | :-------------------------------------: | :---------------------------------------------------------------------------------------------: | | [InstructPix2Pix](#instruct-pix2pix) | ✅ | ❌ | Can additionally be<br>fine-tuned for better <br>performance on specific <br>edit instructions. | | [Pix2Pix Zero](#pix2pix-zero) | ✅ | ❌ | | | [Attend and Excite](#attend-and-excite) | ✅ | ❌ | | | [Semantic Guidance](#semantic-guidance-sega) | ✅ | ❌ | | | [Self-attention Guidance](#self-attention-guidance-sag) | ✅ | ❌ | | | [Depth2Image](#depth2image) | ✅ | ❌ | | | [MultiDiffusion Panorama](#multidiffusion-panorama) | ✅ | ❌ | | | [DreamBooth](#dreambooth) | ❌ | ✅ | | | [Textual Inversion](#textual-inversion) | ❌ | ✅ | | | [ControlNet](#controlnet) | ✅ | ❌ | A ControlNet can be <br>trained/fine-tuned on<br>a custom conditioning. | | [Prompt Weighting](#prompt-weighting) | ✅ | ❌ | | | [Custom Diffusion](#custom-diffusion) | ❌ | ✅ | | | [Model Editing](#model-editing) | ✅ | ❌ | | | [DiffEdit](#diffedit) | ✅ | ❌ | | | [T2I-Adapter](#t2i-adapter) | ✅ | ❌ | | | [Fabric](#fabric) | ✅ | ❌ | | ## InstructPix2Pix [Paper](https://arxiv.org/abs/2211.09800) [InstructPix2Pix](../api/pipelines/pix2pix) is fine-tuned from Stable Diffusion to support editing input images. It takes as inputs an image and a prompt describing an edit, and it outputs the edited image. InstructPix2Pix has been explicitly trained to work well with [InstructGPT](https://openai.com/blog/instruction-following/)-like prompts. ## Pix2Pix Zero [Paper](https://arxiv.org/abs/2302.03027) [Pix2Pix Zero](../api/pipelines/pix2pix_zero) allows modifying an image so that one concept or subject is translated to another one while preserving general image semantics. The denoising process is guided from one conceptual embedding towards another conceptual embedding. The intermediate latents are optimized during the denoising process to push the attention maps towards reference attention maps. The reference attention maps are from the denoising process of the input image and are used to encourage semantic preservation. Pix2Pix Zero can be used both to edit synthetic images as well as real images. - To edit synthetic images, one first generates an image given a caption. Next, we generate image captions for the concept that shall be edited and for the new target concept. We can use a model like [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) for this purpose. Then, "mean" prompt embeddings for both the source and target concepts are created via the text encoder. Finally, the pix2pix-zero algorithm is used to edit the synthetic image. - To edit a real image, one first generates an image caption using a model like [BLIP](https://huggingface.co/docs/transformers/model_doc/blip). Then one applies DDIM inversion on the prompt and image to generate "inverse" latents. Similar to before, "mean" prompt embeddings for both source and target concepts are created and finally the pix2pix-zero algorithm in combination with the "inverse" latents is used to edit the image. <Tip> Pix2Pix Zero is the first model that allows "zero-shot" image editing. This means that the model can edit an image in less than a minute on a consumer GPU as shown [here](../api/pipelines/pix2pix_zero#usage-example). 
</Tip> As mentioned above, Pix2Pix Zero includes optimizing the latents (and not any of the UNet, VAE, or the text encoder) to steer the generation toward a specific concept. This means that the overall pipeline might require more memory than a standard [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img). <Tip> An important distinction between methods like InstructPix2Pix and Pix2Pix Zero is that the former involves fine-tuning the pre-trained weights while the latter does not. This means that you can apply Pix2Pix Zero to any of the available Stable Diffusion models. </Tip> ## Attend and Excite [Paper](https://arxiv.org/abs/2301.13826) [Attend and Excite](../api/pipelines/attend_and_excite) allows subjects in the prompt to be faithfully represented in the final image. A set of token indices are given as input, corresponding to the subjects in the prompt that need to be present in the image. During denoising, each token index is guaranteed to have a minimum attention threshold for at least one patch of the image. The intermediate latents are iteratively optimized during the denoising process to strengthen the attention of the most neglected subject token until the attention threshold is passed for all subject tokens. Like Pix2Pix Zero, Attend and Excite also involves a mini optimization loop (leaving the pre-trained weights untouched) in its pipeline and can require more memory than the usual [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img). ## Semantic Guidance (SEGA) [Paper](https://arxiv.org/abs/2301.12247) [SEGA](../api/pipelines/semantic_stable_diffusion) allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait. Similar to how classifier free guidance provides guidance via empty prompt inputs, SEGA provides guidance on conceptual prompts. Multiple of these conceptual prompts can be applied simultaneously. Each conceptual prompt can either add or remove their concept depending on if the guidance is applied positively or negatively. Unlike Pix2Pix Zero or Attend and Excite, SEGA directly interacts with the diffusion process instead of performing any explicit gradient-based optimization. ## Self-attention Guidance (SAG) [Paper](https://arxiv.org/abs/2210.00939) [Self-attention Guidance](../api/pipelines/self_attention_guidance) improves the general quality of images. SAG provides guidance from predictions not conditioned on high-frequency details to fully conditioned images. The high frequency details are extracted out of the UNet self-attention maps. ## Depth2Image [Project](https://huggingface.co/stabilityai/stable-diffusion-2-depth) [Depth2Image](../api/pipelines/stable_diffusion/depth2img) is fine-tuned from Stable Diffusion to better preserve semantics for text guided image variation. It conditions on a monocular depth estimate of the original image. ## MultiDiffusion Panorama [Paper](https://arxiv.org/abs/2302.08113) [MultiDiffusion Panorama](../api/pipelines/panorama) defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes. 
In this way, MultiDiffusion Panorama makes it possible to generate high-quality images at arbitrary aspect ratios (e.g., panoramas).

## Fine-tuning your own models

In addition to pre-trained models, Diffusers has training scripts for fine-tuning models on user-provided data.

## DreamBooth

[Project](https://dreambooth.github.io/)

[DreamBooth](../training/dreambooth) fine-tunes a model to teach it about a new subject. For example, a few pictures of a person can be used to generate images of that person in different styles.

## Textual Inversion

[Paper](https://arxiv.org/abs/2208.01618)

[Textual Inversion](../training/text_inversion) fine-tunes a model to teach it about a new concept. For example, a few pictures of a style of artwork can be used to generate images in that style.

## ControlNet

[Paper](https://arxiv.org/abs/2302.05543)

[ControlNet](../api/pipelines/controlnet) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained ControlNets trained on different conditionings such as edge detection, scribbles, depth maps, and semantic segmentations.

## Prompt Weighting

[Prompt weighting](../using-diffusers/weighted_prompts) is a simple technique that puts more attention weight on certain parts of the text input.

## Custom Diffusion

[Paper](https://arxiv.org/abs/2212.04488)

[Custom Diffusion](../training/custom_diffusion) only fine-tunes the cross-attention maps of a pre-trained text-to-image diffusion model. It also allows for performing Textual Inversion at the same time, and it supports multi-concept training by design. Like DreamBooth and Textual Inversion, Custom Diffusion is used to teach a pre-trained text-to-image diffusion model about new concepts so it can generate outputs involving the concept(s) of interest.

## Model Editing

[Paper](https://arxiv.org/abs/2303.08084)

The [text-to-image model editing pipeline](../api/pipelines/model_editing) helps you mitigate some of the incorrect implicit assumptions a pre-trained text-to-image diffusion model might make about the subjects present in the input prompt. For example, if you prompt Stable Diffusion to generate images for "A pack of roses", the roses in the generated images are more likely to be red. This pipeline helps you change that assumption.

## DiffEdit

[Paper](https://arxiv.org/abs/2210.11427)

[DiffEdit](../api/pipelines/diffedit) allows for semantic editing of input images along with input prompts while preserving the original input images as much as possible.

## T2I-Adapter

[Paper](https://arxiv.org/abs/2302.08453)

[T2I-Adapter](../api/pipelines/stable_diffusion/adapter) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained adapters trained on different conditionings such as edge detection, sketch, depth maps, and semantic segmentations.

## Fabric

[Paper](https://arxiv.org/abs/2307.10159)

[Fabric](https://github.com/huggingface/diffusers/tree/442017ccc877279bcf24fbe92f92d3d0def191b6/examples/community#stable-diffusion-fabric-pipeline) is a training-free approach applicable to a wide range of popular diffusion models. It exploits the self-attention layer present in the most widely used architectures to condition the diffusion process on a set of feedback images.
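Since Fabric is distributed as a community pipeline rather than a core Diffusers class, it is typically loaded through the `custom_pipeline` argument. The snippet below is only a rough sketch: the pipeline identifier and the `liked`/`disliked` feedback arguments are assumptions based on the community pipeline's README and may differ between versions.

```py
import torch
from diffusers import DiffusionPipeline

# load the FABRIC community pipeline on top of a Stable Diffusion checkpoint
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="pipeline_fabric",  # assumed community pipeline id
    torch_dtype=torch.float16,
).to("cuda")

# feedback images (PIL images) steer generation towards liked and away from disliked results
liked_images = []     # e.g. previously generated images you liked
disliked_images = []  # e.g. previously generated images you disliked

image = pipe(
    "a portrait of a corgi wearing a crown",
    liked=liked_images,
    disliked=disliked_images,
).images[0]
```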
diffusers/docs/source/en/using-diffusers/controlling_generation.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/controlling_generation.md", "repo_id": "diffusers", "token_count": 6311 }
110
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Model files and layouts [[open-in-colab]] Diffusion models are saved in various file types and organized in different layouts. Diffusers stores model weights as safetensors files in *Diffusers-multifolder* layout and it also supports loading files (like safetensors and ckpt files) from a *single-file* layout which is commonly used in the diffusion ecosystem. Each layout has its own benefits and use cases, and this guide will show you how to load the different files and layouts, and how to convert them. ## Files PyTorch model weights are typically saved with Python's [pickle](https://docs.python.org/3/library/pickle.html) utility as ckpt or bin files. However, pickle is not secure and pickled files may contain malicious code that can be executed. This vulnerability is a serious concern given the popularity of model sharing. To address this security issue, the [Safetensors](https://hf.co/docs/safetensors) library was developed as a secure alternative to pickle, which saves models as safetensors files. ### safetensors > [!TIP] > Learn more about the design decisions and why safetensor files are preferred for saving and loading model weights in the [Safetensors audited as really safe and becoming the default](https://blog.eleuther.ai/safetensors-security-audit/) blog post. [Safetensors](https://hf.co/docs/safetensors) is a safe and fast file format for securely storing and loading tensors. Safetensors restricts the header size to limit certain types of attacks, supports lazy loading (useful for distributed setups), and has generally faster loading speeds. Make sure you have the [Safetensors](https://hf.co/docs/safetensors) library installed. ```py !pip install safetensors ``` Safetensors stores weights in a safetensors file. Diffusers loads safetensors files by default if they're available and the Safetensors library is installed. There are two ways safetensors files can be organized: 1. Diffusers-multifolder layout: there may be several separate safetensors files, one for each pipeline component (text encoder, UNet, VAE), organized in subfolders (check out the [runwayml/stable-diffusion-v1-5](https://hf.co/runwayml/stable-diffusion-v1-5/tree/main) repository as an example) 2. single-file layout: all the model weights may be saved in a single file (check out the [WarriorMama777/OrangeMixs](https://hf.co/WarriorMama777/OrangeMixs/tree/main/Models/AbyssOrangeMix) repository as an example) <hfoptions id="safetensors"> <hfoption id="multifolder"> Use the [`~DiffusionPipeline.from_pretrained`] method to load a model with safetensors files stored in multiple folders. ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", use_safetensors=True ) ``` </hfoption> <hfoption id="single file"> Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to load a model with all the weights stored in a single safetensors file. 
```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" ) ``` </hfoption> </hfoptions> #### LoRA files [LoRA](https://hf.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a lightweight adapter that is fast and easy to train, making them especially popular for generating images in a certain way or style. These adapters are commonly stored in a safetensors file, and are widely popular on model sharing platforms like [civitai](https://civitai.com/). LoRAs are loaded into a base model with the [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] method. ```py from diffusers import StableDiffusionXLPipeline import torch # base model pipeline = StableDiffusionXLPipeline.from_pretrained( "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") # download LoRA weights !wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors # load LoRA weights pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors") prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop" negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" image = pipeline( prompt=prompt, negative_prompt=negative_prompt, generator=torch.manual_seed(0), ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/blueprint-lora.png"/> </div> ### ckpt > [!WARNING] > Pickled files may be unsafe because they can be exploited to execute malicious code. It is recommended to use safetensors files instead where possible, or convert the weights to safetensors files. PyTorch's [torch.save](https://pytorch.org/docs/stable/generated/torch.save.html) function uses Python's [pickle](https://docs.python.org/3/library/pickle.html) utility to serialize and save models. These files are saved as a ckpt file and they contain the entire model's weights. Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to directly load a ckpt file. ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt" ) ``` ## Storage layout There are two ways model files are organized, either in a Diffusers-multifolder layout or in a single-file layout. The Diffusers-multifolder layout is the default, and each component file (text encoder, UNet, VAE) is stored in a separate subfolder. Diffusers also supports loading models from a single-file layout where all the components are bundled together. ### Diffusers-multifolder The Diffusers-multifolder layout is the default storage layout for Diffusers. Each component's (text encoder, UNet, VAE) weights are stored in a separate subfolder. The weights can be stored as safetensors or ckpt files. 
<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-layout.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">multifolder layout</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-unet.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">UNet subfolder</figcaption>
  </div>
</div>

To load from Diffusers-multifolder layout, use the [`~DiffusionPipeline.from_pretrained`] method.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using the Diffusers-multifolder layout include:

1. Faster to load each component file individually or in parallel.
2. Reduced memory usage because you only load the components you need. For example, models like [SDXL Turbo](https://hf.co/stabilityai/sdxl-turbo), [SDXL Lightning](https://hf.co/ByteDance/SDXL-Lightning), and [Hyper-SD](https://hf.co/ByteDance/Hyper-SD) have the same components except for the UNet. You can reuse their shared components with the [`~DiffusionPipeline.from_pipe`] method without consuming any additional memory (take a look at the [Reuse a pipeline](./loading#reuse-a-pipeline) guide) and only load the UNet. This way, you don't need to download redundant components and unnecessarily use more memory.

```py
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler

# download one model
sdxl_pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")

# switch UNet for another model
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/sdxl-turbo",
    subfolder="unet",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True
)
# reuse all the same components in the new model except for the UNet
turbo_pipeline = StableDiffusionXLPipeline.from_pipe(
    sdxl_pipeline, unet=unet,
).to("cuda")
turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config(
    turbo_pipeline.scheduler.config,
    timestep_spacing="trailing"
)
image = turbo_pipeline(
    "an astronaut riding a unicorn on mars",
    num_inference_steps=1,
    guidance_scale=0.0,
).images[0]
image
```

3. Reduced storage requirements because if a component, such as the SDXL [VAE](https://hf.co/madebyollin/sdxl-vae-fp16-fix), is shared across multiple models, you only need to download and store a single copy of it instead of downloading and storing it multiple times. For 10 SDXL models, this can save ~3.5GB of storage. The storage savings are even greater for newer models like PixArt Sigma, where the [text encoder](https://hf.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS/tree/main/text_encoder) alone is ~19GB!
4. Flexibility to replace a component in the model with a newer or better version.

```py
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```
5. More visibility and information about a model's components, which are stored in a [config.json](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json) file in each component subfolder.

### Single-file

The single-file layout stores all the model weights in a single file. All the model components' (text encoder, UNet, VAE) weights are kept together instead of separately in subfolders. This can be a safetensors or ckpt file.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/single-file-layout.png"/>
</div>

To load from a single-file layout, use the [`~loaders.FromSingleFileMixin.from_single_file`] method.

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using a single-file layout include:

1. Easy compatibility with diffusion interfaces such as [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) which commonly use a single-file layout.
2. Easier to manage (download and share) a single file.

## Convert layout and files

Diffusers provides many scripts and methods to convert storage layouts and file formats to enable broader support across the diffusion ecosystem.

Take a look at the [diffusers/scripts](https://github.com/huggingface/diffusers/tree/main/scripts) collection to find a script that fits your conversion needs.

> [!TIP]
> Scripts that have "`to_diffusers`" appended at the end mean they convert a model to the Diffusers-multifolder layout. Each script has its own specific set of arguments for configuring the conversion, so make sure you check what arguments are available!

For example, to convert a Stable Diffusion XL model stored in Diffusers-multifolder layout to a single-file layout, run the [convert_diffusers_to_original_sdxl.py](https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_sdxl.py) script. Provide the path to the model to convert, and the path to save the converted model to. You can optionally specify whether you want to save the model as a safetensors file and whether to save the model in half-precision.

```bash
python convert_diffusers_to_original_sdxl.py --model_path path/to/model/to/convert --checkpoint_path path/to/save/model/to --use_safetensors
```

You can also save a model to Diffusers-multifolder layout with the [`~DiffusionPipeline.save_pretrained`] method. This creates a directory for you if it doesn't already exist, and it also saves the files as safetensors files by default.

```py
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
)
# pass the directory to save the converted, multifolder model to
pipeline.save_pretrained("sdxl-base-1.0-multifolder")
```

Lastly, there are also Spaces, such as [SD To Diffusers](https://hf.co/spaces/diffusers/sd-to-diffusers) and [SD-XL To Diffusers](https://hf.co/spaces/diffusers/sdxl-to-diffusers), that provide a more user-friendly interface for converting models to Diffusers-multifolder layout. This is the easiest and most convenient option for converting layouts, and it'll open a PR on your model repository with the converted files.
However, this option is not as reliable as running a script, and the Space may fail for more complicated models.

## Single-file layout usage

Now that you're familiar with the differences between the Diffusers-multifolder and single-file layouts, this section shows you how to load models and pipeline components, customize configuration options for loading, and load local files with the [`~loaders.FromSingleFileMixin.from_single_file`] method.

### Load a pipeline or model

Pass the file path of the pipeline or model to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load it.

<hfoptions id="pipeline-model">
<hfoption id="pipeline">

```py
from diffusers import StableDiffusionXLPipeline

ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path)
```

</hfoption>
<hfoption id="model">

```py
from diffusers import StableCascadeUNet

ckpt_path = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors"
model = StableCascadeUNet.from_single_file(ckpt_path)
```

</hfoption>
</hfoptions>

Customize components in the pipeline by passing them directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. For example, you can use a different scheduler in a pipeline.

```py
from diffusers import StableDiffusionXLPipeline, DDIMScheduler

ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
scheduler = DDIMScheduler()
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, scheduler=scheduler)
```

Or you could use a ControlNet model in the pipeline.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipeline = StableDiffusionControlNetPipeline.from_single_file(ckpt_path, controlnet=controlnet)
```

### Customize configuration options

Models have a configuration file that defines their attributes, like the number of inputs in a UNet. A pipeline's configuration options are available in its class. For example, if you look at the [`StableDiffusionXLInstructPix2PixPipeline`] class, there is an option to scale the image latents with the `is_cosxl_edit` parameter.

These configuration files can be found in the model's Hub repository or another location from which the configuration file originated (for example, a GitHub repository or locally on your device).

<hfoptions id="config-file">
<hfoption id="Hub configuration file">

> [!TIP]
> The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically maps the checkpoint to the appropriate model repository, but there are cases where it is useful to use the `config` parameter. For example, if the model components in the checkpoint are different from the original checkpoint or if a checkpoint doesn't have the necessary metadata to correctly determine the configuration to use for the pipeline.

The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically determines the configuration to use from the configuration file in the model repository. You could also explicitly specify the configuration to use by providing the repository id to the `config` parameter.
```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/segmind/SSD-1B/blob/main/SSD-1B.safetensors" repo_id = "segmind/SSD-1B" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, config=repo_id) ``` The model loads the configuration file for the [UNet](https://huggingface.co/segmind/SSD-1B/blob/main/unet/config.json), [VAE](https://huggingface.co/segmind/SSD-1B/blob/main/vae/config.json), and [text encoder](https://huggingface.co/segmind/SSD-1B/blob/main/text_encoder/config.json) from their respective subfolders in the repository. </hfoption> <hfoption id="original configuration file"> The [`~loaders.FromSingleFileMixin.from_single_file`] method can also load the original configuration file of a pipeline that is stored elsewhere. Pass a local path or URL of the original configuration file to the `original_config` parameter. ```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" original_config = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, original_config=original_config) ``` > [!TIP] > Diffusers attempts to infer the pipeline components based on the type signatures of the pipeline class when you use `original_config` with `local_files_only=True`, instead of fetching the configuration files from the model repository on the Hub. This prevents backward breaking changes in code that can't connect to the internet to fetch the necessary configuration files. > > This is not as reliable as providing a path to a local model repository with the `config` parameter, and might lead to errors during pipeline configuration. To avoid errors, run the pipeline with `local_files_only=False` once to download the appropriate pipeline configuration files to the local cache. </hfoption> </hfoptions> While the configuration files specify the pipeline or models default parameters, you can override them by providing the parameters directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. Any parameter supported by the model or pipeline class can be configured in this way. <hfoptions id="override"> <hfoption id="pipeline"> For example, to scale the image latents in [`StableDiffusionXLInstructPix2PixPipeline`] pass the `is_cosxl_edit` parameter. ```python from diffusers import StableDiffusionXLInstructPix2PixPipeline ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors" pipeline = StableDiffusionXLInstructPix2PixPipeline.from_single_file(ckpt_path, config="diffusers/sdxl-instructpix2pix-768", is_cosxl_edit=True) ``` </hfoption> <hfoption id="model"> For example, to upcast the attention dimensions in a [`UNet2DConditionModel`] pass the `upcast_attention` parameter. ```python from diffusers import UNet2DConditionModel ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" model = UNet2DConditionModel.from_single_file(ckpt_path, upcast_attention=True) ``` </hfoption> </hfoptions> ### Local files In Diffusers>=v0.28.0, the [`~loaders.FromSingleFileMixin.from_single_file`] method attempts to configure a pipeline or model by inferring the model type from the keys in the checkpoint file. 
The inferred model type is used to determine the appropriate model repository on the Hugging Face Hub to configure the model or pipeline. For example, any single-file checkpoint based on the Stable Diffusion XL base model will use the [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model repository to configure the pipeline.

But if you're working in an environment with restricted internet access, you should download the configuration files with the [`~huggingface_hub.snapshot_download`] function, and the model checkpoint with the [`~huggingface_hub.hf_hub_download`] function. By default, these files are downloaded to the Hugging Face Hub [cache directory](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache), but you can specify a preferred directory to download the files to with the `local_dir` parameter.

Pass the configuration and checkpoint paths to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load locally.

<hfoptions id="local">
<hfoption id="Hub cache directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"]
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
<hfoption id="specific local directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir="my_local_config"
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
</hfoptions>

#### Local files without symlink

> [!TIP]
> In huggingface_hub>=v0.23.0, the `local_dir_use_symlinks` argument isn't necessary for the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

The [`~loaders.FromSingleFileMixin.from_single_file`] method relies on the [huggingface_hub](https://hf.co/docs/huggingface_hub/index) caching mechanism to fetch and store checkpoints and configuration files for models and pipelines.

If you're working with a file system that does not support symlinking, you should download the checkpoint file to a local directory first, and disable symlinking with the `local_dir_use_symlinks=False` parameter in the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

```python
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints",
    local_dir_use_symlinks=False
)
print("My local checkpoint: ", my_local_checkpoint_path)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir="my_local_config",
    local_dir_use_symlinks=False,
)
print("My local config: ", my_local_config_path)
```

Then you can pass the local paths to the `pretrained_model_link_or_path` and `config` parameters.
```python pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True) ```
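Once both paths resolve locally, the pipeline behaves like any other pipeline and no network access is required. A quick sanity check might look like the following sketch (the prompt, device placement, and output filename are just illustrations):

```python
pipeline = pipeline.to("cuda")

image = pipeline(
    "an astronaut riding a horse on the moon",
    num_inference_steps=30,
).images[0]
image.save("local-checkpoint-test.png")
```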
diffusers/docs/source/en/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 7966 }
111
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Prompt techniques [[open-in-colab]] Prompts are important because they describe what you want a diffusion model to generate. The best prompts are detailed, specific, and well-structured to help the model realize your vision. But crafting a great prompt takes time and effort and sometimes it may not be enough because language and words can be imprecise. This is where you need to boost your prompt with other techniques, such as prompt enhancing and prompt weighting, to get the results you want. This guide will show you how you can use these prompt techniques to generate high-quality images with lower effort and adjust the weight of certain keywords in a prompt. ## Prompt engineering > [!TIP] > This is not an exhaustive guide on prompt engineering, but it will help you understand the necessary parts of a good prompt. We encourage you to continue experimenting with different prompts and combine them in new ways to see what works best. As you write more prompts, you'll develop an intuition for what works and what doesn't! New diffusion models do a pretty good job of generating high-quality images from a basic prompt, but it is still important to create a well-written prompt to get the best results. Here are a few tips for writing a good prompt: 1. What is the image *medium*? Is it a photo, a painting, a 3D illustration, or something else? 2. What is the image *subject*? Is it a person, animal, object, or scene? 3. What *details* would you like to see in the image? This is where you can get really creative and have a lot of fun experimenting with different words to bring your image to life. For example, what is the lighting like? What is the vibe and aesthetic? What kind of art or illustration style are you looking for? The more specific and precise words you use, the better the model will understand what you want to generate. <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/plain-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"A photo of a banana-shaped couch in a living room"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/detail-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"A vibrant yellow banana-shaped couch sits in a cozy living room, its curve cradling a pile of colorful cushions. on the wooden floor, a patterned rug adds a touch of eclectic charm, and a potted plant sits in the corner, reaching towards the sunlight filtering through the windows"</figcaption> </div> </div> ## Prompt enhancing with GPT2 Prompt enhancing is a technique for quickly improving prompt quality without spending too much effort constructing one. 
It uses a model like GPT2 pretrained on Stable Diffusion text prompts to automatically enrich a prompt with additional important keywords to generate high-quality images. The technique works by curating a list of specific keywords and forcing the model to generate those words to enhance the original prompt. This way, your prompt can be "a cat" and GPT2 can enhance the prompt to "cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain quality sharp focus beautiful detailed intricate stunning amazing epic". > [!TIP] > You should also use a [*offset noise*](https://www.crosslabs.org//blog/diffusion-with-offset-noise) LoRA to improve the contrast in bright and dark images and create better lighting overall. This [LoRA](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_offset_example-lora_1.0.safetensors) is available from [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0). Start by defining certain styles and a list of words (you can check out a more comprehensive list of [words](https://hf.co/LykosAI/GPT-Prompt-Expansion-Fooocus-v2/blob/main/positive.txt) and [styles](https://github.com/lllyasviel/Fooocus/tree/main/sdxl_styles) used by Fooocus) to enhance a prompt with. ```py import torch from transformers import GenerationConfig, GPT2LMHeadModel, GPT2Tokenizer, LogitsProcessor, LogitsProcessorList from diffusers import StableDiffusionXLPipeline styles = { "cinematic": "cinematic film still of {prompt}, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain", "anime": "anime artwork of {prompt}, anime style, key visual, vibrant, studio anime, highly detailed", "photographic": "cinematic photo of {prompt}, 35mm photograph, film, professional, 4k, highly detailed", "comic": "comic of {prompt}, graphic illustration, comic art, graphic novel art, vibrant, highly detailed", "lineart": "line art drawing {prompt}, professional, sleek, modern, minimalist, graphic, line art, vector graphics", "pixelart": " pixel-art {prompt}, low-res, blocky, pixel art style, 8-bit graphics", } words = [ "aesthetic", "astonishing", "beautiful", "breathtaking", "composition", "contrasted", "epic", "moody", "enhanced", "exceptional", "fascinating", "flawless", "glamorous", "glorious", "illumination", "impressive", "improved", "inspirational", "magnificent", "majestic", "hyperrealistic", "smooth", "sharp", "focus", "stunning", "detailed", "intricate", "dramatic", "high", "quality", "perfect", "light", "ultra", "highly", "radiant", "satisfying", "soothing", "sophisticated", "stylish", "sublime", "terrific", "touching", "timeless", "wonderful", "unbelievable", "elegant", "awesome", "amazing", "dynamic", "trendy", ] ``` You may have noticed in the `words` list, there are certain words that can be paired together to create something more meaningful. For example, the words "high" and "quality" can be combined to create "high quality". Let's pair these words together and remove the words that can't be paired. 
```py
word_pairs = ["highly detailed", "high quality", "enhanced quality", "perfect composition", "dynamic light"]

def find_and_order_pairs(s, pairs):
    words = s.split()
    found_pairs = []
    for pair in pairs:
        pair_words = pair.split()
        if pair_words[0] in words and pair_words[1] in words:
            found_pairs.append(pair)
            words.remove(pair_words[0])
            words.remove(pair_words[1])

    for word in words[:]:
        for pair in pairs:
            if word in pair.split():
                words.remove(word)
                break
    ordered_pairs = ", ".join(found_pairs)
    remaining_s = ", ".join(words)
    return ordered_pairs, remaining_s
```

Next, implement a custom [`~transformers.LogitsProcessor`] class that assigns tokens in the `words` list a value of 0 and assigns tokens not in the `words` list a negative value so they aren't picked during generation. This way, generation is biased towards words in the `words` list. After a word from the list is used, it is also assigned a negative value so it isn't picked again.

```py
class CustomLogitsProcessor(LogitsProcessor):
    def __init__(self, bias):
        super().__init__()
        self.bias = bias

    def __call__(self, input_ids, scores):
        if len(input_ids.shape) == 2:
            last_token_id = input_ids[0, -1]
            self.bias[last_token_id] = -1e10
        return scores + self.bias

# note: `tokenizer` is the GPT2 tokenizer loaded further below; load it before running this block
word_ids = [tokenizer.encode(word, add_prefix_space=True)[0] for word in words]
bias = torch.full((tokenizer.vocab_size,), -float("Inf")).to("cuda")
bias[word_ids] = 0
processor = CustomLogitsProcessor(bias)
processor_list = LogitsProcessorList([processor])
```

Combine the prompt and the `cinematic` style prompt defined in the `styles` dictionary earlier.

```py
prompt = "a cat basking in the sun on a roof in Turkey"
style = "cinematic"

prompt = styles[style].format(prompt=prompt)
prompt
"cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
```

Load a GPT2 tokenizer and model from the [Gustavosta/MagicPrompt-Stable-Diffusion](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion) checkpoint (this specific checkpoint is trained to generate prompts) to enhance the prompt.

```py
tokenizer = GPT2Tokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
model = GPT2LMHeadModel.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion", torch_dtype=torch.float16).to(
    "cuda"
)
model.eval()

inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
token_count = inputs["input_ids"].shape[1]
max_new_tokens = 50 - token_count

generation_config = GenerationConfig(
    penalty_alpha=0.7,
    top_k=50,
    eos_token_id=model.config.eos_token_id,
    pad_token_id=model.config.eos_token_id,
    pad_token=model.config.pad_token_id,
    do_sample=True,
)

with torch.no_grad():
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=max_new_tokens,
        generation_config=generation_config,
        logits_processor=processor_list,
    )
```

Then you can combine the input prompt and the generated prompt. Feel free to take a look at what the generated prompt (`generated_part`) is, the word pairs that were found (`pairs`), and the remaining words (`words`). This is all packed together in the `enhanced_prompt`.
```py output_tokens = [tokenizer.decode(generated_id, skip_special_tokens=True) for generated_id in generated_ids] input_part, generated_part = output_tokens[0][: len(prompt)], output_tokens[0][len(prompt) :] pairs, words = find_and_order_pairs(generated_part, word_pairs) formatted_generated_part = pairs + ", " + words enhanced_prompt = input_part + ", " + formatted_generated_part enhanced_prompt ["cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain quality sharp focus beautiful detailed intricate stunning amazing epic"] ``` Finally, load a pipeline and the offset noise LoRA with a *low weight* to generate an image with the enhanced prompt. ```py pipeline = StableDiffusionXLPipeline.from_pretrained( "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.load_lora_weights( "stabilityai/stable-diffusion-xl-base-1.0", weight_name="sd_xl_offset_example-lora_1.0.safetensors", adapter_name="offset", ) pipeline.set_adapters(["offset"], adapter_weights=[0.2]) image = pipeline( enhanced_prompt, width=1152, height=896, guidance_scale=7.5, num_inference_steps=25, ).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"a cat basking in the sun on a roof in Turkey"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/enhanced-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"</figcaption> </div> </div> ## Prompt weighting Prompt weighting provides a way to emphasize or de-emphasize certain parts of a prompt, allowing for more control over the generated image. A prompt can include several concepts, which gets turned into contextualized text embeddings. The embeddings are used by the model to condition its cross-attention layers to generate an image (read the Stable Diffusion [blog post](https://huggingface.co/blog/stable_diffusion) to learn more about how it works). Prompt weighting works by increasing or decreasing the scale of the text embedding vector that corresponds to its concept in the prompt because you may not necessarily want the model to focus on all concepts equally. The easiest way to prepare the prompt-weighted embeddings is to use [Compel](https://github.com/damian0815/compel), a text prompt-weighting and blending library. Once you have the prompt-weighted embeddings, you can pass them to any pipeline that has a [`prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) (and optionally [`negative_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.negative_prompt_embeds)) parameter, such as [`StableDiffusionPipeline`], [`StableDiffusionControlNetPipeline`], and [`StableDiffusionXLPipeline`]. <Tip> If your favorite pipeline doesn't have a `prompt_embeds` parameter, please open an [issue](https://github.com/huggingface/diffusers/issues/new/choose) so we can add it! 
</Tip> This guide will show you how to weight and blend your prompts with Compel in 🤗 Diffusers. Before you begin, make sure you have the latest version of Compel installed: ```py # uncomment to install in Colab #!pip install compel --upgrade ``` For this guide, let's generate an image with the prompt `"a red cat playing with a ball"` using the [`StableDiffusionPipeline`]: ```py from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler import torch pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_safetensors=True) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") prompt = "a red cat playing with a ball" generator = torch.Generator(device="cpu").manual_seed(33) image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png"/> </div> ### Weighting You'll notice there is no "ball" in the image! Let's use compel to upweight the concept of "ball" in the prompt. Create a [`Compel`](https://github.com/damian0815/compel/blob/main/doc/compel.md#compel-objects) object, and pass it a tokenizer and text encoder: ```py from compel import Compel compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) ``` compel uses `+` or `-` to increase or decrease the weight of a word in the prompt. To increase the weight of "ball": <Tip> `+` corresponds to the value `1.1`, `++` corresponds to `1.1^2`, and so on. Similarly, `-` corresponds to `0.9` and `--` corresponds to `0.9^2`. Feel free to experiment with adding more `+` or `-` in your prompt! </Tip> ```py prompt = "a red cat playing with a ball++" ``` Pass the prompt to `compel_proc` to create the new prompt embeddings which are passed to the pipeline: ```py prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png"/> </div> To downweight parts of the prompt, use the `-` suffix: ```py prompt = "a red------- cat playing with a ball" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"/> </div> You can even up or downweight multiple concepts in the same prompt: ```py prompt = "a red cat++ playing with a ball----" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-pos-neg.png"/> </div> ### Blending You can also create a weighted *blend* of prompts by adding `.blend()` to a list of prompts and passing it some weights. Your blend may not always produce the result you expect because it breaks some assumptions about how the text encoder functions, so just have fun and experiment with it! 
```py prompt_embeds = compel_proc('("a red cat playing with a ball", "jungle").blend(0.7, 0.8)') generator = torch.Generator(device="cuda").manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-blend.png"/> </div> ### Conjunction A conjunction diffuses each prompt independently and concatenates their results by their weighted sum. Add `.and()` to the end of a list of prompts to create a conjunction: ```py prompt_embeds = compel_proc('["a red cat", "playing with a", "ball"].and()') generator = torch.Generator(device="cuda").manual_seed(55) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-conj.png"/> </div> ### Textual inversion [Textual inversion](../training/text_inversion) is a technique for learning a specific concept from some images which you can use to generate new images conditioned on that concept. Create a pipeline and use the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] function to load the textual inversion embeddings (feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer) for 100+ trained concepts): ```py import torch from diffusers import StableDiffusionPipeline from compel import Compel, DiffusersTextualInversionManager pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda") pipe.load_textual_inversion("sd-concepts-library/midjourney-style") ``` Compel provides a `DiffusersTextualInversionManager` class to simplify prompt weighting with textual inversion. Instantiate `DiffusersTextualInversionManager` and pass it to the `Compel` class: ```py textual_inversion_manager = DiffusersTextualInversionManager(pipe) compel_proc = Compel( tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, textual_inversion_manager=textual_inversion_manager) ``` Incorporate the concept to condition a prompt with using the `<concept>` syntax: ```py prompt_embeds = compel_proc('("A red cat++ playing with a ball <midjourney-style>")') image = pipe(prompt_embeds=prompt_embeds).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-text-inversion.png"/> </div> ### DreamBooth [DreamBooth](../training/dreambooth) is a technique for generating contextualized images of a subject given just a few images of the subject to train on. It is similar to textual inversion, but DreamBooth trains the full model whereas textual inversion only fine-tunes the text embeddings. 
This means you should use [`~DiffusionPipeline.from_pretrained`] to load the DreamBooth model (feel free to browse the [Stable Diffusion Dreambooth Concepts Library](https://huggingface.co/sd-dreambooth-library) for 100+ trained models):

```py
import torch
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
from compel import Compel

pipe = DiffusionPipeline.from_pretrained("sd-dreambooth-library/dndcoverart-v1", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
```

Create a `Compel` object with a tokenizer and text encoder, and pass your prompt to it. Depending on the model you use, you'll need to incorporate the model's unique identifier into your prompt. For example, the `dndcoverart-v1` model uses the identifier `dndcoverart`:

```py
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
prompt_embeds = compel_proc('("magazine cover of a dndcoverart dragon, high quality, intricate details, larry elmore art style").and()')

image = pipe(prompt_embeds=prompt_embeds).images[0]
image
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-dreambooth.png"/>
</div>

### Stable Diffusion XL

Stable Diffusion XL (SDXL) has two tokenizers and text encoders so its usage is a bit different. To address this, you should pass both tokenizers and encoders to the `Compel` class:

```py
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid
import torch

pipeline = DiffusionPipeline.from_pretrained(
  "stabilityai/stable-diffusion-xl-base-1.0",
  variant="fp16",
  use_safetensors=True,
  torch_dtype=torch.float16
).to("cuda")

compel = Compel(
  tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2],
  text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2],
  returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
  requires_pooled=[False, True]
)
```

This time, let's upweight "ball" by a factor of 1.5 for the first prompt, and downweight "ball" by 0.6 for the second prompt.
The [`StableDiffusionXLPipeline`] also requires [`pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.pooled_prompt_embeds) (and optionally [`negative_pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_pooled_prompt_embeds)) so you should pass those to the pipeline along with the conditioning tensors: ```py # apply weights prompt = ["a red cat playing with a (ball)1.5", "a red cat playing with a (ball)0.6"] conditioning, pooled = compel(prompt) # generate image generator = [torch.Generator().manual_seed(33) for _ in range(len(prompt))] images = pipeline(prompt_embeds=conditioning, pooled_prompt_embeds=pooled, generator=generator, num_inference_steps=30).images make_image_grid(images, rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)1.5"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)0.6"</figcaption> </div> </div>
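Compel can also be applied to the negative prompt with SDXL. The following is a sketch that reuses the `pipeline` and `compel` objects from above; the prompts themselves are only examples. (For prompts of very different lengths, Compel's `pad_conditioning_tensors_to_same_length` utility can help keep the embedding shapes compatible.)

```py
# weighted positive and negative prompts (one prompt each so the batch sizes match)
conditioning, pooled = compel("a red cat playing with a (ball)1.5")
negative_conditioning, negative_pooled = compel("(blurry, low quality)1.3")

image = pipeline(
    prompt_embeds=conditioning,
    pooled_prompt_embeds=pooled,
    negative_prompt_embeds=negative_conditioning,
    negative_pooled_prompt_embeds=negative_pooled,
    generator=torch.Generator().manual_seed(33),
    num_inference_steps=30,
).images[0]
image
```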
diffusers/docs/source/en/using-diffusers/weighted_prompts.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/weighted_prompts.md", "repo_id": "diffusers", "token_count": 7819 }
112