hf_public_repos/candle/candle-examples/examples/reinforcement-learning/README.md
# candle-reinforcement-learning

Reinforcement Learning examples for candle.

This has been tested with `gymnasium` version `0.29.1`. You can install the Python package with:

```bash
pip install "gymnasium[accept-rom-license]"
```

In order to run the example, use the following command. Note the additional `--package` flag to ensure that there is no conflict with the `candle-pyo3` crate.

```bash
cargo run --example reinforcement-learning --features=pyo3 --package candle-examples
```
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/gym_env.rs
```rust
#![allow(unused)]
//! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym)
use candle::{Device, Result, Tensor};
use pyo3::prelude::*;
use pyo3::types::PyDict;

/// The return value for a step.
#[derive(Debug)]
pub struct Step<A> {
    pub state: Tensor,
    pub action: A,
    pub reward: f64,
    pub terminated: bool,
    pub truncated: bool,
}

impl<A: Copy> Step<A> {
    /// Returns a copy of this step changing the observation tensor.
    pub fn copy_with_obs(&self, state: &Tensor) -> Step<A> {
        Step {
            state: state.clone(),
            action: self.action,
            reward: self.reward,
            terminated: self.terminated,
            truncated: self.truncated,
        }
    }
}

/// An OpenAI Gym session.
pub struct GymEnv {
    env: PyObject,
    action_space: usize,
    observation_space: Vec<usize>,
}

fn w(res: PyErr) -> candle::Error {
    candle::Error::wrap(res)
}

impl GymEnv {
    /// Creates a new session of the specified OpenAI Gym environment.
    pub fn new(name: &str) -> Result<GymEnv> {
        Python::with_gil(|py| {
            let gym = py.import("gymnasium")?;
            let make = gym.getattr("make")?;
            let env = make.call1((name,))?;
            let action_space = env.getattr("action_space")?;
            let action_space = if let Ok(val) = action_space.getattr("n") {
                val.extract()?
            } else {
                let action_space: Vec<usize> = action_space.getattr("shape")?.extract()?;
                action_space[0]
            };
            let observation_space = env.getattr("observation_space")?;
            let observation_space = observation_space.getattr("shape")?.extract()?;
            Ok(GymEnv {
                env: env.into(),
                action_space,
                observation_space,
            })
        })
        .map_err(w)
    }

    /// Resets the environment, returning the observation tensor.
    pub fn reset(&self, seed: u64) -> Result<Tensor> {
        let state: Vec<f32> = Python::with_gil(|py| {
            let kwargs = PyDict::new(py);
            kwargs.set_item("seed", seed)?;
            let state = self.env.call_method(py, "reset", (), Some(kwargs))?;
            state.as_ref(py).get_item(0)?.extract()
        })
        .map_err(w)?;
        Tensor::new(state, &Device::Cpu)
    }

    /// Applies an environment step using the specified action.
    pub fn step<A: pyo3::IntoPy<pyo3::Py<pyo3::PyAny>> + Clone>(
        &self,
        action: A,
    ) -> Result<Step<A>> {
        let (state, reward, terminated, truncated) = Python::with_gil(|py| {
            let step = self.env.call_method(py, "step", (action.clone(),), None)?;
            let step = step.as_ref(py);
            let state: Vec<f32> = step.get_item(0)?.extract()?;
            let reward: f64 = step.get_item(1)?.extract()?;
            let terminated: bool = step.get_item(2)?.extract()?;
            let truncated: bool = step.get_item(3)?.extract()?;
            Ok((state, reward, terminated, truncated))
        })
        .map_err(w)?;
        let state = Tensor::new(state, &Device::Cpu)?;
        Ok(Step {
            state,
            action,
            reward,
            terminated,
            truncated,
        })
    }

    /// Returns the number of allowed actions for this environment.
    pub fn action_space(&self) -> usize {
        self.action_space
    }

    /// Returns the shape of the observation tensors.
    pub fn observation_space(&self) -> &[usize] {
        &self.observation_space
    }
}
```
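For orientation, here is a minimal sketch of how this wrapper might be driven from inside the example crate (so that `mod gym_env;` is in scope). The environment name, seed, episode cap, and fixed action are illustrative only and do not appear in the example code:

```rust
use candle::Result;

// Run one episode of a discrete-action environment with a fixed action and
// print the cumulative reward. A real agent would derive its action from
// `state` on every step instead of always pushing right.
fn run_episode() -> Result<()> {
    let env = gym_env::GymEnv::new("CartPole-v1")?;
    println!("observation space: {:?}", env.observation_space());
    let mut state = env.reset(42)?;
    let mut total_reward = 0.0;
    for _ in 0..500 {
        // CartPole has a Discrete(2) action space, so a plain integer works.
        let step = env.step(1usize)?;
        total_reward += step.reward;
        state = step.state;
        if step.terminated || step.truncated {
            break;
        }
    }
    println!("episode reward: {total_reward}, final state: {state:?}");
    Ok(())
}
```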
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/main.rs
```rust
#![allow(unused)]
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

mod gym_env;
mod vec_gym_env;

mod ddpg;

use candle::{Device, Result, Tensor};
use clap::Parser;
use rand::Rng;

// The impact of the q value of the next state on the current state's q value.
const GAMMA: f64 = 0.99;
// The weight for updating the target networks.
const TAU: f64 = 0.005;
// The capacity of the replay buffer used for sampling training data.
const REPLAY_BUFFER_CAPACITY: usize = 100_000;
// The training batch size for each training iteration.
const TRAINING_BATCH_SIZE: usize = 100;
// The total number of episodes.
const MAX_EPISODES: usize = 100;
// The maximum length of an episode.
const EPISODE_LENGTH: usize = 200;
// The number of training iterations after one episode finishes.
const TRAINING_ITERATIONS: usize = 200;

// Ornstein-Uhlenbeck process parameters.
const MU: f64 = 0.0;
const THETA: f64 = 0.15;
const SIGMA: f64 = 0.1;

const ACTOR_LEARNING_RATE: f64 = 1e-4;
const CRITIC_LEARNING_RATE: f64 = 1e-3;

#[derive(Parser, Debug, Clone)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,

    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,
}

fn main() -> Result<()> {
    use tracing_chrome::ChromeLayerBuilder;
    use tracing_subscriber::prelude::*;

    let args = Args::parse();

    let _guard = if args.tracing {
        let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };

    let env = gym_env::GymEnv::new("Pendulum-v1")?;
    println!("action space: {}", env.action_space());
    println!("observation space: {:?}", env.observation_space());

    let size_state = env.observation_space().iter().product::<usize>();
    let size_action = env.action_space();

    let mut agent = ddpg::DDPG::new(
        &Device::Cpu,
        size_state,
        size_action,
        true,
        ACTOR_LEARNING_RATE,
        CRITIC_LEARNING_RATE,
        GAMMA,
        TAU,
        REPLAY_BUFFER_CAPACITY,
        ddpg::OuNoise::new(MU, THETA, SIGMA, size_action)?,
    )?;

    let mut rng = rand::thread_rng();

    for episode in 0..MAX_EPISODES {
        // let mut state = env.reset(episode as u64)?;
        let mut state = env.reset(rng.gen::<u64>())?;

        let mut total_reward = 0.0;
        for _ in 0..EPISODE_LENGTH {
            let mut action = 2.0 * agent.actions(&state)?;
            action = action.clamp(-2.0, 2.0);

            let step = env.step(vec![action])?;
            total_reward += step.reward;

            agent.remember(
                &state,
                &Tensor::new(vec![action], &Device::Cpu)?,
                &Tensor::new(vec![step.reward as f32], &Device::Cpu)?,
                &step.state,
                step.terminated,
                step.truncated,
            );

            if step.terminated || step.truncated {
                break;
            }
            state = step.state;
        }

        println!("episode {episode} with total reward of {total_reward}");

        for _ in 0..TRAINING_ITERATIONS {
            agent.train(TRAINING_BATCH_SIZE)?;
        }
    }

    println!("Testing...");
    agent.train = false;
    for episode in 0..10 {
        // let mut state = env.reset(episode as u64)?;
        let mut state = env.reset(rng.gen::<u64>())?;
        let mut total_reward = 0.0;
        for _ in 0..EPISODE_LENGTH {
            let mut action = 2.0 * agent.actions(&state)?;
            action = action.clamp(-2.0, 2.0);

            let step = env.step(vec![action])?;
            total_reward += step.reward;

            if step.terminated || step.truncated {
                break;
            }
            state = step.state;
        }
        println!("episode {episode} with total reward of {total_reward}");
    }
    Ok(())
}
```
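A note on the action handling above: Pendulum-v1's action space is a single torque in `[-2.0, 2.0]`. The actor network (see `ddpg.rs` below) ends in a `tanh`, so `agent.actions` returns a value in roughly `(-1, 1)`, plus Ornstein-Uhlenbeck exploration noise during training. The `2.0 *` rescales that output to the environment's range, and the `clamp` keeps the noised action in bounds, i.e. `action = clamp(2 * (tanh(pi(s)) + noise), -2, 2)`.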
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/ddpg.rs
```rust
use std::collections::VecDeque;
use std::fmt::Display;

use candle::{DType, Device, Error, Module, Result, Tensor, Var};
use candle_nn::{
    func, linear, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, Sequential,
    VarBuilder, VarMap,
};
use rand::{distributions::Uniform, thread_rng, Rng};

pub struct OuNoise {
    mu: f64,
    theta: f64,
    sigma: f64,
    state: Tensor,
}
impl OuNoise {
    pub fn new(mu: f64, theta: f64, sigma: f64, size_action: usize) -> Result<Self> {
        Ok(Self {
            mu,
            theta,
            sigma,
            state: Tensor::ones(size_action, DType::F32, &Device::Cpu)?,
        })
    }

    pub fn sample(&mut self) -> Result<Tensor> {
        let rand = Tensor::randn_like(&self.state, 0.0, 1.0)?;
        let dx = ((self.theta * (self.mu - &self.state)?)? + (self.sigma * rand)?)?;
        self.state = (&self.state + dx)?;
        Ok(self.state.clone())
    }
}

#[derive(Clone)]
struct Transition {
    state: Tensor,
    action: Tensor,
    reward: Tensor,
    next_state: Tensor,
    terminated: bool,
    truncated: bool,
}
impl Transition {
    fn new(
        state: &Tensor,
        action: &Tensor,
        reward: &Tensor,
        next_state: &Tensor,
        terminated: bool,
        truncated: bool,
    ) -> Self {
        Self {
            state: state.clone(),
            action: action.clone(),
            reward: reward.clone(),
            next_state: next_state.clone(),
            terminated,
            truncated,
        }
    }
}

pub struct ReplayBuffer {
    buffer: VecDeque<Transition>,
    capacity: usize,
    size: usize,
}
impl ReplayBuffer {
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: VecDeque::with_capacity(capacity),
            capacity,
            size: 0,
        }
    }

    pub fn push(
        &mut self,
        state: &Tensor,
        action: &Tensor,
        reward: &Tensor,
        next_state: &Tensor,
        terminated: bool,
        truncated: bool,
    ) {
        if self.size == self.capacity {
            self.buffer.pop_front();
        } else {
            self.size += 1;
        }
        self.buffer.push_back(Transition::new(
            state, action, reward, next_state, terminated, truncated,
        ));
    }

    #[allow(clippy::type_complexity)]
    pub fn random_batch(
        &self,
        batch_size: usize,
    ) -> Result<Option<(Tensor, Tensor, Tensor, Tensor, Vec<bool>, Vec<bool>)>> {
        if self.size < batch_size {
            Ok(None)
        } else {
            let transitions: Vec<&Transition> = thread_rng()
                .sample_iter(Uniform::from(0..self.size))
                .take(batch_size)
                .map(|i| self.buffer.get(i).unwrap())
                .collect();

            let states: Vec<Tensor> = transitions
                .iter()
                .map(|t| t.state.unsqueeze(0))
                .collect::<Result<_>>()?;
            let actions: Vec<Tensor> = transitions
                .iter()
                .map(|t| t.action.unsqueeze(0))
                .collect::<Result<_>>()?;
            let rewards: Vec<Tensor> = transitions
                .iter()
                .map(|t| t.reward.unsqueeze(0))
                .collect::<Result<_>>()?;
            let next_states: Vec<Tensor> = transitions
                .iter()
                .map(|t| t.next_state.unsqueeze(0))
                .collect::<Result<_>>()?;
            let terminateds: Vec<bool> = transitions.iter().map(|t| t.terminated).collect();
            let truncateds: Vec<bool> = transitions.iter().map(|t| t.truncated).collect();

            Ok(Some((
                Tensor::cat(&states, 0)?,
                Tensor::cat(&actions, 0)?,
                Tensor::cat(&rewards, 0)?,
                Tensor::cat(&next_states, 0)?,
                terminateds,
                truncateds,
            )))
        }
    }
}

fn track(
    varmap: &mut VarMap,
    vb: &VarBuilder,
    target_prefix: &str,
    network_prefix: &str,
    dims: &[(usize, usize)],
    tau: f64,
) -> Result<()> {
    for (i, &(in_dim, out_dim)) in dims.iter().enumerate() {
        let target_w = vb.get((out_dim, in_dim), &format!("{target_prefix}-fc{i}.weight"))?;
        let network_w = vb.get((out_dim, in_dim), &format!("{network_prefix}-fc{i}.weight"))?;
        varmap.set_one(
            format!("{target_prefix}-fc{i}.weight"),
            ((tau * network_w)? + ((1.0 - tau) * target_w)?)?,
        )?;

        let target_b = vb.get(out_dim, &format!("{target_prefix}-fc{i}.bias"))?;
        let network_b = vb.get(out_dim, &format!("{network_prefix}-fc{i}.bias"))?;
        varmap.set_one(
            format!("{target_prefix}-fc{i}.bias"),
            ((tau * network_b)? + ((1.0 - tau) * target_b)?)?,
        )?;
    }
    Ok(())
}

struct Actor<'a> {
    varmap: VarMap,
    vb: VarBuilder<'a>,
    network: Sequential,
    target_network: Sequential,
    size_state: usize,
    size_action: usize,
    dims: Vec<(usize, usize)>,
}
impl Actor<'_> {
    fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> {
        let mut varmap = VarMap::new();
        let vb = VarBuilder::from_varmap(&varmap, dtype, device);

        let dims = vec![(size_state, 400), (400, 300), (300, size_action)];

        let make_network = |prefix: &str| {
            let seq = seq()
                .add(linear(
                    dims[0].0,
                    dims[0].1,
                    vb.pp(format!("{prefix}-fc0")),
                )?)
                .add(Activation::Relu)
                .add(linear(
                    dims[1].0,
                    dims[1].1,
                    vb.pp(format!("{prefix}-fc1")),
                )?)
                .add(Activation::Relu)
                .add(linear(
                    dims[2].0,
                    dims[2].1,
                    vb.pp(format!("{prefix}-fc2")),
                )?)
                .add(func(|xs| xs.tanh()));
            Ok::<Sequential, Error>(seq)
        };

        let network = make_network("actor")?;
        let target_network = make_network("target-actor")?;

        // this sets the two networks to be equal to each other using tau = 1.0
        track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0)?;

        Ok(Self {
            varmap,
            vb,
            network,
            target_network,
            size_state,
            size_action,
            dims,
        })
    }

    fn forward(&self, state: &Tensor) -> Result<Tensor> {
        self.network.forward(state)
    }

    fn target_forward(&self, state: &Tensor) -> Result<Tensor> {
        self.target_network.forward(state)
    }

    fn track(&mut self, tau: f64) -> Result<()> {
        track(
            &mut self.varmap,
            &self.vb,
            "target-actor",
            "actor",
            &self.dims,
            tau,
        )
    }
}

struct Critic<'a> {
    varmap: VarMap,
    vb: VarBuilder<'a>,
    network: Sequential,
    target_network: Sequential,
    size_state: usize,
    size_action: usize,
    dims: Vec<(usize, usize)>,
}
impl Critic<'_> {
    fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> {
        let mut varmap = VarMap::new();
        let vb = VarBuilder::from_varmap(&varmap, dtype, device);

        let dims: Vec<(usize, usize)> = vec![(size_state + size_action, 400), (400, 300), (300, 1)];

        let make_network = |prefix: &str| {
            let seq = seq()
                .add(linear(
                    dims[0].0,
                    dims[0].1,
                    vb.pp(format!("{prefix}-fc0")),
                )?)
                .add(Activation::Relu)
                .add(linear(
                    dims[1].0,
                    dims[1].1,
                    vb.pp(format!("{prefix}-fc1")),
                )?)
                .add(Activation::Relu)
                .add(linear(
                    dims[2].0,
                    dims[2].1,
                    vb.pp(format!("{prefix}-fc2")),
                )?);
            Ok::<Sequential, Error>(seq)
        };

        let network = make_network("critic")?;
        let target_network = make_network("target-critic")?;

        // this sets the two networks to be equal to each other using tau = 1.0
        track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0)?;

        Ok(Self {
            varmap,
            vb,
            network,
            target_network,
            size_state,
            size_action,
            dims,
        })
    }

    fn forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> {
        let xs = Tensor::cat(&[action, state], 1)?;
        self.network.forward(&xs)
    }

    fn target_forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> {
        let xs = Tensor::cat(&[action, state], 1)?;
        self.target_network.forward(&xs)
    }

    fn track(&mut self, tau: f64) -> Result<()> {
        track(
            &mut self.varmap,
            &self.vb,
            "target-critic",
            "critic",
            &self.dims,
            tau,
        )
    }
}

#[allow(clippy::upper_case_acronyms)]
pub struct DDPG<'a> {
    actor: Actor<'a>,
    actor_optim: AdamW,
    critic: Critic<'a>,
    critic_optim: AdamW,
    gamma: f64,
    tau: f64,
    replay_buffer: ReplayBuffer,
    ou_noise: OuNoise,

    size_state: usize,
    size_action: usize,
    pub train: bool,
}

impl DDPG<'_> {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        device: &Device,
        size_state: usize,
        size_action: usize,
        train: bool,
        actor_lr: f64,
        critic_lr: f64,
        gamma: f64,
        tau: f64,
        buffer_capacity: usize,
        ou_noise: OuNoise,
    ) -> Result<Self> {
        let filter_by_prefix = |varmap: &VarMap, prefix: &str| {
            varmap
                .data()
                .lock()
                .unwrap()
                .iter()
                .filter_map(|(name, var)| name.starts_with(prefix).then_some(var.clone()))
                .collect::<Vec<Var>>()
        };

        let actor = Actor::new(device, DType::F32, size_state, size_action)?;
        let actor_optim = AdamW::new(
            filter_by_prefix(&actor.varmap, "actor"),
            ParamsAdamW {
                lr: actor_lr,
                ..Default::default()
            },
        )?;

        let critic = Critic::new(device, DType::F32, size_state, size_action)?;
        let critic_optim = AdamW::new(
            filter_by_prefix(&critic.varmap, "critic"),
            ParamsAdamW {
                lr: critic_lr,
                ..Default::default()
            },
        )?;

        Ok(Self {
            actor,
            actor_optim,
            critic,
            critic_optim,
            gamma,
            tau,
            replay_buffer: ReplayBuffer::new(buffer_capacity),
            ou_noise,
            size_state,
            size_action,
            train,
        })
    }

    pub fn remember(
        &mut self,
        state: &Tensor,
        action: &Tensor,
        reward: &Tensor,
        next_state: &Tensor,
        terminated: bool,
        truncated: bool,
    ) {
        self.replay_buffer
            .push(state, action, reward, next_state, terminated, truncated)
    }

    pub fn actions(&mut self, state: &Tensor) -> Result<f32> {
        let actions = self
            .actor
            .forward(&state.detach()?.unsqueeze(0)?)?
            .squeeze(0)?;
        let actions = if self.train {
            (actions + self.ou_noise.sample()?)?
        } else {
            actions
        };
        actions.squeeze(0)?.to_scalar::<f32>()
    }

    pub fn train(&mut self, batch_size: usize) -> Result<()> {
        let (states, actions, rewards, next_states, _, _) =
            match self.replay_buffer.random_batch(batch_size)? {
                Some(v) => v,
                _ => return Ok(()),
            };

        let q_target = self
            .critic
            .target_forward(&next_states, &self.actor.target_forward(&next_states)?)?;
        let q_target = (rewards + (self.gamma * q_target)?.detach())?;
        let q = self.critic.forward(&states, &actions)?;
        let diff = (q_target - q)?;

        let critic_loss = diff.sqr()?.mean_all()?;
        self.critic_optim.backward_step(&critic_loss)?;

        let actor_loss = self
            .critic
            .forward(&states, &self.actor.forward(&states)?)?
            .mean_all()?
            .neg()?;
        self.actor_optim.backward_step(&actor_loss)?;

        self.critic.track(self.tau)?;
        self.actor.track(self.tau)?;
        Ok(())
    }
}
```
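For intuition, here is a scalar sketch of the two update rules the code above applies to tensors (`track` for the Polyak soft update of the target networks, `OuNoise::sample` for the exploration noise). The numeric values are illustrative:

```rust
/// Polyak soft update: move a target parameter a fraction `tau` toward the
/// online parameter, target <- tau * online + (1 - tau) * target.
fn soft_update(target: f64, online: f64, tau: f64) -> f64 {
    tau * online + (1.0 - tau) * target
}

/// One Euler step of an Ornstein-Uhlenbeck process with unit time step:
/// x <- x + theta * (mu - x) + sigma * n, where `n` is a standard normal
/// draw supplied by the caller.
fn ou_step(x: f64, mu: f64, theta: f64, sigma: f64, n: f64) -> f64 {
    x + theta * (mu - x) + sigma * n
}

fn main() {
    // With tau = 0.005 a target weight moves 0.5% of the way per update.
    assert_eq!(soft_update(0.0, 1.0, 0.005), 0.005);
    // Starting from x = 1 with mu = 0, the noise-free OU step decays toward 0.
    assert!((ou_step(1.0, 0.0, 0.15, 0.1, 0.0) - 0.85).abs() < 1e-12);
}
```

The small `TAU` keeps the target networks, and hence the TD targets, changing slowly relative to the online networks, which is what stabilizes DDPG training.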
hf_public_repos/candle/candle-examples/src/imagenet.rs
```rust
use candle::{Device, Result, Tensor};

/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 224, 224). imagenet normalization is applied.
pub fn load_image224<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
    let img = image::io::Reader::open(p)?
        .decode()
        .map_err(candle::Error::wrap)?
        .resize_to_fill(224, 224, image::imageops::FilterType::Triangle);
    let img = img.to_rgb8();
    let data = img.into_raw();
    let data = Tensor::from_vec(data, (224, 224, 3), &Device::Cpu)?.permute((2, 0, 1))?;
    let mean = Tensor::new(&[0.485f32, 0.456, 0.406], &Device::Cpu)?.reshape((3, 1, 1))?;
    let std = Tensor::new(&[0.229f32, 0.224, 0.225], &Device::Cpu)?.reshape((3, 1, 1))?;
    (data.to_dtype(candle::DType::F32)? / 255.)?
        .broadcast_sub(&mean)?
        .broadcast_div(&std)
}

pub const CLASS_COUNT: i64 = 1000;

pub const CLASSES: [&str; 1000] = [
    "tench, Tinca tinca", "goldfish, Carassius auratus",
    "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias",
    "tiger shark, Galeocerdo cuvieri", "hammerhead, hammerhead shark",
    "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen",
    "ostrich, Struthio camelus", "brambling, Fringilla montifringilla",
    "goldfinch, Carduelis carduelis", "house finch, linnet, Carpodacus mexicanus",
    "junco, snowbird", "indigo bunting, indigo finch, indigo bird, Passerina cyanea",
    "robin, American robin, Turdus migratorius", "bulbul", "jay", "magpie", "chickadee",
    "water ouzel, dipper", "kite", "bald eagle, American eagle, Haliaeetus leucocephalus",
    "vulture", "great grey owl, great gray owl, Strix nebulosa",
    "European fire salamander, Salamandra salamandra", "common newt, Triturus vulgaris", "eft",
    "spotted salamander, Ambystoma maculatum", "axolotl, mud puppy, Ambystoma mexicanum",
    "bullfrog, Rana catesbeiana", "tree frog, tree-frog",
    "tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
    "loggerhead, loggerhead turtle, Caretta caretta",
    "leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", "mud turtle",
    "terrapin", "box turtle, box tortoise", "banded gecko",
    "common iguana, iguana, Iguana iguana", "American chameleon, anole, Anolis carolinensis",
    "whiptail, whiptail lizard", "agama", "frilled lizard, Chlamydosaurus kingi",
    "alligator lizard", "Gila monster, Heloderma suspectum", "green lizard, Lacerta viridis",
    "African chameleon, Chamaeleo chamaeleon",
    "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis",
    "African crocodile, Nile crocodile, Crocodylus niloticus",
    "American alligator, Alligator mississipiensis", "triceratops",
    "thunder snake, worm snake, Carphophis amoenus",
    "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper",
    "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake",
    "water snake", "vine snake", "night snake, Hypsiglena torquata",
    "boa constrictor, Constrictor constrictor", "rock python, rock snake, Python sebae",
    "Indian cobra, Naja naja", "green mamba", "sea snake",
    "horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
    "diamondback, diamondback rattlesnake, Crotalus adamanteus",
    "sidewinder, horned rattlesnake, Crotalus cerastes", "trilobite",
    "harvestman, daddy longlegs, Phalangium opilio", "scorpion",
    "black and gold garden spider, Argiope aurantia", "barn spider, Araneus cavaticus",
    "garden spider, Aranea diademata", "black widow, Latrodectus mactans", "tarantula",
    "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan",
    "ruffed grouse,
partridge, Bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "African grey, African gray, Psittacus erithacus", "macaw", "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, Mergus serrator", "goose", "black swan, Cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "Dungeness crab, Cancer magister", "rock crab, Cancer irroratus", "fiddler crab", "king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", "American lobster, Northern lobster, Maine lobster, Homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, Ciconia ciconia", "black stork, Ciconia nigra", "spoonbill", "flamingo", "little blue heron, Egretta caerulea", "American egret, great white heron, Egretta albus", "bittern", "crane", "limpkin, Aramus pictus", "European gallinule, Porphyrio porphyrio", "American coot, marsh hen, mud hen, water hen, Fulica americana", "bustard", "ruddy turnstone, Arenaria interpres", "red-backed sandpiper, dunlin, Erolia alpina", "redshank, Tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, Aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, Orcinus orca", "dugong, Dugong dugon", "sea lion", "Chihuahua", "Japanese spaniel", "Maltese dog, Maltese terrier, Maltese", "Pekinese, Pekingese, Peke", "Shih-Tzu", "Blenheim spaniel", "papillon", "toy terrier", "Rhodesian ridgeback", "Afghan hound, Afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "Walker hound, Walker foxhound", "English foxhound", "redbone", "borzoi, Russian wolfhound", "Irish wolfhound", "Italian greyhound", "whippet", "Ibizan hound, Ibizan Podenco", "Norwegian elkhound, elkhound", "otterhound, otter hound", "Saluki, gazelle hound", "Scottish deerhound, deerhound", "Weimaraner", "Staffordshire bullterrier, Staffordshire bull terrier", "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", "Bedlington terrier", "Border terrier", "Kerry blue terrier", "Irish terrier", "Norfolk terrier", "Norwich terrier", "Yorkshire terrier", "wire-haired fox terrier", "Lakeland terrier", "Sealyham terrier, Sealyham", "Airedale, Airedale terrier", "cairn, cairn terrier", "Australian terrier", "Dandie Dinmont, Dandie Dinmont terrier", "Boston bull, Boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "Scotch terrier, Scottish terrier, Scottie", "Tibetan terrier, chrysanthemum dog", "silky terrier, Sydney silky", "soft-coated wheaten terrier", "West Highland white terrier", "Lhasa, Lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", 
"Labrador retriever", "Chesapeake Bay retriever", "German short-haired pointer", "vizsla, Hungarian pointer", "English setter", "Irish setter, red setter", "Gordon setter", "Brittany spaniel", "clumber, clumber spaniel", "English springer, English springer spaniel", "Welsh springer spaniel", "cocker spaniel, English cocker spaniel, cocker", "Sussex spaniel", "Irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "Old English sheepdog, bobtail", "Shetland sheepdog, Shetland sheep dog, Shetland", "collie", "Border collie", "Bouvier des Flandres, Bouviers des Flandres", "Rottweiler", "German shepherd, German shepherd dog, German police dog, alsatian", "Doberman, Doberman pinscher", "miniature pinscher", "Greater Swiss Mountain dog", "Bernese mountain dog", "Appenzeller", "EntleBucher", "boxer", "bull mastiff", "Tibetan mastiff", "French bulldog", "Great Dane", "Saint Bernard, St Bernard", "Eskimo dog, husky", "malamute, malemute, Alaskan malamute", "Siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "Leonberg", "Newfoundland, Newfoundland dog", "Great Pyrenees", "Samoyed, Samoyede", "Pomeranian", "chow, chow chow", "keeshond", "Brabancon griffon", "Pembroke, Pembroke Welsh corgi", "Cardigan, Cardigan Welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "Mexican hairless", "timber wolf, grey wolf, gray wolf, Canis lupus", "white wolf, Arctic wolf, Canis lupus tundrarum", "red wolf, maned wolf, Canis rufus, Canis niger", "coyote, prairie wolf, brush wolf, Canis latrans", "dingo, warrigal, warragal, Canis dingo", "dhole, Cuon alpinus", "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus", "hyena, hyaena", "red fox, Vulpes vulpes", "kit fox, Vulpes macrotis", "Arctic fox, white fox, Alopex lagopus", "grey fox, gray fox, Urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "Persian cat", "Siamese cat, Siamese", "Egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", "lynx, catamount", "leopard, Panthera pardus", "snow leopard, ounce, Panthera uncia", "jaguar, panther, Panthera onca, Felis onca", "lion, king of beasts, Panthera leo", "tiger, Panthera tigris", "cheetah, chetah, Acinonyx jubatus", "brown bear, bruin, Ursus arctos", "American black bear, black bear, Ursus americanus, Euarctos americanus", "ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus", "sloth bear, Melursus ursinus, Ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, Danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "Angora, Angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, 
eastern fox squirrel, Sciurus niger", "marmot", "beaver", "guinea pig, Cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, Sus scrofa", "wild boar, boar, Sus scrofa", "warthog", "hippopotamus, hippo, river horse, Hippopotamus amphibius", "ox", "water buffalo, water ox, Asiatic buffalo, Bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", "ibex, Capra ibex", "hartebeest", "impala, Aepyceros melampus", "gazelle", "Arabian camel, dromedary, Camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, Mustela putorius", "black-footed ferret, ferret, Mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, Bradypus tridactylus", "orangutan, orang, orangutang, Pongo pygmaeus", "gorilla, Gorilla gorilla", "chimpanzee, chimp, Pan troglodytes", "gibbon, Hylobates lar", "siamang, Hylobates syndactylus, Symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, Erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, Nasalis larvatus", "marmoset", "capuchin, ringtail, Cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, Ateles geoffroyi", "squirrel monkey, Saimiri sciureus", "Madagascar cat, ring-tailed lemur, Lemur catta", "indri, indris, Indri indri, Indri brevicaudatus", "Indian elephant, Elephas maximus", "African elephant, Loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens", "giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", "rock beauty, Holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, Lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, Biro", "Band Aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, 
bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, commode", "chime, bell, gong", "china cabinet, china closet", "Christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "Dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "French horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "iPod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, T-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", 
"joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "Loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", "miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "Model T", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, CRO", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "Petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "Polaroid camera, Polaroid Land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, RV, R.V.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, CRT screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, 
buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, U-boat", "suit, suit of clothes", "sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "Windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, Virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "French loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", 
"coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue", ];
hf_public_repos/candle/candle-examples/src/token_output_stream.rs
```rust
use candle::Result;

/// This is a wrapper around a tokenizer to ensure that tokens can be returned to the user in a
/// streaming way rather than having to wait for the full decoding.
pub struct TokenOutputStream {
    tokenizer: tokenizers::Tokenizer,
    tokens: Vec<u32>,
    prev_index: usize,
    current_index: usize,
}

impl TokenOutputStream {
    pub fn new(tokenizer: tokenizers::Tokenizer) -> Self {
        Self {
            tokenizer,
            tokens: Vec::new(),
            prev_index: 0,
            current_index: 0,
        }
    }

    pub fn into_inner(self) -> tokenizers::Tokenizer {
        self.tokenizer
    }

    fn decode(&self, tokens: &[u32]) -> Result<String> {
        match self.tokenizer.decode(tokens, true) {
            Ok(str) => Ok(str),
            Err(err) => candle::bail!("cannot decode: {err}"),
        }
    }

    // https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/models/model.py#L68
    pub fn next_token(&mut self, token: u32) -> Result<Option<String>> {
        let prev_text = if self.tokens.is_empty() {
            String::new()
        } else {
            let tokens = &self.tokens[self.prev_index..self.current_index];
            self.decode(tokens)?
        };
        self.tokens.push(token);
        let text = self.decode(&self.tokens[self.prev_index..])?;
        if text.len() > prev_text.len() && text.chars().last().unwrap().is_ascii() {
            let text = text.split_at(prev_text.len());
            self.prev_index = self.current_index;
            self.current_index = self.tokens.len();
            Ok(Some(text.1.to_string()))
        } else {
            Ok(None)
        }
    }

    pub fn decode_rest(&self) -> Result<Option<String>> {
        let prev_text = if self.tokens.is_empty() {
            String::new()
        } else {
            let tokens = &self.tokens[self.prev_index..self.current_index];
            self.decode(tokens)?
        };
        let text = self.decode(&self.tokens[self.prev_index..])?;
        if text.len() > prev_text.len() {
            let text = text.split_at(prev_text.len());
            Ok(Some(text.1.to_string()))
        } else {
            Ok(None)
        }
    }

    pub fn decode_all(&self) -> Result<String> {
        self.decode(&self.tokens)
    }

    pub fn get_token(&self, token_s: &str) -> Option<u32> {
        self.tokenizer.get_vocab(true).get(token_s).copied()
    }

    pub fn tokenizer(&self) -> &tokenizers::Tokenizer {
        &self.tokenizer
    }

    pub fn clear(&mut self) {
        self.tokens.clear();
        self.prev_index = 0;
        self.current_index = 0;
    }
}
```
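An illustrative way to drive this stream during generation; the tokenizer path and the source of the token ids are placeholders rather than anything from this file:

```rust
use candle::Result;
use candle_examples::token_output_stream::TokenOutputStream;

// Print generated text as soon as each token decodes to complete characters.
fn stream_demo(token_ids: &[u32]) -> Result<()> {
    let tokenizer = match tokenizers::Tokenizer::from_file("tokenizer.json") {
        Ok(t) => t,
        Err(err) => candle::bail!("cannot load tokenizer: {err}"),
    };
    let mut stream = TokenOutputStream::new(tokenizer);
    for &token in token_ids {
        // `next_token` returns None while a multi-token character sequence
        // is still incomplete.
        if let Some(text) = stream.next_token(token)? {
            print!("{text}");
        }
    }
    // Flush whatever is still buffered at the end of generation.
    if let Some(rest) = stream.decode_rest()? {
        print!("{rest}");
    }
    Ok(())
}
```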
hf_public_repos/candle/candle-examples/src/lib.rs
```rust
pub mod coco_classes;
pub mod imagenet;
pub mod token_output_stream;

use candle::utils::{cuda_is_available, metal_is_available};
use candle::{Device, Result, Tensor};

pub fn device(cpu: bool) -> Result<Device> {
    if cpu {
        Ok(Device::Cpu)
    } else if cuda_is_available() {
        Ok(Device::new_cuda(0)?)
    } else if metal_is_available() {
        Ok(Device::new_metal(0)?)
    } else {
        #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
        {
            println!(
                "Running on CPU, to run on GPU(metal), build this example with `--features metal`"
            );
        }
        #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))]
        {
            println!("Running on CPU, to run on GPU, build this example with `--features cuda`");
        }
        Ok(Device::Cpu)
    }
}

pub fn load_image<P: AsRef<std::path::Path>>(
    p: P,
    resize_longest: Option<usize>,
) -> Result<(Tensor, usize, usize)> {
    let img = image::io::Reader::open(p)?
        .decode()
        .map_err(candle::Error::wrap)?;
    let (initial_h, initial_w) = (img.height() as usize, img.width() as usize);
    let img = match resize_longest {
        None => img,
        Some(resize_longest) => {
            let (height, width) = (img.height(), img.width());
            let resize_longest = resize_longest as u32;
            let (height, width) = if height < width {
                let h = (resize_longest * height) / width;
                (h, resize_longest)
            } else {
                let w = (resize_longest * width) / height;
                (resize_longest, w)
            };
            img.resize_exact(width, height, image::imageops::FilterType::CatmullRom)
        }
    };
    let (height, width) = (img.height() as usize, img.width() as usize);
    let img = img.to_rgb8();
    let data = img.into_raw();
    let data = Tensor::from_vec(data, (height, width, 3), &Device::Cpu)?.permute((2, 0, 1))?;
    Ok((data, initial_h, initial_w))
}

pub fn load_image_and_resize<P: AsRef<std::path::Path>>(
    p: P,
    width: usize,
    height: usize,
) -> Result<Tensor> {
    let img = image::io::Reader::open(p)?
        .decode()
        .map_err(candle::Error::wrap)?
        .resize_to_fill(
            width as u32,
            height as u32,
            image::imageops::FilterType::Triangle,
        );
    let img = img.to_rgb8();
    let data = img.into_raw();
    Tensor::from_vec(data, (width, height, 3), &Device::Cpu)?.permute((2, 0, 1))
}

/// Saves an image to disk using the image crate, this expects an input with shape
/// (c, height, width).
pub fn save_image<P: AsRef<std::path::Path>>(img: &Tensor, p: P) -> Result<()> {
    let p = p.as_ref();
    let (channel, height, width) = img.dims3()?;
    if channel != 3 {
        candle::bail!("save_image expects an input of shape (3, height, width)")
    }
    let img = img.permute((1, 2, 0))?.flatten_all()?;
    let pixels = img.to_vec1::<u8>()?;
    let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> =
        match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) {
            Some(image) => image,
            None => candle::bail!("error saving image {p:?}"),
        };
    image.save(p).map_err(candle::Error::wrap)?;
    Ok(())
}

pub fn save_image_resize<P: AsRef<std::path::Path>>(
    img: &Tensor,
    p: P,
    h: usize,
    w: usize,
) -> Result<()> {
    let p = p.as_ref();
    let (channel, height, width) = img.dims3()?;
    if channel != 3 {
        candle::bail!("save_image expects an input of shape (3, height, width)")
    }
    let img = img.permute((1, 2, 0))?.flatten_all()?;
    let pixels = img.to_vec1::<u8>()?;
    let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> =
        match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) {
            Some(image) => image,
            None => candle::bail!("error saving image {p:?}"),
        };
    let image = image::DynamicImage::from(image);
    let image = image.resize_to_fill(w as u32, h as u32, image::imageops::FilterType::CatmullRom);
    image.save(p).map_err(candle::Error::wrap)?;
    Ok(())
}
```
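A small, hypothetical round trip through these helpers (the file names are placeholders): pick a device, load an image capped at 512 pixels on its longest side, and write it back out:

```rust
use candle::Result;

fn roundtrip() -> Result<()> {
    // Prefer CUDA or Metal when the corresponding feature is enabled,
    // otherwise fall back to the CPU.
    let device = candle_examples::device(false)?;
    println!("running on {device:?}");
    let (image, original_h, original_w) = candle_examples::load_image("in.jpg", Some(512))?;
    println!("loaded {original_w}x{original_h} image as {:?}", image.shape());
    // `save_image` expects a (3, height, width) u8 tensor, which is exactly
    // what `load_image` produces.
    candle_examples::save_image(&image, "out.png")?;
    Ok(())
}
```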
hf_public_repos/candle/candle-examples/src/coco_classes.rs
```rust
pub const NAMES: [&str; 80] = [
    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog",
    "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
    "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
    "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa",
    "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote",
    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book",
    "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush",
];
```
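A typical lookup when post-processing an object-detection head, assuming the array is addressed by predicted class index (the index below is illustrative):

```rust
// Map a predicted class index to its COCO label.
fn label_of(class_idx: usize) -> &'static str {
    candle_examples::coco_classes::NAMES[class_idx]
}

fn main() {
    assert_eq!(label_of(16), "dog"); // index 16 is "dog" in this ordering
}
```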
hf_public_repos/diffusers/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall Diffusers community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Spamming issues or PRs with links to projects unrelated to this library
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at feedback@huggingface.co. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
hf_public_repos/diffusers/_typos.toml
```toml
# Files for typos
# Instruction: https://github.com/marketplace/actions/typos-action#getting-started

[default.extend-identifiers]

[default.extend-words]
NIN="NIN"     # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
nd="np"       # nd may be np (numpy)
parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py

[files]
extend-exclude = ["_typos.toml"]
```
hf_public_repos/diffusers/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos
hf_public_repos/diffusers/Makefile
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples

# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src

check_dirs := examples scripts src tests utils

modified_only_fixup:
	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
	@if test -n "$(modified_py_files)"; then \
		echo "Checking/fixing $(modified_py_files)"; \
		ruff check $(modified_py_files) --fix; \
		ruff format $(modified_py_files);\
	else \
		echo "No library .py files were modified"; \
	fi

# Update src/diffusers/dependency_versions_table.py

deps_table_update:
	@python setup.py deps_table_update

deps_table_check_updated:
	@md5sum src/diffusers/dependency_versions_table.py > md5sum.saved
	@python setup.py deps_table_update
	@md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1)
	@rm md5sum.saved

# autogenerating code

autogenerate_code: deps_table_update

# Check that the repo is in a good state

repo-consistency:
	python utils/check_dummies.py
	python utils/check_repo.py
	python utils/check_inits.py

# this target runs checks on all files

quality:
	ruff check $(check_dirs) setup.py
	ruff format --check $(check_dirs) setup.py
	python utils/check_doc_toc.py

# Format source code automatically and check if there are any problems left that need manual fixing

extra_style_checks:
	python utils/custom_init_isort.py
	python utils/check_doc_toc.py --fix_and_overwrite

# this target runs checks on all files and potentially modifies some of them

style:
	ruff check $(check_dirs) setup.py --fix
	ruff format $(check_dirs) setup.py
	${MAKE} autogenerate_code
	${MAKE} extra_style_checks

# Super fast fix and check target that only works on relevant modified files since the branch was made

fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency

# Make marked copies of snippets of codes conform to the original

fix-copies:
	python utils/check_copies.py --fix_and_overwrite
	python utils/check_dummies.py --fix_and_overwrite

# Run tests for the library

test:
	python -m pytest -n auto --dist=loadfile -s -v ./tests/

# Run tests for examples

test-examples:
	python -m pytest -n auto --dist=loadfile -s -v ./examples/

# Release stuff

pre-release:
	python utils/release.py

pre-patch:
	python utils/release.py --patch

post-release:
	python utils/release.py --post_release

post-patch:
	python utils/release.py --post_release --patch
0
hf_public_repos
hf_public_repos/diffusers/CITATION.cff
cff-version: 1.2.0
title: 'Diffusers: State-of-the-art diffusion models'
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - given-names: Patrick
    family-names: von Platen
  - given-names: Suraj
    family-names: Patil
  - given-names: Anton
    family-names: Lozhkov
  - given-names: Pedro
    family-names: Cuenca
  - given-names: Nathan
    family-names: Lambert
  - given-names: Kashif
    family-names: Rasul
  - given-names: Mishig
    family-names: Davaadorj
  - given-names: Thomas
    family-names: Wolf
repository-code: 'https://github.com/huggingface/diffusers'
abstract: >-
  Diffusers provides pretrained diffusion models across
  multiple modalities, such as vision and audio, and serves
  as a modular toolbox for inference and training of
  diffusion models.
keywords:
  - deep-learning
  - pytorch
  - image-generation
  - hacktoberfest
  - diffusion
  - text2image
  - image2image
  - score-based-generative-modeling
  - stable-diffusion
  - stable-diffusion-diffusers
license: Apache-2.0
version: 0.12.1
0
hf_public_repos
hf_public_repos/diffusers/pyproject.toml
[tool.ruff]
# Never enforce `E501` (line length violations).
ignore = ["C901", "E501", "E741", "F402", "F823"]
select = ["C", "E", "F", "I", "W"]
line-length = 119

# Ignore import violations in all `__init__.py` files.
[tool.ruff.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]
"src/diffusers/utils/dummy_*.py" = ["F401"]

[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["diffusers"]

[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"

# Like Black, indent with spaces, rather than tabs.
indent-style = "space"

# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false

# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
0
hf_public_repos
hf_public_repos/diffusers/MANIFEST.in
include LICENSE
include src/diffusers/utils/model_card_template.md
0
hf_public_repos
hf_public_repos/diffusers/README.md
<!--- Copyright 2022 - The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/>
    <br>
<p>

<p align="center">
    <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a>
    <a href="https://github.com/huggingface/diffusers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg"></a>
    <a href="https://pepy.tech/project/diffusers"><img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month"></a>
    <a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg"></a>
    <a href="https://twitter.com/diffuserslib"><img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib"></a>
</p>

🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).

🤗 Diffusers offers three core components:

- State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code.
- Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality (see the short sketch after this list).
- Pretrained [models](https://huggingface.co/docs/diffusers/api/models/overview) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.
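As a small illustration of the second point, here is a hedged sketch of swapping the scheduler of a loaded pipeline; `DPMSolverMultistepScheduler` is just one of several interchangeable options:

```python
# Sketch: any compatible scheduler can be swapped in via its config.
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```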
## Installation

We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.

### PyTorch

With `pip` (official package):

```bash
pip install --upgrade diffusers[torch]
```

With `conda` (maintained by the community):

```sh
conda install -c conda-forge diffusers
```

### Flax

With `pip` (official package):

```bash
pip install --upgrade diffusers[flax]
```

### Apple Silicon (M1/M2) support

Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.

## Quickstart

Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 15000+ checkpoints):

```python
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipeline.to("cuda")
pipeline("An image of a squirrel in Picasso style").images[0]
```

You can also dig into the models and schedulers toolbox to build your own diffusion system:

```python
from diffusers import DDPMScheduler, UNet2DModel
from PIL import Image
import torch

scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
scheduler.set_timesteps(50)

sample_size = model.config.sample_size
noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
input = noise

for t in scheduler.timesteps:
    with torch.no_grad():
        noisy_residual = model(input, t).sample
    prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
    input = prev_noisy_sample

image = (input / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = Image.fromarray((image * 255).round().astype("uint8"))
image
```

Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today!

## How to navigate the documentation

| **Documentation** | **What can I learn?** |
|-------------------|-----------------------|
| [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
| [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
| [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |

## Contribution

We ❤️ contributions from the open-source community!
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md). You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library.

- See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute
- See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines
- See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)

Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or just hang out ☕.

## Popular Tasks & Pipelines

<table>
  <tr>
    <th>Task</th>
    <th>Pipeline</th>
    <th>🤗 Hub</th>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Unconditional Image Generation</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td>
    <td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unCLIP</a></td>
    <td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/deepfloyd_if">DeepFloyd IF</a></td>
    <td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td>
    <td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/controlnet">ControlNet</a></td>
    <td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td>
  </tr>
  <tr>
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">InstructPix2Pix</a></td>
    <td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td>
  </tr>
  <tr>
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-guided Image Inpainting</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Image Variation</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td>
    <td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Super Resolution</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td>
    <td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td>
  </tr>
  <tr>
    <td>Super Resolution</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td>
    <td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td>
  </tr>
</table>

## Popular libraries using 🧨 Diffusers

- https://github.com/microsoft/TaskMatrix
- https://github.com/invoke-ai/InvokeAI
- https://github.com/apple/ml-stable-diffusion
- https://github.com/Sanster/lama-cleaner
- https://github.com/IDEA-Research/Grounded-Segment-Anything
- https://github.com/ashawkey/stable-dreamfusion
- https://github.com/deep-floyd/IF
- https://github.com/bentoml/BentoML
- https://github.com/bmaltais/kohya_ss
- +6000 other amazing GitHub repositories 💪

Thank you for using us ❤️.

## Credits

This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today:

- @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion)
- @hojonathanho original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion) as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion)
- @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim)
- @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch)

We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models) as well as @crowsonkb and @rromb for useful discussions and insights.

## Citation

```bibtex
@misc{von-platen-etal-2022-diffusers,
  author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Thomas Wolf},
  title = {Diffusers: State-of-the-art diffusion models},
  year = {2022},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/huggingface/diffusers}}
}
```
0
hf_public_repos
hf_public_repos/diffusers/setup.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py

To create the package for PyPI.

1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the
   documentation.

   If releasing on a special branch, copy the updated README.md on the main branch for the commit you will make for
   the post-release and run `make fix-copies` on the main branch as well.

2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise ask
   @philschmid.

3. Unpin specific versions from setup.py that use a git install.

4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the
   message: "Release: <RELEASE>" and push.

5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs).

6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for PyPI'"
   Push the tag to git: git push --tags origin v<RELEASE>-release

7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source
   distribution (obviously).

   For the wheel, run: "python setup.py bdist_wheel" in the top level directory (This will build a wheel for the
   Python version you use to build it).

   For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz
   source versions.

   Long story cut short, you need to run both before you can upload the distribution to the test PyPI and the actual
   PyPI servers:

   python setup.py bdist_wheel && python setup.py sdist

8. Check that everything looks correct by uploading the package to the PyPI test server:

   twine upload dist/* -r pypitest
   (pypi suggests using twine as other methods upload files via plaintext.)
   You may have to specify the repository url, use the following command then:
   twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/

   Check that you can install it in a virtualenv by running:
   pip install -i https://testpypi.python.org/pypi diffusers

   If you are testing from a Colab Notebook, for instance, then do:
   pip install diffusers && pip uninstall diffusers
   pip install -i https://testpypi.python.org/pypi diffusers

   Check you can run the following commands:
   python -c "from diffusers import __version__; print(__version__)"
   python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()"
   python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')"
   python -c "from diffusers import *"

9. Upload the final version to the actual PyPI:
   twine upload dist/* -r pypi

10. Prepare the release notes and publish them on GitHub once everything is looking hunky-dory.

11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release,
    you need to go back to main before executing this.
"""

import os
import re
import sys
from distutils.core import Command

from setuptools import find_packages, setup


# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
_deps = [
    "Pillow",  # keep the PIL.Image.Resampling deprecation away
    "accelerate>=0.11.0",
    "compel==0.1.8",
    "datasets",
    "filelock",
    "flax>=0.4.1",
    "hf-doc-builder>=0.3.0",
    "huggingface-hub>=0.19.4",
    "requests-mock==1.10.0",
    "importlib_metadata",
    "invisible-watermark>=0.2.0",
    "isort>=5.5.4",
    "jax>=0.4.1",
    "jaxlib>=0.4.1",
    "Jinja2",
    "k-diffusion>=0.0.12",
    "torchsde",
    "note_seq",
    "librosa",
    "numpy",
    "omegaconf",
    "parameterized",
    "peft>=0.6.0",
    "protobuf>=3.20.3,<4",
    "pytest",
    "pytest-timeout",
    "pytest-xdist",
    "python>=3.8.0",
    "ruff==0.1.5",
    "safetensors>=0.3.1",
    "sentencepiece>=0.1.91,!=0.1.92",
    "GitPython<3.1.19",
    "scipy",
    "onnx",
    "regex!=2019.12.17",
    "requests",
    "tensorboard",
    "torch>=1.4",
    "torchvision",
    "transformers>=4.25.1",
    "urllib3<=2.0.0",
]

# this is a lookup table with items like:
#
# tokenizers: "huggingface-hub==0.8.0"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)}

# since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from diffusers.dependency_versions_table import deps; \
# print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the
# script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \
# print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#


def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]


class DepsTableUpdateCommand(Command):
    """
    A custom distutils command that updates the dependency table.
    usage: python setup.py deps_table_update
    """

    description = "build runtime dependency table"
    user_options = [
        # format: (long option, short option, description).
        (
            "dep-table-update",
            None,
            "updates src/diffusers/dependency_versions_table.py",
        ),
    ]

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        entries = "\n".join([f'    "{k}": "{v}",' for k, v in deps.items()])
        content = [
            "# THIS FILE HAS BEEN AUTOGENERATED. To update:",
            "# 1. modify the `_deps` dict in setup.py",
            "# 2. run `make deps_table_update`",
            "deps = {",
            entries,
            "}",
            "",
        ]
        target = "src/diffusers/dependency_versions_table.py"
        print(f"updating {target}")
        with open(target, "w", encoding="utf-8", newline="\n") as f:
            f.write("\n".join(content))


extras = {}
extras["quality"] = deps_list("urllib3", "isort", "ruff", "hf-doc-builder")
extras["docs"] = deps_list("hf-doc-builder")
extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2")
extras["test"] = deps_list(
    "compel",
    "GitPython",
    "datasets",
    "Jinja2",
    "invisible-watermark",
    "k-diffusion",
    "librosa",
    "omegaconf",
    "parameterized",
    "pytest",
    "pytest-timeout",
    "pytest-xdist",
    "requests-mock",
    "safetensors",
    "sentencepiece",
    "scipy",
    "torchvision",
    "transformers",
)
extras["torch"] = deps_list("torch", "accelerate")

if os.name == "nt":  # windows
    extras["flax"] = []  # jax is not supported on windows
else:
    extras["flax"] = deps_list("jax", "jaxlib", "flax")

extras["dev"] = (
    extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"]
)

install_requires = [
    deps["importlib_metadata"],
    deps["filelock"],
    deps["huggingface-hub"],
    deps["numpy"],
    deps["regex"],
    deps["requests"],
    deps["safetensors"],
    deps["Pillow"],
]

version_range_max = max(sys.version_info[1], 10) + 1

setup(
    name="diffusers",
    version="0.25.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
    description="State-of-the-art diffusion in PyTorch and JAX.",
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="deep learning diffusion jax pytorch stable diffusion audioldm",
    license="Apache 2.0 License",
    author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)",
    author_email="patrick@huggingface.co",
    url="https://github.com/huggingface/diffusers",
    package_dir={"": "src"},
    packages=find_packages("src"),
    package_data={"diffusers": ["py.typed"]},
    include_package_data=True,
    python_requires=">=3.8.0",
    install_requires=list(install_requires),
    extras_require=extras,
    entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]},
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Programming Language :: Python :: 3",
    ]
    + [f"Programming Language :: Python :: 3.{i}" for i in range(8, version_range_max)],
    cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
0
hf_public_repos
hf_public_repos/diffusers/CONTRIBUTING.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# How to contribute to Diffusers 🧨

We ❤️ contributions from the open-source community! Everyone is welcome, and all types of participation –not just code– are valued and appreciated. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid and get involved if you're up for it!

Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=Discord&logoColor=white"></a>

Whichever way you choose to contribute, we strive to be part of an open, welcoming, and kind community. Please, read our [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and be mindful to respect it during your interactions. We also recommend you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and ask you to adhere to the same principles of transparency and responsibility.

We enormously value feedback from the community, so please do not be afraid to speak up if you believe you have valuable feedback that can help improve the library - every message, comment, issue, and pull request (PR) is read and considered.

## Overview

You can contribute in many ways ranging from answering questions on issues to adding new diffusion models to the core library.

In the following, we give an overview of different ways to contribute, ranked by difficulty in ascending order. All of them are valuable to the community.

* 1. Asking and answering questions on [the Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR).
* 2. Opening new issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose).
* 3. Answering issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues).
* 4. Fix a simple issue, marked by the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
* 5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source).
* 6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples).
* 7. Contribute to the [examples](https://github.com/huggingface/diffusers/tree/main/examples).
* 8. Fix a more difficult issue, marked by the "Good second issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22).
* 9. Add a new pipeline, model, or scheduler, see ["New Pipeline/Model"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) and ["New scheduler"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) issues. For this contribution, please have a look at [Design Philosophy](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md).

As said before, **all contributions are valuable to the community**. In the following, we will explain each contribution a bit more in detail.

For all contributions 4-9, you will need to open a PR. It is explained in detail how to do so in [Opening a pull request](#how-to-open-a-pr).

### 1. Asking and answering questions on the Diffusers discussion forum or on the Diffusers Discord

Any question or comment related to the Diffusers library can be asked on the [discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/) or on [Discord](https://discord.gg/G7tWnz98XR). Such questions and comments include (but are not limited to):

- Reports of training or inference experiments in an attempt to share knowledge
- Presentation of personal projects
- Questions about non-official training examples
- Project proposals
- General feedback
- Paper summaries
- Asking for help on personal projects that build on top of the Diffusers library
- General questions
- Ethical questions regarding diffusion models
- ...

Every question that is asked on the forum or on Discord actively encourages the community to publicly share knowledge and might very well help a beginner in the future that has the same question you're having. Please do pose any questions you might have. In the same spirit, you are of immense help to the community by answering such questions because this way you are publicly documenting knowledge for everybody to learn from.

**Please** keep in mind that the more effort you put into asking or answering a question, the higher the quality of the publicly documented knowledge. In the same way, well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, while badly posed questions or answers reduce the overall quality of the public knowledge database. In short, a high quality question or answer is *precise*, *concise*, *relevant*, *easy-to-understand*, *accessible*, and *well-formatted/well-posed*. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section.

**NOTE about channels**: [*The forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines, such as Google. Posts are ranked by popularity rather than chronologically. Hence, it's easier to look up questions and answers that we posted some time ago. In addition, questions and answers posted in the forum can easily be linked to. In contrast, *Discord* has a chat-like format that invites fast back-and-forth communication. While it will most likely take less time for you to get an answer to your question on Discord, your question won't be visible anymore over time. Also, it's much harder to find information that was posted a while back on Discord. We therefore strongly recommend using the forum for high-quality questions and answers in an attempt to create long-lasting knowledge for the community.
If discussions on Discord lead to very interesting answers and conclusions, we recommend posting the results on the forum to make the information more available for future readers.

### 2. Opening new issues on the GitHub issues tab

The 🧨 Diffusers library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue.

Remember, GitHub issues are reserved for technical questions directly related to the Diffusers library, bug reports, feature requests, or feedback on the library design.

In a nutshell, this means that everything that is **not** related to the **code of the Diffusers library** (including the documentation) should **not** be asked on GitHub, but rather on either the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR).

**Please consider the following guidelines when opening a new issue**:

- Make sure you have searched whether your issue has already been asked before (use the search bar on GitHub under Issues).
- Please never report a new issue on another (related) issue. If another issue is highly related, please open a new issue nevertheless and link to the related issue.
- Make sure your issue is written in English. Please use one of the great, free online translation services, such as [DeepL](https://www.deepl.com/translator) to translate from your native language to English if you are not comfortable in English.
- Check whether your issue might be solved by updating to the newest Diffusers version. Before posting your issue, please make sure that `python -c "import diffusers; print(diffusers.__version__)"` is higher or matches the latest Diffusers version.
- Remember that the more effort you put into opening a new issue, the higher the quality of your answer will be and the better the overall quality of the Diffusers issues.

New issues usually include the following.

#### 2.1. Reproducible, minimal bug reports

A bug report should always have a reproducible code snippet and be as minimal and concise as possible. This means in more detail:

- Narrow the bug down as much as you can, **do not just dump your whole code file**.
- Format your code.
- Do not include any external libraries except for Diffusers and the libraries it depends on.
- **Always** provide all necessary information about your environment; for this, you can run: `diffusers-cli env` in your shell and copy-paste the displayed information to the issue.
- Explain the issue. If the reader doesn't know what the issue is and why it is an issue, she cannot solve it.
- **Always** make sure the reader can reproduce your issue with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell.
- If in order to reproduce your issue a model and/or dataset is required, make sure the reader has access to that model or dataset. You can always upload your model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. Try to keep your model and dataset as small as possible, to make the reproduction of your issue as effortless as possible.

For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section.
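To make the above concrete, here is a minimal sketch of what such a reproducible snippet could look like; the checkpoint and prompt are placeholders for illustration, not taken from a real report:

```python
# Hypothetical minimal reproduction - replace the checkpoint and prompt with
# the smallest inputs that still trigger your bug.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
)
# Expected: a PIL image is returned. Actual: <paste the full traceback here>.
image = pipe("a prompt that triggers the bug", num_inference_steps=2).images[0]
```

Together with the output of `diffusers-cli env`, a snippet of this shape usually gives maintainers everything they need.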
You can open a bug report [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&projects=&template=bug-report.yml).

#### 2.2. Feature requests

A world-class feature request addresses the following points:

1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear about it!
* Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. In case this is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.

You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=).

#### 2.3 Feedback

Feedback about the library design and why it is good or not good helps the core maintainers immensely to build a user-friendly library. To understand the philosophy behind the current design, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel like a certain design choice does not fit with the current design philosophy, please explain why and how it should be changed. If a certain design choice follows the design philosophy too much, hence restricting use cases, explain why and how it should be changed. If a certain design choice is very useful for you, please also leave a note as this is great feedback for future design decisions.

You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).

#### 2.4 Technical questions

Technical questions are mainly about why certain code of the library was written in a certain way, or what a certain part of the code does. Please make sure to link to the code in question and please provide detail on why this part of the code is difficult to understand.

You can open an issue about a technical question [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml).

#### 2.5 Proposal to add a new model, scheduler, or pipeline

If the diffusion model community released a new model, pipeline, or scheduler that you would like to see in the Diffusers library, please provide the following information:

* Short description of the diffusion pipeline, model, or scheduler and link to the paper or public release.
* Link to any of its open-source implementation.
* Link to the model weights if they are available.

If you are willing to contribute to the model yourself, let us know so we can best guide you. Also, don't forget to tag the original author of the component (model, scheduler, pipeline, etc.) by GitHub handle if you can find it.

You can open a request for a model/pipeline/scheduler [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml).

### 3. Answering issues on the GitHub issues tab

Answering issues on GitHub might require some technical knowledge of Diffusers, but we encourage everybody to give it a try even if you are not 100% certain that your answer is correct. Some tips to give a high-quality answer to an issue:

- Be as concise and minimal as possible.
- Stay on topic. An answer to the issue should concern the issue and only the issue.
- Provide links to code, papers, or other sources that prove or encourage your point.
- Answer in code. If a simple code snippet is the answer to the issue or shows how the issue can be solved, please provide a fully reproducible code snippet.

Also, many issues tend to be simply off-topic, duplicates of other issues, or irrelevant. It is of great help to the maintainers if you can answer such issues, encouraging the author of the issue to be more precise, provide the link to a duplicated issue or redirect them to [the forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR).

If you have verified that the issued bug report is correct and requires a correction in the source code, please have a look at the next sections.

For all of the following contributions, you will need to open a PR. It is explained in detail how to do so in the [Opening a pull request](#how-to-open-a-pr) section.

### 4. Fixing a "Good first issue"

*Good first issues* are marked by the [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) label. Usually, the issue already explains how a potential solution should look so that it is easier to fix. If the issue hasn't been closed and you would like to try to fix this issue, you can just leave a message "I would like to try this issue.". There are usually three scenarios:

- a.) The issue description already proposes a fix. In this case and if the solution makes sense to you, you can open a PR or draft PR to fix it.
- b.) The issue description does not propose a fix. In this case, you can ask what a proposed fix could look like and someone from the Diffusers team should answer shortly. If you have a good idea of how to fix it, feel free to directly open a PR.
- c.) There is already an open PR to fix the issue, but the issue hasn't been closed yet. If the PR has gone stale, you can simply open a new PR and link to the stale PR. PRs often go stale if the original contributor who wanted to fix the issue suddenly cannot find the time anymore to proceed. This often happens in open-source and is very normal. In this case, the community will be very happy if you give it a new try and leverage the knowledge of the existing PR. If there is already a PR and it is active, you can help the author by giving suggestions, reviewing the PR or even asking whether you can contribute to the PR.

### 5. Contribute to the documentation

A good library **always** has good documentation! The official documentation is often one of the first points of contact for new users of the library, and therefore contributing to the documentation is a **highly valuable contribution**.

Contributing to the documentation can have many forms:

- Correcting spelling or grammatical errors.
- Correct incorrect formatting of the docstring. If you see that the official documentation is weirdly displayed or a link is broken, we are very happy if you take some time to correct it.
- Correct the shape or dimensions of a docstring input or output tensor.
- Clarify documentation that is hard to understand or incorrect.
- Update outdated code examples.
- Translating the documentation to another language.
Anything displayed on [the official Diffusers doc page](https://huggingface.co/docs/diffusers/index) is part of the official documentation and can be corrected or adjusted in the respective [documentation source](https://github.com/huggingface/diffusers/tree/main/docs/source).

Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) on how to verify changes made to the documentation locally.

### 6. Contribute a community pipeline

[Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) are usually the first point of contact between the Diffusers library and the user. Pipelines are examples of how to use Diffusers [models](https://huggingface.co/docs/diffusers/api/models/overview) and [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview). We support two types of pipelines:

- Official Pipelines
- Community Pipelines

Both official and community pipelines follow the same design and consist of the same type of components.

Official pipelines are tested and maintained by the core maintainers of Diffusers. Their code resides in [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines). In contrast, community pipelines are contributed and maintained purely by the **community** and are **not** tested. They reside in [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and while they can be accessed via the [PyPI diffusers package](https://pypi.org/project/diffusers/), their code is not part of the PyPI distribution.

The reason for the distinction is that the core maintainers of the Diffusers library cannot maintain and test all possible ways diffusion models can be used for inference, but some of them may be of interest to the community. Officially released diffusion pipelines, such as Stable Diffusion are added to the core src/diffusers/pipelines package which ensures high quality of maintenance, no backward-breaking code changes, and testing. More bleeding edge pipelines should be added as community pipelines. If usage for a community pipeline is high, the pipeline can be moved to the official pipelines upon request from the community. This is one of the ways we strive to be a community-driven library.

To add a community pipeline, one should add a <name-of-the-community>.py file to [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and adapt the [examples/community/README.md](https://github.com/huggingface/diffusers/tree/main/examples/community/README.md) to include an example of the new pipeline. An example can be seen [here](https://github.com/huggingface/diffusers/pull/2400).

Community pipeline PRs are only checked at a superficial level and ideally they should be maintained by their original authors.

Contributing a community pipeline is a great way to understand how Diffusers models and schedulers work. Having contributed a community pipeline is usually the first stepping stone to contributing an official pipeline to the core package.
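In case it helps to see the shape of such a file, here is a minimal sketch of a community pipeline, modeled on the one-step UNet example from the documentation; the class name is illustrative:

```python
# one_step_unet.py - a minimal, hypothetical community pipeline sketch.
import torch

from diffusers import DiffusionPipeline


class OneStepUNetPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # register_modules makes the components work with
        # save_pretrained / from_pretrained
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self):
        # draw random noise shaped like the UNet's expected input
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size)
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        # run a single scheduler step on the model output
        return self.scheduler.step(model_output, timestep, sample).prev_sample
```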
### 7. Contribute to training examples

Diffusers examples are a collection of training scripts that reside in [examples](https://github.com/huggingface/diffusers/tree/main/examples). We support two types of training examples:

- Official training examples
- Research training examples

Research training examples are located in [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) whereas official training examples include all folders under [examples](https://github.com/huggingface/diffusers/tree/main/examples) except the `research_projects` and `community` folders. The official training examples are maintained by the Diffusers' core maintainers whereas the research training examples are maintained by the community.

This is because of the same reasons put forward in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: It is not feasible for the core maintainers to maintain all possible training methods for diffusion models. If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author.

Both official training and research examples consist of a directory that contains one or more training scripts, a requirements.txt file, and a README.md file. In order for the user to make use of the training examples, it is required to clone the repository:

```bash
git clone https://github.com/huggingface/diffusers
```

as well as to install all additional dependencies required for training (from the root of the cloned repository):

```bash
pip install -r examples/<your-example-folder>/requirements.txt
```

Therefore when adding an example, the `requirements.txt` file shall define all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt).

Training examples of the Diffusers library should adhere to the following philosophy:

- All the code necessary to run the examples should be found in a single Python file (see the skeleton after this list).
- One should be able to run the example from the command line with `python <your-example>.py --args`.
- Examples should be kept simple and serve as **an example** on how to use Diffusers for training. The purpose of example scripts is **not** to create state-of-the-art diffusion models, but rather to reproduce known training schemes without adding too much custom logic. As a byproduct of this point, our examples also strive to serve as good educational materials.
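In skeleton form, such a single-file example might look like the following; the argument names are illustrative, not a required interface:

```python
# Hedged skeleton of a single-file training example entry point.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="Minimal training example skeleton.")
    parser.add_argument("--pretrained_model_name_or_path", type=str, required=True)
    parser.add_argument("--train_data_dir", type=str, required=True)
    parser.add_argument("--output_dir", type=str, default="output")
    parser.add_argument("--learning_rate", type=float, default=1e-5)
    return parser.parse_args()


def main(args):
    # model loading, the training loop, and checkpoint saving would all live
    # in this single file, per the philosophy above
    print(f"Training {args.pretrained_model_name_or_path} on {args.train_data_dir}")


if __name__ == "__main__":
    main(parse_args())
```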
To contribute an example, it is highly recommended to look at already existing examples such as [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) to get an idea of what they should look like. We strongly advise contributors to make use of the [Accelerate library](https://github.com/huggingface/accelerate) as it's tightly integrated with Diffusers.

Once an example script works, please make sure to add a comprehensive `README.md` that states exactly how to use the example. This README should include:

- An example command on how to run the example script as shown [here e.g.](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch).
- A link to some training results (logs, models, etc.) that show what the user can expect as shown [here e.g.](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5).
- If you are adding a non-official/research training example, **please don't forget** to add a sentence stating that you are maintaining this training example, including your git handle, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations).

If you are contributing to the official training examples, please also make sure to add a test to [examples/test_examples.py](https://github.com/huggingface/diffusers/blob/main/examples/test_examples.py). This is not necessary for non-official training examples.

### 8. Fixing a "Good second issue"

*Good second issues* are marked by the [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) label. Good second issues are usually more complicated to solve than [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). The issue description usually gives less guidance on how to fix the issue and requires a decent understanding of the library by the interested contributor.

If you are interested in tackling a good second issue, feel free to open a PR to fix it and link the PR to the issue. If you see that a PR has already been opened for this issue but did not get merged, have a look to understand why it wasn't merged and try to open an improved PR. Good second issues are usually more difficult to get merged compared to good first issues, so don't hesitate to ask for help from the core maintainers. If your PR is almost finished, the core maintainers can also jump into your PR and commit to it in order to get it merged.

### 9. Adding pipelines, models, schedulers

Pipelines, models, and schedulers are the most important pieces of the Diffusers library. They provide easy access to state-of-the-art diffusion technologies and thus allow the community to build powerful generative AI applications.

By adding a new model, pipeline, or scheduler you might enable a new powerful use case for any of the user interfaces relying on Diffusers, which can be of immense value for the whole generative AI ecosystem.

Diffusers has a couple of open feature requests for all three components - feel free to look through them if you don't know yet what specific component you would like to add:

- [Model or pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22)
- [Scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)

Before adding any of the three components, it is strongly recommended that you give the [Philosophy guide](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md) a read to better understand the design of any of the three components. Please be aware that we cannot merge model, scheduler, or pipeline additions that strongly diverge from our design philosophy, as it will lead to API inconsistencies. If you fundamentally disagree with a design choice, please open a [Feedback issue](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) instead so that it can be discussed whether a certain design pattern/design choice shall be changed everywhere in the library and whether we shall update our design philosophy. Consistency across the library is very important for us.
Please make sure to add links to the original codebase/paper to the PR and ideally also ping the original author directly on the PR so that they can follow the progress and potentially help with questions.

If you are unsure or stuck in the PR, don't hesitate to leave a message to ask for a first review or help.

## How to write a good issue

**The better your issue is written, the higher the chances that it will be quickly resolved.**

1. Make sure that you've used the correct template for your issue. You can pick between *Bug Report*, *Feature Request*, *Feedback about API Design*, *New model/pipeline/scheduler addition*, *Forum*, or a blank issue. Make sure to pick the correct one when opening [a new issue](https://github.com/huggingface/diffusers/issues/new/choose).
2. **Be precise**: Give your issue a fitting title. Try to formulate your issue description as simply as possible. The more precise you are when submitting an issue, the less time it takes to understand the issue and potentially solve it. Make sure to open an issue for one issue only and not for multiple issues. If you found multiple issues, simply open multiple issues. If your issue is a bug, try to be as precise as possible about what bug it is - you should not just write "Error in diffusers".
3. **Reproducibility**: No reproducible code snippet == no solution. If you encounter a bug, maintainers **have to be able to reproduce** it. Make sure that you include a code snippet that can be copy-pasted into a Python interpreter to reproduce the issue. Make sure that your code snippet works, *i.e.* that there are no missing imports or missing links to images, ... Your issue should contain an error message **and** a code snippet that can be copy-pasted without any changes to reproduce the exact same error message. If your issue is using local model weights or local data that cannot be accessed by the reader, the issue cannot be solved. If you cannot share your data or model, try to make a dummy model or dummy data (a sketch of such a minimal reproduction follows this list).
4. **Minimalistic**: Try to help the reader as much as you can to understand the issue as quickly as possible by staying as concise as possible. Remove all code / all information that is irrelevant to the issue. If you have found a bug, try to create the easiest code example you can to demonstrate your issue; do not just dump your whole workflow into the issue as soon as you have found a bug. E.g., if you train a model and get an error at some point during the training, you should first try to understand what part of the training code is responsible for the error and try to reproduce it with a couple of lines. Try to use dummy data instead of full datasets.
5. Add links. If you are referring to a certain naming, method, or model, make sure to provide a link so that the reader can better understand what you mean. If you are referring to a specific PR or issue, make sure to link it to your issue. Do not assume that the reader knows what you are talking about. The more links you add to your issue the better.
6. Formatting. Make sure to nicely format your issue by formatting code into Python code syntax, and error messages into normal code syntax. See the [official GitHub formatting docs](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for more information.
7. Think of your issue not as a ticket to be solved, but rather as a beautiful entry to a well-written encyclopedia.
Every added issue is a contribution to publicly available knowledge. By adding a nicely written issue you not only make it easier for maintainers to solve your issue, but you are helping the whole community to better understand a certain aspect of the library.
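For illustration, a self-contained reproduction of the kind item 3 above asks for can be as small as the following sketch. The tiny checkpoint (which the Diffusers test suite itself uses) is only an example of keeping the snippet fast to run:

```python
import torch

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
pipe = pipe.to("cpu")

# A fixed seed keeps the reproduction deterministic for the maintainers.
generator = torch.manual_seed(0)
image = pipe("a photo of a cat", num_inference_steps=2, generator=generator).images[0]
print(image.size)
```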
## How to write a good PR

1. Be a chameleon. Understand existing design patterns and syntax and make sure your code additions flow seamlessly into the existing code base. Pull requests that significantly diverge from existing design patterns or user interfaces will not be merged.
2. Be laser focused. A pull request should solve one problem and one problem only. Make sure to not fall into the trap of "also fixing another problem while we're adding it". It is much more difficult to review pull requests that solve multiple, unrelated problems at once.
3. If helpful, try to add a code snippet that displays an example of how your addition can be used.
4. The title of your pull request should be a summary of its contribution.
5. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);
6. To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;
7. Try to formulate and format your text as explained in [How to write a good issue](#how-to-write-a-good-issue).
8. Make sure existing tests pass;
9. Add high-coverage tests. No quality testing = no merge.
   - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI does not run the slow tests, but GitHub Actions does every night!
10. All public methods must have informative docstrings that work nicely with markdown. See [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py) for an example.
11. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) or [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images) to place these files. If it is an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.

## How to open a PR

Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to be able to contribute to 🧨 Diffusers. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/main/setup.py#L265)):

1. Fork the [repository](https://github.com/huggingface/diffusers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

```bash
$ git clone git@github.com:<your GitHub handle>/diffusers.git
$ cd diffusers
$ git remote add upstream https://github.com/huggingface/diffusers.git
```

3. Create a new branch to hold your development changes:

```bash
$ git checkout -b a-descriptive-name-for-my-changes
```

**Do not** work on the `main` branch.

4. Set up a development environment by running the following command in a virtual environment:

```bash
$ pip install -e ".[dev]"
```

If you have already cloned the repo, you might need to `git pull` to get the most recent changes in the library.

5. Develop the features on your branch.

As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this:

```bash
$ pytest tests/<TEST_TO_RUN>.py
```

Before you run the tests, please make sure you install the dependencies required for testing. You can do so with this command:

```bash
$ pip install -e ".[test]"
```

You can also run the full test suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Diffusers has grown a lot. Here is the command for it:

```bash
$ make test
```

🧨 Diffusers relies on `ruff` and `isort` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with:

```bash
$ make style
```

🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI; however, you can also run the same checks with:

```bash
$ make quality
```

Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally:

```bash
$ git add modified_file.py
$ git commit -m "A descriptive message about your changes."
```

It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

```bash
$ git pull upstream main
```

Push the changes to your account using:

```bash
$ git push -u origin a-descriptive-name-for-my-changes
```

6. Once you are satisfied, go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review.

7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request.

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests).

We like `pytest` and `pytest-xdist` because they make test runs faster. From the root of the repository, here's how to run tests with `pytest` for the library:

```bash
$ python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

In fact, that's how `make test` is implemented!

You can specify a smaller set of tests in order to test only the feature you're working on.

By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models — make sure you have enough disk space and a good Internet connection, or a lot of patience!
```bash
$ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

`unittest` is fully supported; here's how to run tests with it:

```bash
$ python -m unittest discover -s tests -t . -v
$ python -m unittest discover -s examples -t examples -v
```

### Syncing forked main with upstream (HuggingFace) main

When syncing the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository, which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs:

1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:

```bash
$ git checkout -b your-branch-for-syncing
$ git pull --squash --no-commit upstream main
$ git commit -m '<your message without GitHub references>'
$ git push --set-upstream origin your-branch-for-syncing
```

### Style guide

For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
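As a quick illustration, here is a minimal sketch of a Google-style docstring on a hypothetical helper (the function itself is illustrative, not part of the library):

```python
def rescale(sample, factor):
    """Rescales a sample tensor by a constant factor.

    Args:
        sample (`torch.FloatTensor`): The input sample to rescale.
        factor (`float`): The multiplicative rescaling factor.

    Returns:
        `torch.FloatTensor`: The rescaled sample.
    """
    return sample * factor
```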
0
hf_public_repos
hf_public_repos/diffusers/PHILOSOPHY.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Philosophy

🧨 Diffusers provides **state-of-the-art** pretrained diffusion models across multiple modalities. Its purpose is to serve as a **modular toolbox** for both inference and training.

We aim to build a library that stands the test of time and therefore take API design very seriously.

In a nutshell, Diffusers is built to be a natural extension of PyTorch. Therefore, most of our design choices are based on [PyTorch's Design Principles](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy). Let's go over the most important ones:

## Usability over Performance

- While Diffusers has many built-in performance-enhancing features (see [Memory and Speed](https://huggingface.co/docs/diffusers/optimization/fp16)), models are always loaded with the highest precision and lowest optimization. Therefore, by default diffusion pipelines are always instantiated on CPU with float32 precision if not otherwise defined by the user. This ensures usability across different platforms and accelerators and means that no complex installations are required to run the library.
- Diffusers aims to be a **lightweight** package and therefore has very few required dependencies, but many soft dependencies that can improve performance (such as `accelerate`, `safetensors`, `onnx`, etc...). We strive to keep the library as lightweight as possible so that it can be added without much concern as a dependency on other packages.
- Diffusers prefers simple, self-explainable code over condensed, magic code. This means that short-hand code syntaxes such as lambda functions and advanced PyTorch operators are often not desired.

## Simple over easy

As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library:

- We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management.
- Raising concise error messages is preferred to silently correcting erroneous input. Diffusers aims at teaching the user, rather than making the library as easy to use as possible.
- Complex model vs. scheduler logic is exposed instead of magically handled inside. Schedulers/Samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation allows for easier debugging and gives the user more control over adapting the denoising process or switching out diffusion models or schedulers.
- Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the UNet, and the variational autoencoder, each have their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files.
However, this allows for easier debugging and customization. DreamBooth or Textual Inversion training is very simple thanks to Diffusers' ability to separate single components of the diffusion pipeline.
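To make the model/scheduler separation concrete, here is a minimal sketch of the unrolled denoising loop mentioned above. The checkpoint choice is illustrative (`google/ddpm-cat-256` is a public DDPM checkpoint):

```python
import torch

from diffusers import DDPMScheduler, UNet2DModel

# The denoising model and the scheduler are loaded and configured separately.
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
model = UNet2DModel.from_pretrained("google/ddpm-cat-256")

scheduler.set_timesteps(num_inference_steps=50)

sample_size = model.config.sample_size
sample = torch.randn((1, 3, sample_size, sample_size))

# The user writes the denoising loop explicitly: predict the noise residual,
# then let the scheduler compute the slightly more denoised previous sample.
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = model(sample, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```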
## Tweakable, contributor-friendly over abstraction

For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself). In short, just like Transformers does for modeling files, Diffusers prefers to keep an extremely low level of abstraction and very self-contained code for pipelines and schedulers. Functions, long code blocks, and even classes can be copied across multiple files, which at first can look like a bad, sloppy design choice that makes the library unmaintainable. **However**, this design has proven to be extremely successful for Transformers and makes a lot of sense for community-driven, open-source machine learning libraries because:

- Machine Learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms are changing rapidly, which therefore makes it very difficult to define long-lasting code abstractions.
- Machine Learning practitioners like to be able to quickly tweak existing code for ideation and research and therefore prefer self-contained code over one that contains many abstractions.
- Open-source libraries rely on community contributions and therefore must build a library that is easy to contribute to. The more abstract the code, the more dependencies, the harder to read, and the harder to contribute to. Contributors simply stop contributing to very abstract libraries out of fear of breaking vital functionality. If contributing to a library cannot break other fundamental code, not only is it more inviting for potential new contributors, but it is also easier to review and contribute to multiple parts in parallel.

At Hugging Face, we call this design the **single-file policy**, which means that almost all of the code of a certain class should be written in a single, self-contained file. To read more about the philosophy, you can have a look at [this blog post](https://huggingface.co/blog/transformers-design-philosophy).

In Diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is because almost all diffusion pipelines, such as [DDPM](https://huggingface.co/docs/diffusers/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [unCLIP (DALL·E 2)](https://huggingface.co/docs/diffusers/api/pipelines/unclip) and [Imagen](https://imagen.research.google/) all rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models/unet2d-cond).

Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗. We try to apply these design principles consistently across the library. Nevertheless, there are some minor exceptions to the philosophy or some unlucky design choices. If you have feedback regarding the design, we would ❤️ to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=).

## Design Philosophy in Details

Now, let's look a bit into the nitty-gritty details of the design philosophy. Diffusers essentially consists of three major classes: [pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers). Let's walk through more detailed design decisions for each class.

### Pipelines

Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference.

The following design principles are followed:

- Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it's done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [#Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251).
- Pipelines all inherit from [`DiffusionPipeline`].
- Every pipeline consists of different model and scheduler components that are documented in the [`model_index.json` file](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline, and can be shared between pipelines with the [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function.
- Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function.
- Pipelines should be used **only** for inference.
- Pipelines should be very readable, self-explanatory, and easy to tweak.
- Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs.
- Pipelines are **not** intended to be feature-complete user interfaces. For feature-complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner).
- Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines.
- Pipelines should be named after the task they are intended to solve.
- In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file.
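For example, the component-sharing principle above allows instantiating a second pipeline without allocating the shared weights twice (the checkpoint name is illustrative):

```python
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline

# Load a text-to-image pipeline, then build an img2img pipeline that reuses
# its components (text encoder, UNet, VAE, scheduler, ...).
text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
```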
### Models

Models are designed as configurable toolboxes that are natural extensions of [PyTorch's Module class](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). They only partly follow the **single-file policy**.

The following design principles are followed:

- Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context.
- All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its own file, e.g. [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py), etc...
- Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy.
- Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages.
- Models all inherit from `ModelMixin` and `ConfigMixin`.
- Models can be optimized for performance when doing so doesn't demand major code changes, keeps backward compatibility, and gives significant memory or compute gains.
- Models should by default have the highest precision and lowest performance setting.
- To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different.
- Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments and configuration arguments, and by "foreseeing" future changes; *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work.
- The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
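As a small illustration of models as standalone, configurable components (the checkpoint name is illustrative):

```python
from diffusers import UNet2DConditionModel

# Load one component of a pipeline on its own; `ModelMixin` provides
# `from_pretrained` and `ConfigMixin` stores the architecture configuration.
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
print(unet.config.sample_size)
print(unet.config.cross_attention_dim)
```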
### Schedulers

Schedulers are responsible for guiding the denoising process for inference as well as for defining a noise schedule for training. They are designed as individual classes with loadable configuration files and strongly follow the **single-file policy**.

The following design principles are followed:

- All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers).
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained.
- One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper).
- If schedulers share similar functionalities, we can make use of the `#Copied from` mechanism.
- Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`.
- Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](./docs/source/en/using-diffusers/schedulers.md).
- Every scheduler has to have a `set_timesteps` and a `step` function. `set_timesteps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called.
- Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of timesteps the model will be called upon.
- The `step(...)` function takes a predicted model output and the "current" sample (`x_t`) and returns the "previous", slightly more denoised sample (`x_{t-1}`).
- Given the complexity of diffusion schedulers, the `step` function does not expose all the complexity and can be a bit of a "black box".
- In almost all cases, novel schedulers shall be implemented in a new scheduling file.
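Because schedulers share `SchedulerMixin`/`ConfigMixin`, one can be swapped for another from its configuration alone; a minimal sketch (the checkpoint name is illustrative):

```python
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Build a different scheduler from the current scheduler's config and swap it in.
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
```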
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-pytorch-cuda/Dockerfile
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        invisible_watermark && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers \
        omegaconf \
        pytorch-lightning

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-flax-tpu/Dockerfile
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        "jax[tpu]>=0.2.16,!=0.3.2" \
        -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \
    python3 -m pip install --upgrade --no-cache-dir \
        clu \
        "flax>=0.4.1" \
        "jaxlib>=0.1.65" && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-pytorch-compile-cuda/Dockerfile
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.9 \
    python3.9-dev \
    python3-pip \
    python3.9-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3.9 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3.9 -m pip install --no-cache-dir --upgrade pip && \
    python3.9 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        invisible_watermark && \
    python3.9 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers \
        omegaconf

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        onnxruntime \
        --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-pytorch-cpu/Dockerfile
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    libgl1 \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        invisible_watermark \
        --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-onnxruntime-cuda/Dockerfile
FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        "onnxruntime-gpu>=1.13.1" \
        --extra-index-url https://download.pytorch.org/whl/cu117 && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-flax-cpu/Dockerfile
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --upgrade --no-cache-dir \
        clu \
        "jax[cpu]>=0.2.16,!=0.3.2" \
        "flax>=0.4.1" \
        "jaxlib>=0.1.65" && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers/docker
hf_public_repos/diffusers/docker/diffusers-pytorch-xformers-cuda/Dockerfile
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch \
        torchvision \
        torchaudio \
        invisible_watermark && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers \
        omegaconf \
        xformers

CMD ["/bin/bash"]
0
hf_public_repos/diffusers
hf_public_repos/diffusers/tests/conftest.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_utils.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
from distutils.util import strtobool

import pytest

from diffusers import __version__
from diffusers.utils import deprecate


# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"

# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"


class DeprecateTester(unittest.TestCase):
    higher_version = ".".join([str(int(__version__.split(".")[0]) + 1)] + __version__.split(".")[1:])
    lower_version = "0.0.1"

    def test_deprecate_function_arg(self):
        kwargs = {"deprecated_arg": 4}

        with self.assertWarns(FutureWarning) as warning:
            output = deprecate("deprecated_arg", self.higher_version, "message", take_from=kwargs)

        assert output == 4
        assert (
            str(warning.warning)
            == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}."
            " message"
        )

    def test_deprecate_function_arg_tuple(self):
        kwargs = {"deprecated_arg": 4}

        with self.assertWarns(FutureWarning) as warning:
            output = deprecate(("deprecated_arg", self.higher_version, "message"), take_from=kwargs)

        assert output == 4
        assert (
            str(warning.warning)
            == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}."
            " message"
        )

    def test_deprecate_function_args(self):
        kwargs = {"deprecated_arg_1": 4, "deprecated_arg_2": 8}
        with self.assertWarns(FutureWarning) as warning:
            output_1, output_2 = deprecate(
                ("deprecated_arg_1", self.higher_version, "Hey"),
                ("deprecated_arg_2", self.higher_version, "Hey"),
                take_from=kwargs,
            )
        assert output_1 == 4
        assert output_2 == 8
        assert (
            str(warning.warnings[0].message)
            == "The `deprecated_arg_1` argument is deprecated and will be removed in version"
            f" {self.higher_version}. Hey"
        )
        assert (
            str(warning.warnings[1].message)
            == "The `deprecated_arg_2` argument is deprecated and will be removed in version"
            f" {self.higher_version}. Hey"
        )

    def test_deprecate_function_incorrect_arg(self):
        kwargs = {"deprecated_arg": 4}

        with self.assertRaises(TypeError) as error:
            deprecate(("wrong_arg", self.higher_version, "message"), take_from=kwargs)

        assert "test_deprecate_function_incorrect_arg in" in str(error.exception)
        assert "line" in str(error.exception)
        assert "got an unexpected keyword argument `deprecated_arg`" in str(error.exception)

    def test_deprecate_arg_no_kwarg(self):
        with self.assertWarns(FutureWarning) as warning:
            deprecate(("deprecated_arg", self.higher_version, "message"))

        assert (
            str(warning.warning)
            == f"`deprecated_arg` is deprecated and will be removed in version {self.higher_version}. message"
        )

    def test_deprecate_args_no_kwarg(self):
        with self.assertWarns(FutureWarning) as warning:
            deprecate(
                ("deprecated_arg_1", self.higher_version, "Hey"),
                ("deprecated_arg_2", self.higher_version, "Hey"),
            )
        assert (
            str(warning.warnings[0].message)
            == f"`deprecated_arg_1` is deprecated and will be removed in version {self.higher_version}. Hey"
        )
        assert (
            str(warning.warnings[1].message)
            == f"`deprecated_arg_2` is deprecated and will be removed in version {self.higher_version}. Hey"
        )

    def test_deprecate_class_obj(self):
        class Args:
            arg = 5

        with self.assertWarns(FutureWarning) as warning:
            arg = deprecate(("arg", self.higher_version, "message"), take_from=Args())

        assert arg == 5
        assert (
            str(warning.warning)
            == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message"
        )

    def test_deprecate_class_objs(self):
        class Args:
            arg = 5
            foo = 7

        with self.assertWarns(FutureWarning) as warning:
            arg_1, arg_2 = deprecate(
                ("arg", self.higher_version, "message"),
                ("foo", self.higher_version, "message"),
                ("does not exist", self.higher_version, "message"),
                take_from=Args(),
            )

        assert arg_1 == 5
        assert arg_2 == 7
        assert (
            str(warning.warning)
            == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message"
        )
        assert (
            str(warning.warnings[0].message)
            == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message"
        )
        assert (
            str(warning.warnings[1].message)
            == f"The `foo` attribute is deprecated and will be removed in version {self.higher_version}. message"
        )

    def test_deprecate_incorrect_version(self):
        kwargs = {"deprecated_arg": 4}

        with self.assertRaises(ValueError) as error:
            deprecate(("wrong_arg", self.lower_version, "message"), take_from=kwargs)

        assert (
            str(error.exception)
            == "The deprecation tuple ('wrong_arg', '0.0.1', 'message') should be removed since diffusers' version"
            f" {__version__} is >= {self.lower_version}"
        )

    def test_deprecate_incorrect_no_standard_warn(self):
        with self.assertWarns(FutureWarning) as warning:
            deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False)

        assert str(warning.warning) == "This message is better!!!"

    def test_deprecate_stacklevel(self):
        with self.assertWarns(FutureWarning) as warning:
            deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False)
        assert str(warning.warning) == "This message is better!!!"
        assert "diffusers/tests/others/test_utils.py" in warning.filename


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)


def is_staging_test(test_case):
    """
    Decorator marking a test as a staging test.

    Those tests will run using the staging environment of huggingface.co instead of the real model hub.
    """
    if not _run_staging:
        return unittest.skip("test is staging test")(test_case)
    else:
        return pytest.mark.is_staging_test()(test_case)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_ema.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import torch

from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device


enable_full_determinism()


class EMAModelTests(unittest.TestCase):
    model_id = "hf-internal-testing/tiny-stable-diffusion-pipe"
    batch_size = 1
    prompt_length = 77
    text_encoder_hidden_dim = 32
    num_in_channels = 4
    latent_height = latent_width = 64
    generator = torch.manual_seed(0)

    def get_models(self, decay=0.9999):
        unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet")
        unet = unet.to(torch_device)
        ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config)
        return unet, ema_unet

    def get_dummy_inputs(self):
        noisy_latents = torch.randn(
            self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator
        ).to(torch_device)
        timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device)
        encoder_hidden_states = torch.randn(
            self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator
        ).to(torch_device)
        return noisy_latents, timesteps, encoder_hidden_states

    def simulate_backprop(self, unet):
        updated_state_dict = {}
        for k, param in unet.state_dict().items():
            updated_param = torch.randn_like(param) + (param * torch.randn_like(param))
            updated_state_dict.update({k: updated_param})
        unet.load_state_dict(updated_state_dict)
        return unet

    def test_optimization_steps_updated(self):
        unet, ema_unet = self.get_models()
        # Take the first (hypothetical) EMA step.
        ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 1

        # Take two more.
        for _ in range(2):
            ema_unet.step(unet.parameters())
        assert ema_unet.optimization_step == 3

    def test_shadow_params_not_updated(self):
        unet, ema_unet = self.get_models()
        # Since the `unet` is not being updated (i.e., backprop'd)
        # there won't be any difference between the `params` of `unet`
        # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`.
        ema_unet.step(unet.parameters())
        orig_params = list(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)

        # The above holds true even if we call `ema.step()` multiple times since
        # `unet` params are still not being updated.
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)

    def test_shadow_params_updated(self):
        unet, ema_unet = self.get_models()
        # Here we simulate the parameter updates for `unet`. Since there might
        # be some parameters which are initialized to zero we take extra care to
        # initialize their values to something non-zero before the multiplication.
        unet_pseudo_updated_step_one = self.simulate_backprop(unet)

        # Take the EMA step.
        ema_unet.step(unet_pseudo_updated_step_one.parameters())

        # Now the EMA'd parameters won't be equal to the original model parameters.
        orig_params = list(unet_pseudo_updated_step_one.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert ~torch.allclose(s_param, param)

        # Ensure this is the case when we take multiple EMA steps.
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for s_param, param in zip(ema_unet.shadow_params, orig_params):
            assert ~torch.allclose(s_param, param)

    def test_consecutive_shadow_params_updated(self):
        # If we call EMA step after a backpropagation consecutively for two times,
        # the shadow params from those two steps should be different.
        unet, ema_unet = self.get_models()

        # First backprop + EMA
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        step_one_shadow_params = ema_unet.shadow_params

        # Second backprop + EMA
        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params

        for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params):
            assert ~torch.allclose(step_one, step_two)

    def test_zero_decay(self):
        # If there's no decay even if there are backprops, EMA steps
        # won't take any effect i.e., the shadow params would remain the
        # same.
        unet, ema_unet = self.get_models(decay=0.0)
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        step_one_shadow_params = ema_unet.shadow_params

        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params

        for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params):
            assert torch.allclose(step_one, step_two)

    @skip_mps
    def test_serialization(self):
        unet, ema_unet = self.get_models()
        noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs()

        with tempfile.TemporaryDirectory() as tmpdir:
            ema_unet.save_pretrained(tmpdir)
            loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel)
            loaded_unet = loaded_unet.to(unet.device)

        # Since no EMA step has been performed the outputs should match.
        output = unet(noisy_latents, timesteps, encoder_hidden_states).sample
        output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample

        assert torch.allclose(output, output_loaded, atol=1e-4)
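# --- Editor's sketch (illustrative, not part of the original test file) ---
# Where `EMAModel.step` sits in a real training loop: once per optimizer step,
# mirroring what the tests above simulate. The tiny linear model is a stand-in
# for a real UNet.
def _ema_training_loop_example():
    import torch

    from diffusers.training_utils import EMAModel

    model = torch.nn.Linear(4, 4)
    ema_model = EMAModel(model.parameters(), decay=0.9999)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

    for _ in range(10):
        loss = model(torch.randn(2, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Update the shadow parameters after each optimizer step.
        ema_model.step(model.parameters())

    # For evaluation, copy the EMA weights into the model.
    ema_model.copy_to(model.parameters())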
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_hub_utils.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import Mock, patch

import diffusers.utils.hub_utils


class CreateModelCardTest(unittest.TestCase):
    @patch("diffusers.utils.hub_utils.get_full_repo_name")
    def test_create_model_card(self, repo_name_mock: Mock) -> None:
        repo_name_mock.return_value = "full_repo_name"
        with TemporaryDirectory() as tmpdir:
            # Dummy args values
            args = Mock()
            args.output_dir = tmpdir
            args.local_rank = 0
            args.hub_token = "hub_token"
            args.dataset_name = "dataset_name"
            args.learning_rate = 0.01
            args.train_batch_size = 100000
            args.eval_batch_size = 10000
            args.gradient_accumulation_steps = 0.01
            args.adam_beta1 = 0.02
            args.adam_beta2 = 0.03
            args.adam_weight_decay = 0.0005
            args.adam_epsilon = 0.000001
            args.lr_scheduler = 1
            args.lr_warmup_steps = 10
            args.ema_inv_gamma = 0.001
            args.ema_power = 0.1
            args.ema_max_decay = 0.2
            args.mixed_precision = True

            # Model card must be rendered and saved
            diffusers.utils.hub_utils.create_model_card(args, model_name="model_name")
            self.assertTrue((Path(tmpdir) / "README.md").is_file())
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_dependencies.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import unittest
from importlib import import_module


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"

    def test_pipeline_imports(self):
        import diffusers
        import diffusers.pipelines

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if hasattr(diffusers.pipelines, cls_name):
                pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3])
                _ = import_module(pipeline_folder_module, str(cls_name))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_check_copies.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import shutil
import sys
import tempfile
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = check_copies.run_ruff(code)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
0
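The invariant the copy checker enforces can be illustrated with a minimal, self-contained sketch. This is not diffusers' actual `is_copy_consistent` implementation; `is_consistent`, `reference`, and `copied` below are hypothetical names used only to show the idea: code marked `# Copied from <target> with A->B` must equal the target's source with every occurrence of `A` replaced by `B`.

```python
# Minimal sketch of the copy-consistency invariant (not the real checker).
import re


def is_consistent(reference, copied, rename=None):
    # With a "with A->B" rename, the copied block must equal the reference
    # after substituting A -> B; without one, it must match verbatim.
    expected = reference
    if rename is not None:
        old, new = rename.split("->")
        expected = re.sub(old, new, expected)
    return copied == expected


reference = "class DDPMSchedulerOutput:\n    prev_sample = None\n"
copied = reference.replace("DDPM", "Test")
assert is_consistent(reference, copied, rename="DDPM->Test")
assert not is_consistent(reference, copied)  # verbatim match fails after a rename
```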
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_check_dummies.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
0
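The dummy files generated here exist so that `from diffusers import X` never fails at import time even when a backend is missing; the error is deferred to first use. A rough, simplified sketch of the pattern (the real `DummyObject`/`requires_backends` in `diffusers.utils` do more, e.g. handle classmethods):

```python
# Simplified sketch of the dummy-object pattern; not the actual diffusers code.
class DummyObject(type):
    # Instantiating a class with this metaclass raises a helpful error
    # instead of a confusing AttributeError deep inside the library.
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]


try:
    FakeClass()
except ImportError as err:
    print(err)  # FakeClass requires the backends ['torch']
```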
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_training.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
0
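The test above relies on DDPM and DDIM sharing the same forward (noising) process, so `add_noise` produces identical noisy samples for identical betas. That property can be checked in isolation with a few lines (a minimal sketch; it assumes `diffusers` and `torch` are installed and uses the same scheduler settings as the test):

```python
# Standalone check: DDPM and DDIM agree on the forward noising process.
import torch
from diffusers import DDIMScheduler, DDPMScheduler

ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")

clean = torch.randn(2, 3, 8, 8)                # x_0
noise = torch.randn(2, 3, 8, 8)                # epsilon
t = torch.randint(0, 1000, (2,)).long()        # random timesteps

# Both schedulers compute x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
assert torch.allclose(ddpm.add_noise(clean, noise, t), ddim.add_noise(clean, noise, t), atol=1e-5)
```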
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_image_processor.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import PIL.Image
import torch

from diffusers.image_processor import VaeImageProcessor


class ImageProcessorTest(unittest.TestCase):
    @property
    def dummy_sample(self):
        batch_size = 1
        num_channels = 3
        height = 8
        width = 8

        sample = torch.rand((batch_size, num_channels, height, width))

        return sample

    @property
    def dummy_mask(self):
        batch_size = 1
        num_channels = 1
        height = 8
        width = 8

        sample = torch.rand((batch_size, num_channels, height, width))

        return sample

    def to_np(self, image):
        if isinstance(image[0], PIL.Image.Image):
            return np.stack([np.array(i) for i in image], axis=0)
        elif isinstance(image, torch.Tensor):
            return image.cpu().numpy().transpose(0, 2, 3, 1)
        return image

    def test_vae_image_processor_pt(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=True)

        input_pt = self.dummy_sample
        input_np = self.to_np(input_pt)

        for output_type in ["pt", "np", "pil"]:
            out = image_processor.postprocess(
                image_processor.preprocess(input_pt),
                output_type=output_type,
            )
            out_np = self.to_np(out)
            in_np = (input_np * 255).round() if output_type == "pil" else input_np
            assert (
                np.abs(in_np - out_np).max() < 1e-6
            ), f"decoded output does not match input for output_type {output_type}"

    def test_vae_image_processor_np(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=True)
        input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1)

        for output_type in ["pt", "np", "pil"]:
            out = image_processor.postprocess(image_processor.preprocess(input_np), output_type=output_type)

            out_np = self.to_np(out)
            in_np = (input_np * 255).round() if output_type == "pil" else input_np
            assert (
                np.abs(in_np - out_np).max() < 1e-6
            ), f"decoded output does not match input for output_type {output_type}"

    def test_vae_image_processor_pil(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=True)

        input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1)
        input_pil = image_processor.numpy_to_pil(input_np)

        for output_type in ["pt", "np", "pil"]:
            out = image_processor.postprocess(image_processor.preprocess(input_pil), output_type=output_type)
            for i, o in zip(input_pil, out):
                in_np = np.array(i)
                out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round()
                assert (
                    np.abs(in_np - out_np).max() < 1e-6
                ), f"decoded output does not match input for output_type {output_type}"

    def test_preprocess_input_3d(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)

        input_pt_4d = self.dummy_sample
        input_pt_3d = input_pt_4d.squeeze(0)

        out_pt_4d = image_processor.postprocess(
            image_processor.preprocess(input_pt_4d),
            output_type="np",
        )
        out_pt_3d = image_processor.postprocess(
            image_processor.preprocess(input_pt_3d),
            output_type="np",
        )

        input_np_4d = self.to_np(self.dummy_sample)
        input_np_3d = input_np_4d.squeeze(0)

        out_np_4d = image_processor.postprocess(
            image_processor.preprocess(input_np_4d),
            output_type="np",
        )
        out_np_3d = image_processor.postprocess(
            image_processor.preprocess(input_np_3d),
            output_type="np",
        )

        assert np.abs(out_pt_4d - out_pt_3d).max() < 1e-6
        assert np.abs(out_np_4d - out_np_3d).max() < 1e-6

    def test_preprocess_input_list(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)

        input_pt_4d = self.dummy_sample
        input_pt_list = list(input_pt_4d)

        out_pt_4d = image_processor.postprocess(
            image_processor.preprocess(input_pt_4d),
            output_type="np",
        )
        out_pt_list = image_processor.postprocess(
            image_processor.preprocess(input_pt_list),
            output_type="np",
        )

        input_np_4d = self.to_np(self.dummy_sample)
        input_np_list = list(input_np_4d)

        out_np_4d = image_processor.postprocess(
            image_processor.preprocess(input_np_4d),
            output_type="np",
        )
        out_np_list = image_processor.postprocess(
            image_processor.preprocess(input_np_list),
            output_type="np",
        )

        assert np.abs(out_pt_4d - out_pt_list).max() < 1e-6
        assert np.abs(out_np_4d - out_np_list).max() < 1e-6

    def test_preprocess_input_mask_3d(self):
        image_processor = VaeImageProcessor(
            do_resize=False, do_normalize=False, do_binarize=True, do_convert_grayscale=True
        )

        input_pt_4d = self.dummy_mask
        input_pt_3d = input_pt_4d.squeeze(0)
        input_pt_2d = input_pt_3d.squeeze(0)

        out_pt_4d = image_processor.postprocess(
            image_processor.preprocess(input_pt_4d),
            output_type="np",
        )
        out_pt_3d = image_processor.postprocess(
            image_processor.preprocess(input_pt_3d),
            output_type="np",
        )
        out_pt_2d = image_processor.postprocess(
            image_processor.preprocess(input_pt_2d),
            output_type="np",
        )

        input_np_4d = self.to_np(self.dummy_mask)
        input_np_3d = input_np_4d.squeeze(0)
        input_np_3d_1 = input_np_4d.squeeze(-1)
        input_np_2d = input_np_3d.squeeze(-1)

        out_np_4d = image_processor.postprocess(
            image_processor.preprocess(input_np_4d),
            output_type="np",
        )
        out_np_3d = image_processor.postprocess(
            image_processor.preprocess(input_np_3d),
            output_type="np",
        )
        out_np_3d_1 = image_processor.postprocess(
            image_processor.preprocess(input_np_3d_1),
            output_type="np",
        )
        out_np_2d = image_processor.postprocess(
            image_processor.preprocess(input_np_2d),
            output_type="np",
        )

        assert np.abs(out_pt_4d - out_pt_3d).max() == 0
        assert np.abs(out_pt_4d - out_pt_2d).max() == 0
        assert np.abs(out_np_4d - out_np_3d).max() == 0
        assert np.abs(out_np_4d - out_np_3d_1).max() == 0
        assert np.abs(out_np_4d - out_np_2d).max() == 0

    def test_preprocess_input_mask_list(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True)

        input_pt_4d = self.dummy_mask
        input_pt_3d = input_pt_4d.squeeze(0)
        input_pt_2d = input_pt_3d.squeeze(0)

        inputs_pt = [input_pt_4d, input_pt_3d, input_pt_2d]
        inputs_pt_list = [[input_pt] for input_pt in inputs_pt]

        for input_pt, input_pt_list in zip(inputs_pt, inputs_pt_list):
            out_pt = image_processor.postprocess(
                image_processor.preprocess(input_pt),
                output_type="np",
            )
            out_pt_list = image_processor.postprocess(
                image_processor.preprocess(input_pt_list),
                output_type="np",
            )
            assert np.abs(out_pt - out_pt_list).max() < 1e-6

        input_np_4d = self.to_np(self.dummy_mask)
        input_np_3d = input_np_4d.squeeze(0)
        input_np_2d = input_np_3d.squeeze(-1)

        inputs_np = [input_np_4d, input_np_3d, input_np_2d]
        inputs_np_list = [[input_np] for input_np in inputs_np]

        for input_np, input_np_list in zip(inputs_np, inputs_np_list):
            out_np = image_processor.postprocess(
                image_processor.preprocess(input_np),
                output_type="np",
            )
            out_np_list = image_processor.postprocess(
                image_processor.preprocess(input_np_list),
                output_type="np",
            )
            assert np.abs(out_np - out_np_list).max() < 1e-6

    def test_preprocess_input_mask_3d_batch(self):
        image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True)

        # create a dummy mask input with batch_size 2
        dummy_mask_batch = torch.cat([self.dummy_mask] * 2, axis=0)

        # squeeze out the channel dimension
        input_pt_3d = dummy_mask_batch.squeeze(1)
        input_np_3d = self.to_np(dummy_mask_batch).squeeze(-1)

        input_pt_3d_list = list(input_pt_3d)
        input_np_3d_list = list(input_np_3d)

        out_pt_3d = image_processor.postprocess(
            image_processor.preprocess(input_pt_3d),
            output_type="np",
        )
        out_pt_3d_list = image_processor.postprocess(
            image_processor.preprocess(input_pt_3d_list),
            output_type="np",
        )

        assert np.abs(out_pt_3d - out_pt_3d_list).max() < 1e-6

        out_np_3d = image_processor.postprocess(
            image_processor.preprocess(input_np_3d),
            output_type="np",
        )
        out_np_3d_list = image_processor.postprocess(
            image_processor.preprocess(input_np_3d_list),
            output_type="np",
        )

        assert np.abs(out_np_3d - out_np_3d_list).max() < 1e-6

    def test_vae_image_processor_resize_pt(self):
        image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1)
        input_pt = self.dummy_sample
        b, c, h, w = input_pt.shape
        scale = 2
        out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale)
        exp_pt_shape = (b, c, h // scale, w // scale)
        assert (
            out_pt.shape == exp_pt_shape
        ), f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'."

    def test_vae_image_processor_resize_np(self):
        image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1)
        input_pt = self.dummy_sample
        b, c, h, w = input_pt.shape
        scale = 2
        input_np = self.to_np(input_pt)
        out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale)
        exp_np_shape = (b, h // scale, w // scale, c)
        assert (
            out_np.shape == exp_np_shape
        ), f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'."
0
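The round trip these tests exercise can be summarized in a short usage sketch: `preprocess` maps PIL, NumPy, or PyTorch inputs to a batched NCHW tensor (normalized to `[-1, 1]` when `do_normalize=True`), and `postprocess` maps it back. This assumes `diffusers` and `torch` are installed; the tolerance mirrors the assertions above.

```python
# Usage sketch of the preprocess/postprocess round trip tested above.
import numpy as np
import torch
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(do_resize=False, do_normalize=True)

image = torch.rand(1, 3, 8, 8)                 # NCHW float in [0, 1]
normalized = processor.preprocess(image)       # NCHW float in [-1, 1]
restored = processor.postprocess(normalized, output_type="np")  # NHWC in [0, 1]

assert np.abs(restored - image.numpy().transpose(0, 2, 3, 1)).max() < 1e-6
```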
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_config.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    PNDMScheduler,
    logging,
)
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils.testing_utils import CaptureLogger


class SampleObject(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 3],
    ):
        pass


class SampleObject2(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        f=[1, 3],
    ):
        pass


class SampleObject3(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 3],
        f=[1, 3],
    ):
        pass


class SampleObject4(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 5],
        f=[5, 4],
    ):
        pass


class ConfigTester(unittest.TestCase):
    def test_load_not_from_mixin(self):
        with self.assertRaises(ValueError):
            ConfigMixin.load_config("dummy_path")

    def test_register_to_config(self):
        obj = SampleObject()
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # init ignores private arguments
        obj = SampleObject(_name_or_path="lalala")
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # can override default
        obj = SampleObject(c=6)
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == 6
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # can use positional arguments
        obj = SampleObject(1, c=6)
        config = obj.config
        assert config["a"] == 1
        assert config["b"] == 5
        assert config["c"] == 6
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

    def test_save_load(self):
        obj = SampleObject()
        config = obj.config

        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname))
            new_config = new_obj.config

        # unfreeze configs
        config = dict(config)
        new_config = dict(new_config)

        assert config.pop("c") == (2, 5)  # instantiated as tuple
        assert new_config.pop("c") == [2, 5]  # saved & loaded as list because of json
        config.pop("_use_default_values")
        assert config == new_config

    def test_load_ddim_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            ddim = DDIMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert ddim.__class__ == DDIMScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_euler_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            euler = EulerDiscreteScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert euler.__class__ == EulerDiscreteScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_euler_ancestral_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            euler = EulerAncestralDiscreteScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert euler.__class__ == EulerAncestralDiscreteScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            pndm = PNDMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert pndm.__class__ == PNDMScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_overwrite_config_on_load(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            ddpm = DDPMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch",
                subfolder="scheduler",
                prediction_type="sample",
                beta_end=8,
            )

        with CaptureLogger(logger) as cap_logger_2:
            ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88)

        assert ddpm.__class__ == DDPMScheduler
        assert ddpm.config.prediction_type == "sample"
        assert ddpm.config.beta_end == 8
        assert ddpm_2.config.beta_start == 88

        # no warning should be thrown
        assert cap_logger.out == ""
        assert cap_logger_2.out == ""

    def test_load_dpmsolver(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            dpm = DPMSolverMultistepScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert dpm.__class__ == DPMSolverMultistepScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_use_default_values(self):
        # let's first save a config that should be in the form
        # a=2,
        # b=5,
        # c=(2, 5),
        # d="for diffusion",
        # e=[1, 3],
        config = SampleObject()

        config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")}

        # make sure that the default config has all keys in `_use_default_values`
        assert set(config_dict.keys()) == set(config.config._use_default_values)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config.save_config(tmpdirname)

            # now loading it with SampleObject2 should put f into `_use_default_values`
            config = SampleObject2.from_config(tmpdirname)

            assert "f" in config._use_default_values
            assert config.f == [1, 3]

            # now loading the config should **NOT** use [1, 3] for `f`, but the default [5, 4] value
            # **BECAUSE** it is part of `config._use_default_values`
            new_config = SampleObject4.from_config(config.config)
            assert new_config.f == [5, 4]

            config.config._use_default_values.pop()
            new_config_2 = SampleObject4.from_config(config.config)
            assert new_config_2.f == [1, 3]

            # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5]
            assert new_config_2.e == [1, 3]
0
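The `register_to_config` pattern under test captures the decorated `__init__` arguments into a frozen `obj.config` that can be serialized to JSON and restored. A minimal usage sketch (assuming `diffusers` is installed; `MyObject` is a hypothetical example class):

```python
# Minimal sketch of the ConfigMixin / register_to_config save-load round trip.
import tempfile

from diffusers.configuration_utils import ConfigMixin, register_to_config


class MyObject(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(self, a=2, c=(2, 5)):
        pass  # arguments are recorded automatically by the decorator


obj = MyObject(a=3)
assert obj.config["a"] == 3

with tempfile.TemporaryDirectory() as tmpdir:
    obj.save_config(tmpdir)
    restored = MyObject.from_config(MyObject.load_config(tmpdir))

# tuples round-trip as lists because the config is stored as JSON
assert list(restored.config["c"]) == [2, 5]
```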
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_outputs.py
import pickle as pkl
import unittest
from dataclasses import dataclass
from typing import List, Union

import numpy as np
import PIL.Image

from diffusers.utils.outputs import BaseOutput
from diffusers.utils.testing_utils import require_torch


@dataclass
class CustomOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]


class ConfigTester(unittest.TestCase):
    def test_outputs_single_attribute(self):
        outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4))

        # check every way of getting the attribute
        assert isinstance(outputs.images, np.ndarray)
        assert outputs.images.shape == (1, 3, 4, 4)
        assert isinstance(outputs["images"], np.ndarray)
        assert outputs["images"].shape == (1, 3, 4, 4)
        assert isinstance(outputs[0], np.ndarray)
        assert outputs[0].shape == (1, 3, 4, 4)

        # test with a non-tensor attribute
        outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))])

        # check every way of getting the attribute
        assert isinstance(outputs.images, list)
        assert isinstance(outputs.images[0], PIL.Image.Image)
        assert isinstance(outputs["images"], list)
        assert isinstance(outputs["images"][0], PIL.Image.Image)
        assert isinstance(outputs[0], list)
        assert isinstance(outputs[0][0], PIL.Image.Image)

    def test_outputs_dict_init(self):
        # test output reinitialization with a `dict` for compatibility with `accelerate`
        outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)})

        # check every way of getting the attribute
        assert isinstance(outputs.images, np.ndarray)
        assert outputs.images.shape == (1, 3, 4, 4)
        assert isinstance(outputs["images"], np.ndarray)
        assert outputs["images"].shape == (1, 3, 4, 4)
        assert isinstance(outputs[0], np.ndarray)
        assert outputs[0].shape == (1, 3, 4, 4)

        # test with a non-tensor attribute
        outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]})

        # check every way of getting the attribute
        assert isinstance(outputs.images, list)
        assert isinstance(outputs.images[0], PIL.Image.Image)
        assert isinstance(outputs["images"], list)
        assert isinstance(outputs["images"][0], PIL.Image.Image)
        assert isinstance(outputs[0], list)
        assert isinstance(outputs[0][0], PIL.Image.Image)

    def test_outputs_serialization(self):
        outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))])
        serialized = pkl.dumps(outputs_orig)
        outputs_copy = pkl.loads(serialized)

        # check original and copy are equal
        assert dir(outputs_orig) == dir(outputs_copy)
        assert dict(outputs_orig) == dict(outputs_copy)
        assert vars(outputs_orig) == vars(outputs_copy)

    @require_torch
    def test_torch_pytree(self):
        # ensure torch.utils._pytree treats BaseOutput subclasses as nodes (and not leaves)
        # this is important for DistributedDataParallel gradient synchronization with static_graph=True
        import torch
        import torch.utils._pytree

        data = np.random.rand(1, 3, 4, 4)
        x = CustomOutput(images=data)
        self.assertFalse(torch.utils._pytree._is_leaf(x))

        expected_flat_outs = [data]
        expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()])

        actual_flat_outs, actual_tree_spec = torch.utils._pytree.tree_flatten(x)
        self.assertEqual(expected_flat_outs, actual_flat_outs)
        self.assertEqual(expected_tree_spec, actual_tree_spec)

        unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec)
        self.assertEqual(x, unflattened_x)
0
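The behavior these tests pin down is that `BaseOutput` subclasses act like a dataclass, a dict, and a tuple at the same time, which is what lets pipeline callers write `out.images`, `out["images"]`, or `out[0]` interchangeably. A short usage sketch (assuming `diffusers` and `numpy` are installed; `MyOutput` is a hypothetical example class):

```python
# Usage sketch of the triple access pattern provided by BaseOutput.
from dataclasses import dataclass

import numpy as np
from diffusers.utils.outputs import BaseOutput


@dataclass
class MyOutput(BaseOutput):
    images: np.ndarray


out = MyOutput(images=np.zeros((1, 3, 4, 4)))

# attribute, key, and positional access all return the same object
assert out.images is out["images"]
assert out.images is out[0]
```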
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_common.py
import contextlib import gc import inspect import io import json import os import re import tempfile import unittest import uuid from typing import Callable, Union import numpy as np import PIL.Image import torch from huggingface_hub import delete_repo from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, DDIMScheduler, DiffusionPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import logging from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, require_torch, torch_device, ) from ..models.test_models_vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..others.test_utils import TOKEN, USER, is_staging_test def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class PipelineLatentTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that has vae, e.g. equivalence of different input and output types, etc. """ @property def image_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_params` in the child test class. " "`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results" ) @property def image_latents_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_latents_params` in the child test class. 
" "`image_latents_params` are tested for if passing latents directly are producing same results" ) def get_dummy_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inputs(device, seed) def convert_to_pt(image): if isinstance(image, torch.Tensor): input_image = image elif isinstance(image, np.ndarray): input_image = VaeImageProcessor.numpy_to_pt(image) elif isinstance(image, PIL.Image.Image): input_image = VaeImageProcessor.pil_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pt(input_image) else: raise ValueError(f"unsupported input_image_type {type(image)}") return input_image def convert_pt_to_type(image, input_image_type): if input_image_type == "pt": input_image = image elif input_image_type == "np": input_image = VaeImageProcessor.pt_to_numpy(image) elif input_image_type == "pil": input_image = VaeImageProcessor.pt_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pil(input_image) else: raise ValueError(f"unsupported input_image_type {input_image_type}.") return input_image for image_param in self.image_params: if image_param in inputs.keys(): inputs[image_param] = convert_pt_to_type( convert_to_pt(inputs[image_param]).to(device), input_image_type ) inputs["output_type"] = output_type return inputs def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4): self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff) def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type="pt"): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output_pt = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pt") )[0] output_np = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="np") )[0] output_pil = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pil") )[0] max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess( max_diff, expected_max_diff, "`output_type=='pt'` generate different results from `output_type=='np'`" ) max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_pt_np_pil_inputs_equivalent(self): if len(self.image_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pil"))[0] max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`") max_diff = np.abs(out_input_pil - out_input_np).max() self.assertLess(max_diff, 1e-2, "`input_type=='pt'` generate different result from `input_type=='np'`") def test_latents_input(self): if len(self.image_latents_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) pipe = pipe.to(torch_device) 
pipe.set_progress_bar_config(disable=None) out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] vae = components["vae"] inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") generator = inputs["generator"] for image_param in self.image_latents_params: if image_param in inputs.keys(): inputs[image_param] = ( vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor ) out_latents_inputs = pipe(**inputs)[0] max_diff = np.abs(out - out_latents_inputs).max() self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") def test_multi_vae(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) block_out_channels = pipe.vae.config.block_out_channels norm_num_groups = pipe.vae.config.norm_num_groups vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny] configs = [ get_autoencoder_kl_config(block_out_channels, norm_num_groups), get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups), get_consistency_vae_config(block_out_channels, norm_num_groups), get_autoencoder_tiny_config(block_out_channels), ] out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] for vae_cls, config in zip(vae_classes, configs): vae = vae_cls(**config) vae = vae.to(torch_device) components["vae"] = vae vae_pipe = self.pipeline_class(**components) out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] assert out_vae_np.shape == out_np.shape @require_torch class PipelineKarrasSchedulerTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers equivalence of dict and tuple outputs, etc. """ def test_karras_schedulers_shape(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 if "strength" in inputs: inputs["num_inference_steps"] = 4 inputs["strength"] = 0.5 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = 5 scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = 2 assert check_same_shape(outputs) @require_torch class PipelineTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline, equivalence of dict and tuple outputs, etc. """ # Canonical parameters that are passed to `__call__` regardless # of the type of pipeline. They are always optional and have common # sense default values. 
required_optional_params = frozenset( [ "num_inference_steps", "num_images_per_prompt", "generator", "latents", "output_type", "return_dict", ] ) # set these parameters to False in the child class if the pipeline does not support the corresponding functionality test_attention_slicing = True test_xformers_attention = True def get_generator(self, seed): device = torch_device if torch_device != "mps" else "cpu" generator = torch.Generator(device).manual_seed(seed) return generator @property def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: raise NotImplementedError( "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_components(self): raise NotImplementedError( "You need to implement `get_dummy_components(self)` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_inputs(self, device, seed=0): raise NotImplementedError( "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " "See existing pipeline tests for reference." ) @property def params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `params` in the child test class. " "`params` are checked for if all values are present in `__call__`'s signature." " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " "image pipelines, including prompts and prompt embedding overrides." "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " "with non-configurable height and width arguments should set the attribute as " "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " "See existing pipeline tests for reference." ) @property def batch_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `batch_params` in the child test class. " "`batch_params` are the parameters required to be batched when passed to the pipeline's " "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " "set of batch arguments has minor changes from one of the common sets of batch arguments, " "do not make modifications to the existing common sets of batch arguments. I.e. a text to " "image pipeline `negative_prompt` is not batched should set the attribute as " "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " "See existing pipeline tests for reference." ) @property def callback_cfg_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. " "`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback " "function when dynamically adjusting `guidance_scale`. They are variables that require special" "treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common" " sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's " "set of cfg arguments has minor changes from one of the common sets of cfg arguments, " "do not make modifications to the existing common sets of cfg arguments. I.e. 
for inpaint pipeine, you " " need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as" "`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`" ) def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() gc.collect() torch.cuda.empty_cache() def test_save_load_local(self, expected_max_difference=5e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_pipeline_call_signature(self): self.assertTrue( hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method" ) parameters = inspect.signature(self.pipeline_class.__call__).parameters optional_parameters = set() for k, v in parameters.items(): if v.default != inspect._empty: optional_parameters.add(k) parameters = set(parameters.keys()) parameters.remove("self") parameters.discard("kwargs") # kwargs can be added if arguments of pipeline call function are deprecated remaining_required_parameters = set() for param in self.params: if param not in parameters: remaining_required_parameters.add(param) self.assertTrue( len(remaining_required_parameters) == 0, f"Required parameters not present: {remaining_required_parameters}", ) remaining_required_optional_parameters = set() for param in self.required_optional_params: if param not in optional_parameters: remaining_required_optional_parameters.add(param) self.assertTrue( len(remaining_required_optional_parameters) == 0, f"Required optional parameters not present: {remaining_required_optional_parameters}", ) def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes) def _test_inference_batch_consistent( self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": 
len_prompt = len(value) # make unequal batch sizes batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_input[name][-1] = 100 * "very long" else: batched_input[name] = batch_size * [value] if "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input) assert len(output[0]) == batch_size def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4): self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff) def _test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" output = pipe(**self.get_dummy_inputs(generator_device))[0] output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) def test_components_function(self): init_components = self.get_dummy_components() init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() 
pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(0) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: fp16_inputs["generator"] = self.get_generator(0) output_fp16 = pipe_fp16(**fp16_inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." 
) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3): self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff) def _test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] 
pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing = pipe(**inputs)[0] if test_max_difference: max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max() self.assertLess(max_diff, expected_max_diff, "Attention slicing should not affect the inference results") if test_mean_pixel_difference: assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0])) @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass() def _test_xformers_attention_forwardGenerator_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4 ): if not self.test_xformers_attention: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = 
        output_without_offload = pipe(**inputs)[0]
        output_without_offload = (
            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
        )

        pipe.enable_xformers_memory_efficient_attention()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs)[0]
        output_with_offload = (
            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
        )

        if test_max_difference:
            max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
            self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")

        if test_mean_pixel_difference:
            assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])

    def test_progress_bar(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        inputs = self.get_dummy_inputs(torch_device)
        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
            _ = pipe(**inputs)
            stderr = stderr.getvalue()
            # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
            # so we just match "5" in "#####| 1/5 [00:01<00:00]"
            max_steps = re.search("/(.*?) ", stderr).group(1)
            self.assertTrue(max_steps is not None and len(max_steps) > 0)
            self.assertTrue(
                f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
            )

        pipe.set_progress_bar_config(disable=True)
        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
            _ = pipe(**inputs)
            self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")

    def test_num_images_per_prompt(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if "num_images_per_prompt" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_sizes = [1, 2]
        num_images_per_prompts = [1, 2]

        for batch_size in batch_sizes:
            for num_images_per_prompt in num_images_per_prompts:
                inputs = self.get_dummy_inputs(torch_device)

                for key in inputs.keys():
                    if key in self.batch_params:
                        inputs[key] = batch_size * [inputs[key]]

                images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

                assert images.shape[0] == batch_size * num_images_per_prompt

    def test_cfg(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if "guidance_scale" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        inputs["guidance_scale"] = 1.0
        out_no_cfg = pipe(**inputs)[0]

        inputs["guidance_scale"] = 7.5
        out_cfg = pipe(**inputs)[0]

        assert out_cfg.shape == out_no_cfg.shape

    def test_callback_inputs(self):
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs

            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # Test passing in a subset
        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]

        # Test passing in everything
        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]
        assert output.abs().sum() == 0

    def test_callback_cfg(self):
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return
        if "guidance_scale" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_increase_guidance(pipe, i, t, callback_kwargs):
            pipe._guidance_scale += 1.0

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # use cfg guidance because some pipelines modify the shape of the latents
        # outside of the denoising loop
        inputs["guidance_scale"] = 2.0
        inputs["callback_on_step_end"] = callback_increase_guidance
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        _ = pipe(**inputs)[0]

        # we increase the guidance scale by 1.0 at every step
        # check that the guidance scale is increased by the number of scheduler timesteps
        # accounts for models that modify the number of inference steps based on strength
        assert pipe.guidance_scale == (inputs["guidance_scale"] + pipe.num_timesteps)


@is_staging_test
class PipelinePushToHubTester(unittest.TestCase):
    identifier = uuid.uuid4()
    repo_id = f"test-pipeline-{identifier}"
    org_repo_id = f"valid_org/{repo_id}-org"

    def get_pipeline_components(self):
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) with tempfile.TemporaryDirectory() as tmpdir: dummy_vocab = {"<|startoftext|>": 0, "<|endoftext|>": 1, "!": 2} vocab_path = os.path.join(tmpdir, "vocab.json") with open(vocab_path, "w") as f: json.dump(dummy_vocab, f) merges = "Ġ t\nĠt h" merges_path = os.path.join(tmpdir, "merges.txt") with open(merges_path, "w") as f: f.writelines(merges) tokenizer = CLIPTokenizer(vocab_file=vocab_path, merges_file=merges_path) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def test_push_to_hub(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) # For SDXL and its derivative pipelines (such as ControlNet), we have the text encoders # and the tokenizers as optional components. So, we need to override the `test_save_load_optional_components()` # test for all such pipelines. This requires us to use a custom `encode_prompt()` function. 
class SDXLOptionalComponentsTesterMixin: def encode_prompt( self, tokenizers, text_encoders, prompt: str, num_images_per_prompt: int = 1, negative_prompt: str = None ): device = text_encoders[0].device if isinstance(prompt, str): prompt = [prompt] batch_size = len(prompt) prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) if negative_prompt is None: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) else: negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # for classifier-free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) # for classifier-free guidance negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds def _test_save_load_optional_components(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) tokenizer = components.pop("tokenizer") tokenizer_2 = components.pop("tokenizer_2") text_encoder = components.pop("text_encoder") text_encoder_2 = components.pop("text_encoder_2") tokenizers = [tokenizer, tokenizer_2] if tokenizer is not None else [tokenizer_2] text_encoders = [text_encoder, text_encoder_2] if text_encoder is not None 
else [text_encoder_2] prompt = inputs.pop("prompt") ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt(tokenizers, text_encoders, prompt) inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) _ = inputs.pop("prompt") inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) # Some models (e.g. unCLIP) are extremely likely to significantly deviate depending on which hardware is used. # This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a # reference image. def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10): image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32) expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32) avg_diff = np.abs(image - expected_image).mean() assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"
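A usage sketch for the helper above (illustrative only): generate an image with one of the tiny test checkpoints used throughout these tests and compare it to a stored reference with the same 10-pixel average tolerance. The `reference.npy` path is hypothetical, standing in for a reference image saved earlier with `np.save`.

```python
import numpy as np

from diffusers import DiffusionPipeline

# Generate a small image with a tiny test pipeline.
pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None)
image = pipe("a prompt", num_inference_steps=2, output_type="np").images[0]

# Compare against a previously stored reference (hypothetical path);
# `assert_mean_pixel_difference` is the helper defined above.
expected_image = np.load("reference.npy")
assert_mean_pixel_difference(image, expected_image)  # passes when the average deviation is < 10 pixels
```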
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipeline_utils.py
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class IsSafetensorsCompatibleTests(unittest.TestCase): def test_all_is_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_compatible(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(filenames)) def test_transformer_model_is_compatible(self): filenames = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_transformer_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_all_is_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_compatible_variant(self): filenames = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_compatible_variant_partial(self): # pass variant but use the non-variant filenames filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_not_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] variant = "fp16" self.assertFalse(is_safetensors_compatible(filenames, variant=variant)) def test_transformer_model_is_compatible_variant(self): filenames = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, 
variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
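To make the rule these cases encode concrete: `is_safetensors_compatible` returns `True` only when every PyTorch `.bin` weight has a `.safetensors` sibling for the requested variant (falling back to the non-variant safetensors file, as the `_partial` cases show). A minimal sketch of running the same check against a live repo listing, assuming `huggingface_hub` is installed; the repo id is illustrative:

```python
from huggingface_hub import list_repo_files

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

# List every file stored in a pipeline repo and ask whether safetensors
# loading would be possible, for the default weights and the fp16 variant.
filenames = list_repo_files("hf-internal-testing/tiny-stable-diffusion-pipe")
print(is_safetensors_compatible(filenames))
print(is_safetensors_compatible(filenames, variant="fp16"))
```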
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import json import os import random import shutil import sys import tempfile import traceback import unittest import unittest.mock as mock import numpy as np import PIL.Image import requests_mock import safetensors.torch import torch from parameterized import parameterized from PIL import Image from requests.exceptions import HTTPError from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ConfigMixin, DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, ModelMixin, PNDMScheduler, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionPipeline, UNet2DConditionModel, UNet2DModel, UniPCMultistepScheduler, logging, ) from diffusers.pipelines.pipeline_utils import _get_pipeline_class from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import ( CONFIG_NAME, WEIGHTS_NAME, ) from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, get_tests_dir, load_numpy, nightly, require_compel, require_flax, require_onnxruntime, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import is_compiled_module enable_full_determinism() # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: # 1. 
Load models model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) model = torch.compile(model) scheduler = DDPMScheduler(num_train_timesteps=10) ddpm = DDPMPipeline(model, scheduler) # previous diffusers versions stripped compilation off # compiled modules assert is_compiled_module(ddpm.unet) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: ddpm.save_pretrained(tmpdirname) new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) new_ddpm.to(torch_device) generator = torch.Generator(device=torch_device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(0) new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class CustomEncoder(ModelMixin, ConfigMixin): def __init__(self): super().__init__() class CustomPipeline(DiffusionPipeline): def __init__(self, encoder: CustomEncoder, scheduler: DDIMScheduler): super().__init__() self.register_modules(encoder=encoder, scheduler=scheduler) class DownloadTests(unittest.TestCase): def test_one_request_upon_cached(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. if torch_device == "mps": return with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe", cache_dir=tmpdirname) download_requests = [r.method for r in m.request_history] assert download_requests.count("HEAD") == 15, "15 calls to files" assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json" assert ( len(download_requests) == 32 ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json" with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) cache_requests = [r.method for r in m.request_history] assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD" assert cache_requests.count("GET") == 1, "model info is only GET" assert ( len(cache_requests) == 2 ), "We should call only `model_info` to check for _commit hash and `send_telemetry`" def test_less_downloads_passed_object(self): with tempfile.TemporaryDirectory() as tmpdirname: cached_folder = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) # make sure safety checker is not downloaded assert "safety_checker" not in os.listdir(cached_folder) # make sure rest is downloaded assert "unet" in os.listdir(cached_folder) assert "tokenizer" in os.listdir(cached_folder) assert "vae" in os.listdir(cached_folder) assert "model_index.json" in os.listdir(cached_folder) assert "scheduler" in os.listdir(cached_folder) assert "feature_extractor" in os.listdir(cached_folder) def test_less_downloads_passed_object_calls(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. 
        if torch_device == "mps":
            return

        with tempfile.TemporaryDirectory() as tmpdirname:
            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            download_requests = [r.method for r in m.request_history]
            # 15 - 2 because no call to config or model file for `safety_checker`
            assert download_requests.count("HEAD") == 13, "13 calls to files"
            # 17 - 2 because no call to config or model file for `safety_checker`
            assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json"
            assert (
                len(download_requests) == 28
            ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"

            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            cache_requests = [r.method for r in m.request_history]
            assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
            assert cache_requests.count("GET") == 1, "model info is only GET"
            assert (
                len(cache_requests) == 2
            ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"

    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a flax file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
            assert not any(f.endswith(".msgpack") for f in files)
            # We need to never convert this tiny model to safetensors for this test to pass
            assert not any(f.endswith(".safetensors") for f in files)

    def test_force_safetensors_error(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            with self.assertRaises(EnvironmentError):
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors",
                    safety_checker=None,
                    cache_dir=tmpdirname,
                    use_safetensors=True,
                )

    def test_download_safetensors(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors",
                safety_checker=None,
                cache_dir=tmpdirname,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a pytorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
            assert not any(f.endswith(".bin") for f in files)

    def test_download_safetensors_index(self):
        for variant in ["fp16", None]:
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
                    cache_dir=tmpdirname,
                    use_safetensors=True,
                    variant=variant,
                )

                all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a PyTorch `.bin` file even if we have some here:
                # 
https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder if variant is None: assert not any("fp16" in f for f in files) else: model_files = [f for f in files if "safetensors" in f] assert all("fp16" in f for f in model_files) assert len([f for f in files if ".safetensors" in f]) == 8 assert not any(".bin" in f for f in files) def test_download_bin_index(self): for variant in ["fp16", None]: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", cache_dir=tmpdirname, use_safetensors=False, variant=variant, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a safetensors file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder if variant is None: assert not any("fp16" in f for f in files) else: model_files = [f for f in files if "bin" in f] assert all("fp16" in f for f in model_files) assert len([f for f in files if ".bin" in f]) == 8 assert not any(".safetensors" in f for f in files) def test_download_no_openvino_by_default(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-open-vino", cache_dir=tmpdirname, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default no openvino weights are downloaded assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert not any("openvino_" in f for f in files) def test_download_no_onnx_by_default(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-xl-pipe", cache_dir=tmpdirname, use_safetensors=False, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default no onnx weights are downloaded for non-ONNX pipelines assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert not any((f.endswith(".onnx") or f.endswith(".pb")) for f in files) @require_onnxruntime def test_download_onnx_by_default_for_onnx_pipelines(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", cache_dir=tmpdirname, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default onnx weights are downloaded for ONNX pipelines assert any((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert any((f.endswith(".onnx")) for f in files) assert any((f.endswith(".pb")) for f in files) def test_download_no_safety_checker(self): prompt = "hello" pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe = pipe.to(torch_device) generator = torch.manual_seed(0) out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") pipe_2 = pipe_2.to(torch_device) generator = 
torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_explicit_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)

        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None)
            pipe_2 = pipe_2.to(torch_device)

        generator = torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_default_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        pipe = pipe.to(torch_device)

        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname)
            pipe_2 = pipe_2.to(torch_device)

        generator = torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_cached_files_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        orig_pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock):
            # This load must now be served entirely from the local cache.
            pipe = DiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
            )
            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}

        for m1, m2 in zip(orig_comps.values(), comps.values()):
            for p1, p2 in zip(m1.parameters(), m2.parameters()):
                if p1.data.ne(p2.data).sum() > 0:
                    assert False, "Parameters not the same!"
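    # Hypothetical helper, added purely as an illustration of the pattern the two
    # offline tests around this point share: a mock whose `raise_for_status` raises
    # `HTTPError`, so that once `requests.request` is patched with it,
    # `from_pretrained` cannot reach the Hub and must be served from the cache.
    @staticmethod
    def _offline_response_mock():
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        return response_mock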
    def test_local_files_only_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # first check that with local files only the pipeline can only be used if cached
        with self.assertRaises(FileNotFoundError):
            with tempfile.TemporaryDirectory() as tmpdirname:
                orig_pipe = DiffusionPipeline.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname
                )

        # now download
        orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch")

        # make sure it can be loaded with local_files_only
        orig_pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True
        )
        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}

        # Under the mock environment we get a 500 error when trying to connect to the internet.
        # Make sure local_files_only works here!
        with mock.patch("requests.request", return_value=response_mock):
            # This load must be served entirely from the local cache.
            pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}

        for m1, m2 in zip(orig_comps.values(), comps.values()):
            for p1, p2 in zip(m1.parameters(), m2.parameters()):
                if p1.data.ne(p2.data).sum() > 0:
                    assert False, "Parameters not the same!"

    def test_download_from_variant_folder(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                assert not any(f.endswith(other_format) for f in files)
                # no variants
                assert not any(len(f.split(".")) == 3 for f in files)

    def test_download_variant_all(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            this_format = ".safetensors" if use_safetensors else ".bin"
            variant = "fp16"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a non-variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # unet, vae, text_encoder, safety_checker
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4
                # all checkpoints should have variant ending
                assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files)
                assert not any(f.endswith(other_format) for f in files)

    def test_download_variant_partly(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            this_format = ".safetensors" if use_safetensors else ".bin"
            variant = "no_ema"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]
                unet_files = os.listdir(os.path.join(tmpdirname, "unet"))

                # Some of the downloaded files should be a non-variant file, check:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # only unet has "no_ema" variant
                assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1
                # vae, safety_checker and text_encoder should have no variant
                assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3
                assert not any(f.endswith(other_format) for f in files)

    def test_download_broken_variant(self):
        for use_safetensors in [False, True]:
            # the text encoder is missing both the non-variant and the "no_ema" variant weights, so the following can't work
            for variant in [None, "no_ema"]:
                with self.assertRaises(OSError) as error_context:
                    with tempfile.TemporaryDirectory() as tmpdirname:
                        tmpdirname = StableDiffusionPipeline.from_pretrained(
                            "hf-internal-testing/stable-diffusion-broken-variants",
                            cache_dir=tmpdirname,
                            variant=variant,
                            use_safetensors=use_safetensors,
                        )
                assert "Error no file name" in str(error_context.exception)

            # text encoder has fp16 variants so we can load it
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-broken-variants",
                    use_safetensors=use_safetensors,
                    cache_dir=tmpdirname,
                    variant="fp16",
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a non-variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"

    def test_local_save_load_index(self):
        prompt = "hello"
        for variant in [None, "fp16"]:
            for use_safe in [True, False]:
                pipe = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
                    variant=variant,
                    use_safetensors=use_safe,
                    safety_checker=None,
                )
                pipe = pipe.to(torch_device)
                generator = torch.manual_seed(0)
                out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pipe.save_pretrained(tmpdirname)
                    pipe_2 = StableDiffusionPipeline.from_pretrained(
                        tmpdirname, safe_serialization=use_safe, variant=variant
                    )
                    pipe_2 = pipe_2.to(torch_device)

                generator = torch.manual_seed(0)

                out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

                assert np.max(np.abs(out - out_2)) < 1e-3

    def test_text_inversion_download(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = 
pipe.to(torch_device) num_tokens = len(pipe.tokenizer) # single token load local with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<*>": torch.ones((32,))} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname) token = pipe.tokenizer.convert_tokens_to_ids("<*>") assert token == num_tokens, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>" prompt = "hey <*>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # single token load local with weight name with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<**>": 2 * torch.ones((1, 32))} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin") token = pipe.tokenizer.convert_tokens_to_ids("<**>") assert token == num_tokens + 1, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>" prompt = "hey <**>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi token load with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname) token = pipe.tokenizer.convert_tokens_to_ids("<***>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2") assert token == num_tokens + 2, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2" prompt = "hey <***>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi token load a1111 with tempfile.TemporaryDirectory() as tmpdirname: ten = { "string_to_param": { "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) }, "name": "<****>", } torch.save(ten, os.path.join(tmpdirname, "a1111.bin")) pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin") token = pipe.tokenizer.convert_tokens_to_ids("<****>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2") assert token == num_tokens + 5, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2" prompt = "hey <****>" out = 
pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi embedding load with tempfile.TemporaryDirectory() as tmpdirname1: with tempfile.TemporaryDirectory() as tmpdirname2: ten = {"<*****>": torch.ones((32,))} torch.save(ten, os.path.join(tmpdirname1, "learned_embeds.bin")) ten = {"<******>": 2 * torch.ones((1, 32))} torch.save(ten, os.path.join(tmpdirname2, "learned_embeds.bin")) pipe.load_textual_inversion([tmpdirname1, tmpdirname2]) token = pipe.tokenizer.convert_tokens_to_ids("<*****>") assert token == num_tokens + 8, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 assert pipe._maybe_convert_prompt("<*****>", pipe.tokenizer) == "<*****>" token = pipe.tokenizer.convert_tokens_to_ids("<******>") assert token == num_tokens + 9, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>" prompt = "hey <*****> <******>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # single token state dict load ten = {"<x>": torch.ones((32,))} pipe.load_textual_inversion(ten) token = pipe.tokenizer.convert_tokens_to_ids("<x>") assert token == num_tokens + 10, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>" prompt = "hey <x>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi embedding state dict load ten1 = {"<xxxxx>": torch.ones((32,))} ten2 = {"<xxxxxx>": 2 * torch.ones((1, 32))} pipe.load_textual_inversion([ten1, ten2]) token = pipe.tokenizer.convert_tokens_to_ids("<xxxxx>") assert token == num_tokens + 11, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 assert pipe._maybe_convert_prompt("<xxxxx>", pipe.tokenizer) == "<xxxxx>" token = pipe.tokenizer.convert_tokens_to_ids("<xxxxxx>") assert token == num_tokens + 12, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>" prompt = "hey <xxxxx> <xxxxxx>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # auto1111 multi-token state dict load ten = { "string_to_param": { "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) }, "name": "<xxxx>", } pipe.load_textual_inversion(ten) token = pipe.tokenizer.convert_tokens_to_ids("<xxxx>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_2") assert token == num_tokens + 13, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 14, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 15, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2" prompt = "hey <xxxx>" out = pipe(prompt, 
num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multiple references to multi embedding ten = {"<cat>": torch.ones(3, 32)} pipe.load_textual_inversion(ten) assert ( pipe._maybe_convert_prompt("<cat> <cat>", pipe.tokenizer) == "<cat> <cat>_1 <cat>_2 <cat> <cat>_1 <cat>_2" ) prompt = "hey <cat> <cat>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) def test_text_inversion_multi_tokens(self): pipe1 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe1 = pipe1.to(torch_device) token1, token2 = "<*>", "<**>" ten1 = torch.ones((32,)) ten2 = torch.ones((32,)) * 2 num_tokens = len(pipe1.tokenizer) pipe1.load_textual_inversion(ten1, token=token1) pipe1.load_textual_inversion(ten2, token=token2) emb1 = pipe1.text_encoder.get_input_embeddings().weight pipe2 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe2 = pipe2.to(torch_device) pipe2.load_textual_inversion([ten1, ten2], token=[token1, token2]) emb2 = pipe2.text_encoder.get_input_embeddings().weight pipe3 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe3 = pipe3.to(torch_device) pipe3.load_textual_inversion(torch.stack([ten1, ten2], dim=0), token=[token1, token2]) emb3 = pipe3.text_encoder.get_input_embeddings().weight assert len(pipe1.tokenizer) == len(pipe2.tokenizer) == len(pipe3.tokenizer) == num_tokens + 2 assert ( pipe1.tokenizer.convert_tokens_to_ids(token1) == pipe2.tokenizer.convert_tokens_to_ids(token1) == pipe3.tokenizer.convert_tokens_to_ids(token1) == num_tokens ) assert ( pipe1.tokenizer.convert_tokens_to_ids(token2) == pipe2.tokenizer.convert_tokens_to_ids(token2) == pipe3.tokenizer.convert_tokens_to_ids(token2) == num_tokens + 1 ) assert emb1[num_tokens].sum().item() == emb2[num_tokens].sum().item() == emb3[num_tokens].sum().item() assert ( emb1[num_tokens + 1].sum().item() == emb2[num_tokens + 1].sum().item() == emb3[num_tokens + 1].sum().item() ) def test_download_ignore_files(self): # Check https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files/blob/72f58636e5508a218c6b3f60550dc96445547817/model_index.json#L4 with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights tmpdirname = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files") all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a pytorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack assert not any(f in ["vae/diffusion_pytorch_model.bin", "text_encoder/config.json"] for f in files) assert len(files) == 14 def test_get_pipeline_class_from_flax(self): flax_config = {"_class_name": "FlaxStableDiffusionPipeline"} config = {"_class_name": "StableDiffusionPipeline"} # when loading a PyTorch Pipeline from a FlaxPipeline `model_index.json`, e.g.: https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-lms-pipe/blob/7a9063578b325779f0f1967874a6771caa973cad/model_index.json#L2 # we need to make sure that we don't load the Flax Pipeline class, but instead the PyTorch pipeline class assert _get_pipeline_class(DiffusionPipeline, flax_config) == 
_get_pipeline_class(DiffusionPipeline, config)


class CustomPipelineTests(unittest.TestCase):
    def test_load_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)
        # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
        # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
        assert pipeline.__class__.__name__ == "CustomPipeline"

    def test_load_custom_github(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main"
        )

        # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690
        with torch.no_grad():
            output = pipeline()

        assert output.numel() == output.sum()

        # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python
        # Could in the future work with hashes instead.
        del sys.modules["diffusers_modules.git.one_step_unet"]

        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2"
        )
        with torch.no_grad():
            output = pipeline()

        assert output.numel() != output.sum()

        assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline"

    def test_run_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert images[0].shape == (1, 32, 32, 3)

        # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102
        assert output_str == "This is a test"

    def test_remote_components(self):
        # make sure that trust remote code has to be passed
        with self.assertRaises(ValueError):
            pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-components")

        # Check that only loading custom components "my_unet", "my_scheduler" works
        pipeline = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-sdxl-custom-components", trust_remote_code=True
        )

        assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel")
        assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler")
        assert pipeline.__class__.__name__ == "StableDiffusionXLPipeline"

        pipeline = pipeline.to(torch_device)
        images = pipeline("test", num_inference_steps=2, output_type="np")[0]

        assert images.shape == (1, 64, 64, 3)

        # Check that only loading custom components "my_unet", "my_scheduler" and explicit custom pipeline works
        pipeline = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-sdxl-custom-components", custom_pipeline="my_pipeline", trust_remote_code=True
        )

        assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel")
        assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler")
        assert pipeline.__class__.__name__ == "MyPipeline"

        pipeline = pipeline.to(torch_device)
        images = pipeline("test", num_inference_steps=2, output_type="np")[0]

        assert images.shape == (1, 64, 64, 3)

    def test_remote_auto_custom_pipe(self):
        # make sure that trust remote code has to be passed
        with self.assertRaises(ValueError):
            pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-all")
DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-all") # Check that only loading custom componets "my_unet", "my_scheduler" and auto custom pipeline works pipeline = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-sdxl-custom-all", trust_remote_code=True ) assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") assert pipeline.__class__.__name__ == "MyPipeline" pipeline = pipeline.to(torch_device) images = pipeline("test", num_inference_steps=2, output_type="np")[0] assert images.shape == (1, 64, 64, 3) def test_local_custom_pipeline_repo(self): local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path ) pipeline = pipeline.to(torch_device) images, output_str = pipeline(num_inference_steps=2, output_type="np") assert pipeline.__class__.__name__ == "CustomLocalPipeline" assert images[0].shape == (1, 32, 32, 3) # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 assert output_str == "This is a local test" def test_local_custom_pipeline_file(self): local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py") pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path ) pipeline = pipeline.to(torch_device) images, output_str = pipeline(num_inference_steps=2, output_type="np") assert pipeline.__class__.__name__ == "CustomLocalPipeline" assert images[0].shape == (1, 32, 32, 3) # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 assert output_str == "This is a local test" def test_custom_model_and_pipeline(self): pipe = CustomPipeline( encoder=CustomEncoder(), scheduler=DDIMScheduler(), ) with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname, safe_serialization=False) pipe_new = CustomPipeline.from_pretrained(tmpdirname) pipe_new.save_pretrained(tmpdirname) conf_1 = dict(pipe.config) conf_2 = dict(pipe_new.config) del conf_2["_name_or_path"] assert conf_1 == conf_2 @slow @require_torch_gpu def test_download_from_git(self): # Because adaptive_avg_pool2d_backward_cuda # does not have a deterministic implementation. 
        clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

        feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

        pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            custom_pipeline="clip_guided_stable_diffusion",
            clip_model=clip_model,
            feature_extractor=feature_extractor,
            torch_dtype=torch.float16,
        )
        pipeline.enable_attention_slicing()
        pipeline = pipeline.to(torch_device)

        # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the pypi package of the library,
        # but solely in the community examples folder of GitHub under:
        # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py
        assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion"

        image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
        assert image.shape == (512, 512, 3)

    def test_save_pipeline_change_config(self):
        pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = DiffusionPipeline.from_pretrained(tmpdirname)

            assert pipe.scheduler.__class__.__name__ == "PNDMScheduler"

        # let's make sure that changing the scheduler is correctly reflected
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
            pipe.save_pretrained(tmpdirname)
            pipe = DiffusionPipeline.from_pretrained(tmpdirname)

            assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler"


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def dummy_uncond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def dummy_cond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    @parameterized.expand(
        [
            [DDIMScheduler, DDIMPipeline, 32],
            [DDPMScheduler, DDPMPipeline, 32],
            [DDIMScheduler, DDIMPipeline, (32, 64)],
            [DDPMScheduler, DDPMPipeline, (64, 32)],
        ]
    )
    def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32):
        unet = self.dummy_uncond_unet(sample_size)
        scheduler = scheduler_fn()
        pipeline = pipeline_fn(unet, scheduler).to(torch_device)

        generator = torch.manual_seed(0)
        out_image = pipeline(
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images
        sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size
        assert out_image.shape == (1, *sample_size, 3)

    def test_stable_diffusion_components(self):
        """Test that components property works correctly"""
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32))

        # make sure here that pndm scheduler skips prk
        inpaint = StableDiffusionInpaintPipelineLegacy(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        ).to(torch_device)
        img2img = StableDiffusionImg2ImgPipeline(**inpaint.components, image_encoder=None).to(torch_device)
        text2img = StableDiffusionPipeline(**inpaint.components, image_encoder=None).to(torch_device)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.manual_seed(0)
        image_inpaint = inpaint(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            mask_image=mask_image,
        ).images
        image_img2img = img2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        image_text2img = text2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        assert image_inpaint.shape == (1, 32, 32, 3)
        assert image_img2img.shape == (1, 32, 32, 3)
        assert image_text2img.shape == (1, 64, 64, 3)

    @require_torch_gpu
    def test_pipe_false_offload_warn(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.enable_model_cpu_offload()

        logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
        with CaptureLogger(logger) as cap_logger:
            sd.to("cuda")

        assert "It is strongly recommended against doing so" in str(cap_logger)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

    def test_set_scheduler(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDIMScheduler)
        sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDPMScheduler)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, PNDMScheduler)
        sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, LMSDiscreteScheduler)
        sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerDiscreteScheduler)
        sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler)
        sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DPMSolverMultistepScheduler)

    def test_set_component_to_none(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        pipeline = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        generator = torch.Generator(device="cpu").manual_seed(0)

        prompt = "This is a flower"

        out_image = pipeline(
            prompt=prompt,
            generator=generator,
            num_inference_steps=1,
            output_type="np",
        ).images

        pipeline.feature_extractor = None
        generator = torch.Generator(device="cpu").manual_seed(0)
        out_image_2 = pipeline(
            prompt=prompt,
            generator=generator,
            num_inference_steps=1,
            output_type="np",
        ).images

        assert out_image.shape == (1, 64, 64, 3)
        assert np.abs(out_image - out_image_2).max() < 1e-3

    def test_optional_components_is_none(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        items = {
            "feature_extractor": self.dummy_extractor,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": bert,
            "tokenizer": tokenizer,
            "safety_checker": None,
            # we don't add an image encoder
        }

        pipeline = StableDiffusionPipeline(**items)

        assert sorted(pipeline.components.keys()) == sorted(["image_encoder"] + list(items.keys()))
        assert pipeline.image_encoder is None

    def test_set_scheduler_consistency(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        pndm_config = sd.scheduler.config
        sd.scheduler = DDPMScheduler.from_config(pndm_config)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        pndm_config_2 = sd.scheduler.config
        pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config}

        assert dict(pndm_config) == dict(pndm_config_2)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=ddim,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        ddim_config = sd.scheduler.config
        sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config)
        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        ddim_config_2 = sd.scheduler.config
        ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config}

        assert dict(ddim_config) == dict(ddim_config_2)

    def test_save_safe_serialization(self):
        pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipeline.save_pretrained(tmpdirname, safe_serialization=True)

            # Validate that the VAE safetensor exists and is of the correct format
            vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(vae_path), f"Could not find {vae_path}"
            _ = safetensors.torch.load_file(vae_path)

            # Validate that the UNet safetensor exists and is of the correct format
            unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(unet_path), f"Could not find {unet_path}"
            _ = safetensors.torch.load_file(unet_path)

            # Validate that the text encoder safetensor exists and is of the correct format
            text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors")
            assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}"
            _ = safetensors.torch.load_file(text_encoder_path)

            pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname)
            assert pipeline.unet is not None
            assert pipeline.vae is not None
            assert pipeline.text_encoder is not None
            assert pipeline.scheduler is not None
            assert pipeline.feature_extractor is not None

    def test_no_pytorch_download_when_doing_safetensors(self):
        # by default we don't download
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # safetensors exists
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does not
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

    def test_no_safetensors_download_when_doing_pytorch(self):
        use_safetensors = False

        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all",
                cache_dir=tmpdirname,
                use_safetensors=use_safetensors,
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # safetensors does not exist
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

    def test_optional_components(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        orig_sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=unet,
            feature_extractor=self.dummy_extractor,
        )
        sd = orig_sd

        assert sd.config.requires_safety_checker is True

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that passing None works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False
            )

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that loading previous None works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            orig_sd.save_pretrained(tmpdirname)

            # Test that loading works without the safety_checker directory
            shutil.rmtree(os.path.join(tmpdirname, "safety_checker"))
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
            config["safety_checker"] = [None, None]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False)
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            # Test that loading from deleted model index works
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
            del config["safety_checker"]
            del config["feature_extractor"]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor != (None, None)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname,
                feature_extractor=self.dummy_extractor,
                safety_checker=unet,
                requires_safety_checker=[True, True],
            )

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)

    def test_name_or_path(self):
        model_path = "hf-internal-testing/tiny-stable-diffusion-torch"
        sd = DiffusionPipeline.from_pretrained(model_path)

        assert sd.name_or_path == model_path

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)
            sd = DiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.name_or_path == tmpdirname

    def test_error_no_variant_available(self):
        variant = "fp16"
        with self.assertRaises(ValueError) as error_context:
            _ = StableDiffusionPipeline.download(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", variant=variant
            )

        assert "but no such modeling files are available" in str(error_context.exception)
        assert variant in str(error_context.exception)

    def test_pipe_to(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        device_type = torch.device(torch_device).type

        sd1 = sd.to(device_type)
        sd2 = sd.to(torch.device(device_type))
        sd3 = sd.to(device_type, torch.float32)
        sd4 = sd.to(device=device_type)
        sd5 = sd.to(torch_device=device_type)
        sd6 = sd.to(device_type, dtype=torch.float32)
        sd7 = sd.to(device_type, torch_dtype=torch.float32)

        assert sd1.device.type == device_type
        assert sd2.device.type == device_type
        assert sd3.device.type == device_type
        assert sd4.device.type == device_type
        assert sd5.device.type == device_type
        assert sd6.device.type == device_type
        assert sd7.device.type == device_type

        sd1 = sd.to(torch.float16)
        sd2 = sd.to(None, torch.float16)
        sd3 = sd.to(dtype=torch.float16)
        sd4 = sd.to(torch_dtype=torch.float16)
        sd5 = sd.to(None, dtype=torch.float16)
        sd6 = sd.to(None, torch_dtype=torch.float16)

        assert sd1.dtype == torch.float16
        assert sd2.dtype == torch.float16
        assert sd3.dtype == torch.float16
        assert sd4.dtype == torch.float16
        assert sd5.dtype == torch.float16
        assert sd6.dtype == torch.float16

        sd1 = sd.to(device=device_type, dtype=torch.float16)
        sd2 = sd.to(torch_device=device_type, torch_dtype=torch.float16)
        sd3 = sd.to(device_type, torch.float16)

        assert sd1.dtype == torch.float16
        assert sd2.dtype == torch.float16
        assert sd3.dtype == torch.float16

        assert sd1.device.type == device_type
        assert sd2.device.type == device_type
        assert sd3.device.type == device_type

    def test_pipe_same_device_id_offload(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.enable_model_cpu_offload(gpu_id=5)
        assert sd._offload_gpu_id == 5
        sd.maybe_free_model_hooks()
        assert sd._offload_gpu_id == 5


@slow
@require_torch_gpu
class PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_smart_download(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
            local_repo_name = "--".join(["models"] + model_id.split("/"))
            snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
            snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])

            # inspect all downloaded files to make sure that everything is included
            assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
            assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            # let's make sure the super large numpy file:
            # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
            # is not downloaded, but all the expected ones are
            assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))

    def test_warning_unused_kwargs(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        logger = logging.get_logger("diffusers.pipelines")
        with tempfile.TemporaryDirectory() as tmpdirname:
            with CaptureLogger(logger) as cap_logger:
                DiffusionPipeline.from_pretrained(
                    model_id,
                    not_used=True,
                    cache_dir=tmpdirname,
                    force_download=True,
                )

        assert (
            cap_logger.out.strip().split("\n")[-1]
            == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored."
        )

    def test_from_save_pretrained(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    @require_python39_or_higher
    @require_torch_2
    def test_from_save_pretrained_dynamo(self):
        run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None)

    def test_from_pretrained_hub(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm = ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    def test_from_pretrained_hub_pass_model(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        # pass unet into DiffusionPipeline
        unet = UNet2DModel.from_pretrained(model_path)
        ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
        ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device)
        ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    def test_output_format(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDIMScheduler.from_pretrained(model_path)
        pipe = DDIMPipeline.from_pretrained(model_path, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        images = pipe(output_type="numpy").images
        assert images.shape == (1, 32, 32, 3)
        assert isinstance(images, np.ndarray)

        images = pipe(output_type="pil", num_inference_steps=4).images
        assert isinstance(images, list)
        assert len(images) == 1
        assert isinstance(images[0], PIL.Image.Image)

        # use PIL by default
        images = pipe(num_inference_steps=4).images
        assert isinstance(images, list)
        assert isinstance(images[0], PIL.Image.Image)

    @require_flax
    def test_from_flax_from_pt(self):
        pipe_pt = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe_pt.to(torch_device)

        from diffusers import FlaxStableDiffusionPipeline

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_pt.save_pretrained(tmpdirname)

            pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained(
                tmpdirname, safety_checker=None, from_pt=True
            )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_flax.save_pretrained(tmpdirname, params=params)
            pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True)
            pipe_pt_2.to(torch_device)

        prompt = "Hello"

        generator = torch.manual_seed(0)
        image_0 = pipe_pt(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        generator = torch.manual_seed(0)
        image_1 = pipe_pt_2(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass"

    @require_compel
    def test_weighted_prompts_compel(self):
        from compel import Compel

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.enable_attention_slicing()

        compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

        prompt = "a red cat playing with a ball{}"

        prompts = [prompt.format(s) for s in ["", "++", "--"]]

        prompt_embeds = compel(prompts)

        generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]

        images = pipe(
            prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy"
        ).images

        for i, image in enumerate(images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/compel/forest_{i}.npy"
            )

            assert np.abs(image - expected_image).max() < 3e-1


@nightly
@require_torch_gpu
class PipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_ddpm_ddim_equality_batched(self):
        seed = 0
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler()
        ddim_scheduler = DDIMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddim_images = ddim(
            batch_size=2,
            generator=generator,
            num_inference_steps=1000,
            eta=1.0,
            output_type="numpy",
            use_clipped_model_output=True,  # Need this to make DDIM match DDPM
        ).images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_images - ddim_images).max() < 1e-1
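        # Hedged note on the check above (sketch of the reasoning, not upstream
        # code): with eta=1.0 a DDIM step injects the maximal per-step noise,
        # and when run over all 1000 training timesteps with clipped model
        # outputs it follows the same update rule as DDPM's ancestral sampler.
        # The two trajectories therefore agree only up to accumulated
        # floating-point noise, which is why the tolerance is a loose 1e-1
        # rather than exact equality.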
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_combined.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from huggingface_hub import ModelCard

from diffusers import (
    DDPMScheduler,
    DiffusionPipeline,
    KandinskyV22CombinedPipeline,
    KandinskyV22Pipeline,
    KandinskyV22PriorPipeline,
)
from diffusers.pipelines.pipeline_utils import CONNECTED_PIPES_KEYS


def state_dicts_almost_equal(sd1, sd2):
    sd1 = dict(sorted(sd1.items()))
    sd2 = dict(sorted(sd2.items()))

    models_are_equal = True
    for ten1, ten2 in zip(sd1.values(), sd2.values()):
        if (ten1 - ten2).abs().sum() > 1e-3:
            models_are_equal = False

    return models_are_equal


class CombinedPipelineFastTest(unittest.TestCase):
    def modelcard_has_connected_pipeline(self, model_id):
        modelcard = ModelCard.load(model_id)
        connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}
        connected_pipes = {k: v for k, v in connected_pipes.items() if v is not None}

        return len(connected_pipes) > 0

    def test_correct_modelcard_format(self):
        # hf-internal-testing/tiny-random-kandinsky-v22-prior has no metadata
        assert not self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-prior")

        # see https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L2
        assert self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-decoder")

    def test_load_connected_checkpoint_when_specified(self):
        pipeline_prior = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior")
        pipeline_prior_connected = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-random-kandinsky-v22-prior", load_connected_pipeline=True
        )

        # Passing `load_connected_pipeline` to prior is a no-op as the pipeline has no connected pipeline
        assert pipeline_prior.__class__ == pipeline_prior_connected.__class__

        pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder")
        pipeline_connected = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-random-kandinsky-v22-decoder", load_connected_pipeline=True
        )

        # Passing `load_connected_pipeline` to decoder loads the combined pipeline
        assert pipeline.__class__ != pipeline_connected.__class__
        assert pipeline.__class__ == KandinskyV22Pipeline
        assert pipeline_connected.__class__ == KandinskyV22CombinedPipeline

        # check that loaded components match prior and decoder components
        assert set(pipeline_connected.components.keys()) == set(
            ["prior_" + k for k in pipeline_prior.components.keys()] + list(pipeline.components.keys())
        )

    def test_load_connected_checkpoint_default(self):
        prior = KandinskyV22PriorPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior")
        decoder = KandinskyV22Pipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder")

        # check that combined pipeline loads both prior & decoder because of
        # https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L3
        assert (
            KandinskyV22CombinedPipeline._load_connected_pipes
        )  # combined pipelines will download more checkpoints than just the one specified

        pipeline = KandinskyV22CombinedPipeline.from_pretrained(
            "hf-internal-testing/tiny-random-kandinsky-v22-decoder"
        )

        prior_comps = prior.components
        decoder_comps = decoder.components
        for k, component in pipeline.components.items():
            if k.startswith("prior_"):
                k = k[6:]
                comp = prior_comps[k]
            else:
                comp = decoder_comps[k]

            if isinstance(component, torch.nn.Module):
                assert state_dicts_almost_equal(component.state_dict(), comp.state_dict())
            elif hasattr(component, "config"):
                assert dict(component.config) == dict(comp.config)
            else:
                assert component.__class__ == comp.__class__

    def test_load_connected_checkpoint_with_passed_obj(self):
        pipeline = KandinskyV22CombinedPipeline.from_pretrained(
            "hf-internal-testing/tiny-random-kandinsky-v22-decoder"
        )
        prior_scheduler = DDPMScheduler.from_config(pipeline.prior_scheduler.config)
        scheduler = DDPMScheduler.from_config(pipeline.scheduler.config)

        # make sure we pass a different scheduler and prior_scheduler
        assert pipeline.prior_scheduler.__class__ != prior_scheduler.__class__
        assert pipeline.scheduler.__class__ != scheduler.__class__

        pipeline_new = KandinskyV22CombinedPipeline.from_pretrained(
            "hf-internal-testing/tiny-random-kandinsky-v22-decoder",
            prior_scheduler=prior_scheduler,
            scheduler=scheduler,
        )
        assert dict(pipeline_new.prior_scheduler.config) == dict(prior_scheduler.config)
        assert dict(pipeline_new.scheduler.config) == dict(scheduler.config)
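
# Hedged reference sketch (an assumption, not verified against the Hub repo):
# the "connected pipeline" link probed by `modelcard_has_connected_pipeline`
# above is plain model-card metadata. Given the
# `getattr(modelcard.data, prefix, [None])[0]` lookup, the decoder repo's
# README front matter presumably lists one repo id per key in
# CONNECTED_PIPES_KEYS, along the lines of:
#
#   ---
#   prior:
#     - hf-internal-testing/tiny-random-kandinsky-v22-prior
#   ---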
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_onnx_common.py
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxPipelineTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline,
    equivalence of dict and tuple outputs, etc.
    """

    pass
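
# Usage sketch (hypothetical class and attribute names, not part of this file):
# a concrete ONNX pipeline test module would combine the mixin with a TestCase:
#
#   class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
#       ...  # pipeline-specific fixtures and overrides go here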
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/pipeline_params.py
# These are canonical sets of parameters for different types of pipelines.
# They are set on subclasses of `PipelineTesterMixin` as `params` and
# `batch_params`.
#
# If a pipeline's set of arguments has minor changes from one of the common sets
# of arguments, do not make modifications to the existing common sets of arguments.
# I.e. a text to image pipeline with non-configurable height and width arguments
# should set its attribute as `params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`.

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])

TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS = frozenset(["prompt_embeds"])
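
# Usage sketch, following the guidance in the header comment above (the class
# name is illustrative, not part of the test suite): a tester subclass picks
# the canonical set and subtracts the arguments its pipeline does not expose.
#
#   class MyFixedSizePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS - {"height", "width"}
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS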
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_auto.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import os
import shutil
import unittest
from collections import OrderedDict
from pathlib import Path

import torch

from diffusers import (
    AutoPipelineForImage2Image,
    AutoPipelineForInpainting,
    AutoPipelineForText2Image,
    ControlNetModel,
    DiffusionPipeline,
)
from diffusers.pipelines.auto_pipeline import (
    AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
    AUTO_INPAINT_PIPELINES_MAPPING,
    AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
from diffusers.utils.testing_utils import slow


PRETRAINED_MODEL_REPO_MAPPING = OrderedDict(
    [
        ("stable-diffusion", "runwayml/stable-diffusion-v1-5"),
        ("if", "DeepFloyd/IF-I-XL-v1.0"),
        ("kandinsky", "kandinsky-community/kandinsky-2-1"),
        ("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"),
    ]
)


class AutoPipelineFastTest(unittest.TestCase):
    def test_from_pipe_consistent(self):
        pipe = AutoPipelineForText2Image.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False
        )
        original_config = dict(pipe.config)

        pipe = AutoPipelineForImage2Image.from_pipe(pipe)
        assert dict(pipe.config) == original_config

        pipe = AutoPipelineForText2Image.from_pipe(pipe)
        assert dict(pipe.config) == original_config

    def test_from_pipe_override(self):
        pipe = AutoPipelineForText2Image.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False
        )

        pipe = AutoPipelineForImage2Image.from_pipe(pipe, requires_safety_checker=True)
        assert pipe.config.requires_safety_checker is True

        pipe = AutoPipelineForText2Image.from_pipe(pipe, requires_safety_checker=True)
        assert pipe.config.requires_safety_checker is True

    def test_from_pipe_consistent_sdxl(self):
        pipe = AutoPipelineForImage2Image.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-xl-pipe",
            requires_aesthetics_score=True,
            force_zeros_for_empty_prompt=False,
        )

        original_config = dict(pipe.config)

        pipe = AutoPipelineForText2Image.from_pipe(pipe)
        pipe = AutoPipelineForImage2Image.from_pipe(pipe)

        assert dict(pipe.config) == original_config

    def test_kwargs_local_files_only(self):
        repo = "hf-internal-testing/tiny-stable-diffusion-torch"
        tmpdirname = DiffusionPipeline.download(repo)
        tmpdirname = Path(tmpdirname)

        # edit commit_id so that it's not the latest commit
        commit_id = tmpdirname.name
        new_commit_id = commit_id + "hug"

        ref_dir = tmpdirname.parent.parent / "refs/main"
        with open(ref_dir, "w") as f:
            f.write(new_commit_id)

        new_tmpdirname = tmpdirname.parent / new_commit_id
        os.rename(tmpdirname, new_tmpdirname)

        try:
            AutoPipelineForText2Image.from_pretrained(repo, local_files_only=True)
        except OSError:
            assert False, "not able to load local files"

        shutil.rmtree(tmpdirname.parent.parent)

    def test_from_pipe_controlnet_text2img(self):
        pipe = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
        controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet")

        pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet)
        assert pipe.__class__.__name__ == "StableDiffusionControlNetPipeline"
        assert "controlnet" in pipe.components

        pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None)
        assert pipe.__class__.__name__ == "StableDiffusionPipeline"
        assert "controlnet" not in pipe.components

    def test_from_pipe_controlnet_img2img(self):
        pipe = AutoPipelineForImage2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
        controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet")

        pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=controlnet)
        assert pipe.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
        assert "controlnet" in pipe.components

        pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=None)
        assert pipe.__class__.__name__ == "StableDiffusionImg2ImgPipeline"
        assert "controlnet" not in pipe.components

    def test_from_pipe_controlnet_inpaint(self):
        pipe = AutoPipelineForInpainting.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet")

        pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=controlnet)
        assert pipe.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline"
        assert "controlnet" in pipe.components

        pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=None)
        assert pipe.__class__.__name__ == "StableDiffusionInpaintPipeline"
        assert "controlnet" not in pipe.components

    def test_from_pipe_controlnet_new_task(self):
        pipe_text2img = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet")

        pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_text2img, controlnet=controlnet)
        assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
        assert "controlnet" in pipe_control_img2img.components

        pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=None)
        assert pipe_inpaint.__class__.__name__ == "StableDiffusionInpaintPipeline"
        assert "controlnet" not in pipe_inpaint.components

        # testing `from_pipe` for text2img controlnet
        ## 1. from a different controlnet pipe, without controlnet argument
        pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img)
        assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline"
        assert "controlnet" in pipe_control_text2img.components

        ## 2. from a different controlnet pipe, with controlnet argument
        pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img, controlnet=controlnet)
        assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline"
        assert "controlnet" in pipe_control_text2img.components

        ## 3. from same controlnet pipeline class, with a different controlnet component
        pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_text2img, controlnet=controlnet)
        assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline"
        assert "controlnet" in pipe_control_text2img.components

        # testing from_pipe for inpainting
        ## 1. from a different controlnet pipeline class
        pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img)
        assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline"
        assert "controlnet" in pipe_control_inpaint.components

        ## from a different controlnet pipe, with a different controlnet
        pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=controlnet)
        assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline"
        assert "controlnet" in pipe_control_inpaint.components

        ## from same controlnet pipe, with a different controlnet
        pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_inpaint, controlnet=controlnet)
        assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline"
        assert "controlnet" in pipe_control_inpaint.components

        # testing from_pipe from img2img controlnet
        ## from a different controlnet pipe, without controlnet argument
        pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img)
        assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
        assert "controlnet" in pipe_control_img2img.components

        # from a different controlnet pipe, with a different controlnet component
        pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img, controlnet=controlnet)
        assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
        assert "controlnet" in pipe_control_img2img.components

        # from same controlnet pipeline class, with a different controlnet
        pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_img2img, controlnet=controlnet)
        assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
        assert "controlnet" in pipe_control_img2img.components


@slow
class AutoPipelineIntegrationTest(unittest.TestCase):
    def test_pipe_auto(self):
        for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items():
            # test txt2img
            pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
                model_repo, variant="fp16", torch_dtype=torch.float16
            )
            self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name])

            pipe_to = AutoPipelineForText2Image.from_pipe(pipe_txt2img)
            self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name])

            pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_txt2img)
            self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name])

            if "kandinsky" not in model_name:
                pipe_to = AutoPipelineForInpainting.from_pipe(pipe_txt2img)
                self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name])

            del pipe_txt2img, pipe_to
            gc.collect()

            # test img2img
            pipe_img2img = AutoPipelineForImage2Image.from_pretrained(
                model_repo, variant="fp16", torch_dtype=torch.float16
            )
            self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name])

            pipe_to = AutoPipelineForText2Image.from_pipe(pipe_img2img)
            self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name])

            pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_img2img)
            self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name])

            if "kandinsky" not in model_name:
                pipe_to = AutoPipelineForInpainting.from_pipe(pipe_img2img)
                self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name])

            del pipe_img2img, pipe_to
            gc.collect()

            # test inpaint
            if "kandinsky" not in model_name:
                pipe_inpaint = AutoPipelineForInpainting.from_pretrained(
                    model_repo, variant="fp16", torch_dtype=torch.float16
                )
                self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING[model_name])

                pipe_to = AutoPipelineForText2Image.from_pipe(pipe_inpaint)
                self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name])

                pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_inpaint)
                self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name])

                pipe_to = AutoPipelineForInpainting.from_pipe(pipe_inpaint)
                self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name])

                del pipe_inpaint, pipe_to
                gc.collect()

    def test_from_pipe_consistent(self):
        for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items():
            if model_name in ["kandinsky", "kandinsky22"]:
                auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image]
            else:
                auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoPipelineForInpainting]

            # test from_pretrained
            for pipe_from_class in auto_pipes:
                pipe_from = pipe_from_class.from_pretrained(model_repo, variant="fp16", torch_dtype=torch.float16)
                pipe_from_config = dict(pipe_from.config)

                for pipe_to_class in auto_pipes:
                    pipe_to = pipe_to_class.from_pipe(pipe_from)
                    self.assertEqual(dict(pipe_to.config), pipe_from_config)

                del pipe_from, pipe_to
                gc.collect()

    def test_controlnet(self):
        # test from_pretrained
        model_repo = "runwayml/stable-diffusion-v1-5"
        controlnet_repo = "lllyasviel/sd-controlnet-canny"

        controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16)

        pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
            model_repo, controlnet=controlnet, torch_dtype=torch.float16
        )
        self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"])

        pipe_img2img = AutoPipelineForImage2Image.from_pretrained(
            model_repo, controlnet=controlnet, torch_dtype=torch.float16
        )
        self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"])

        pipe_inpaint = AutoPipelineForInpainting.from_pretrained(
            model_repo, controlnet=controlnet, torch_dtype=torch.float16
        )
        self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"])

        # test from_pipe
        for pipe_from in [pipe_txt2img, pipe_img2img, pipe_inpaint]:
            pipe_to = AutoPipelineForText2Image.from_pipe(pipe_from)
            self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"])
            self.assertEqual(dict(pipe_to.config), dict(pipe_txt2img.config))

            pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_from)
            self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"])
            self.assertEqual(dict(pipe_to.config), dict(pipe_img2img.config))

            pipe_to = AutoPipelineForInpainting.from_pipe(pipe_from)
            self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"])
            self.assertEqual(dict(pipe_to.config), dict(pipe_inpaint.config))
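
# Usage sketch (hedged, not part of the test suite): the `from_pipe`
# conversions exercised above re-wrap already-instantiated components instead
# of re-downloading or duplicating them, so switching tasks is cheap:
#
#   pipe_t2i = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)  # shares the loaded weights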
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_flax.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
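# --- Illustrative sketch (not part of the upstream test suite) ---
# Every test above follows the same data-parallel recipe before the jitted call:
# replicate the pipeline params across devices, split the PRNG key so each device
# gets its own stream, and shard the tokenized prompts along the device axis.
# A minimal helper capturing that recipe, assuming flax/jax are available; it is
# defined for illustration only and never invoked by the tests.
def _pmap_inputs_sketch(pipeline, params, prompt, seed=0):
    """Return per-device (params, prng_seed, prompt_ids) for a jitted pipeline call."""
    num_devices = jax.device_count()
    # One copy of the prompt per device; prepare_inputs tokenizes to (num_devices, seq_len).
    prompt_ids = pipeline.prepare_inputs(num_devices * [prompt])
    # Copy the weights onto every device.
    params = replicate(params)
    # One independent RNG stream per device.
    prng_seed = jax.random.split(jax.random.PRNGKey(seed), num_devices)
    # Reshape the leading axis to (num_devices, batch_per_device, ...).
    prompt_ids = shard(prompt_ids)
    return params, prng_seed, prompt_ids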
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise augmented image embeddings.
            # I.e. the image embeddings concatenated with the noised embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU undeterminism requires a looser check.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    # Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP undeterminism requires a looser check.
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)


@nightly
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
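# --- Illustrative sketch (not part of the upstream test suite) ---
# Why `projection_class_embeddings_input_dim=embedder_projection_dim * 2` in
# `get_dummy_components` above: with `class_embed_type="projection"` the UNet's
# class embedding input is the noise-augmented CLIP image embedding concatenated
# with a noise-level embedding of the same width, so the projection layer sees
# twice the embedder dimension. A minimal torch-only sketch of that shape
# arithmetic, assuming batch size 1 and `embed_dim=32` to match the dummies:
def _class_embedding_shape_sketch(embed_dim=32):
    image_embeds = torch.randn(1, embed_dim)        # noised CLIP image embedding
    noise_level_embeds = torch.randn(1, embed_dim)  # embedding of the noise-augmentation level
    class_embeds = torch.cat([image_embeds, noise_level_embeds], dim=-1)
    assert class_embeds.shape[-1] == 2 * embed_dim  # matches projection_class_embeddings_input_dim
    return class_embeds.shape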
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    skip_mps,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise augmented image embeddings.
            # I.e. the image embeddings concatenated with the noised embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because GPU undeterminism requires a looser check.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    # Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because undeterminism requires a looser check.
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@nightly
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
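# --- Illustrative sketch (not part of the upstream test suite) ---
# `get_dummy_inputs` above turns a random float tensor in [-1, 1] into a PIL image
# by rescaling to [0, 1], moving channels last, and handing the numpy array to
# `DiffusionPipeline.numpy_to_pil`. A minimal sketch of that same conversion:
def _tensor_to_pil_sketch(image):
    """image: float tensor of shape (batch, channels, height, width) in [-1, 1]."""
    image = (image * 0.5 + 0.5).clamp(0, 1)                   # [-1, 1] -> [0, 1]
    image = image.cpu().permute(0, 2, 3, 1).float().numpy()   # NCHW -> NHWC numpy
    return DiffusionPipeline.numpy_to_pil(image)              # list of PIL.Image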
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

import diffusers
from diffusers import (
    AutoencoderKLTemporalDecoder,
    EulerDiscreteScheduler,
    StableVideoDiffusionPipeline,
    UNetSpatioTemporalConditionModel,
)
from diffusers.utils import is_accelerate_available, is_accelerate_version, load_image, logging
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    CaptureLogger,
    disable_full_determinism,
    enable_full_determinism,
    floats_tensor,
    numpy_cosine_similarity_distance,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin


def to_np(tensor):
    if isinstance(tensor, torch.Tensor):
        tensor = tensor.detach().cpu().numpy()

    return tensor


class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableVideoDiffusionPipeline
    params = frozenset(["image"])
    batch_params = frozenset(["image", "generator"])
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetSpatioTemporalConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=(
                "CrossAttnDownBlockSpatioTemporal",
                "DownBlockSpatioTemporal",
            ),
            up_block_types=("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal"),
            cross_attention_dim=32,
            num_attention_heads=8,
            projection_class_embeddings_input_dim=96,
            addition_time_embed_dim=32,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            interpolation_type="linear",
            num_train_timesteps=1000,
            prediction_type="v_prediction",
            sigma_max=700.0,
            sigma_min=0.002,
            steps_offset=1,
            timestep_spacing="leading",
            timestep_type="continuous",
            trained_betas=None,
            use_karras_sigmas=True,
        )

        torch.manual_seed(0)
        vae = AutoencoderKLTemporalDecoder(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            latent_channels=4,
        )

        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=32,
            projection_dim=32,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        image_encoder = CLIPVisionModelWithProjection(config)

        torch.manual_seed(0)
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        components = {
            "unet": unet,
            "image_encoder": image_encoder,
            "scheduler": scheduler,
            "vae": vae,
            "feature_extractor": feature_extractor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(0)).to(device)
        inputs = {
            "generator": generator,
            "image": image,
            "num_inference_steps": 2,
            "output_type": "pt",
            "min_guidance_scale": 1.0,
            "max_guidance_scale": 2.5,
            "num_frames": 2,
            "height": 32,
            "width": 32,
        }
        return inputs

    @unittest.skip("Deprecated functionality")
    def test_attention_slicing_forward_pass(self):
        pass

    @unittest.skip("Batched inference works and outputs look correct, but the test is failing")
    def test_inference_batch_single_identical(
        self,
        batch_size=2,
        expected_max_diff=1e-4,
    ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        # Reset generator in case it has been used in self.get_dummy_inputs
        inputs["generator"] = torch.Generator("cpu").manual_seed(0)

        logger = logging.get_logger(pipe.__module__)
        logger.setLevel(level=diffusers.logging.FATAL)

        # batchify inputs
        batched_inputs = {}
        batched_inputs.update(inputs)

        batched_inputs["generator"] = [torch.Generator("cpu").manual_seed(0) for i in range(batch_size)]
        batched_inputs["image"] = torch.cat([inputs["image"]] * batch_size, dim=0)

        output = pipe(**inputs).frames
        output_batch = pipe(**batched_inputs).frames

        assert len(output_batch) == batch_size

        max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max()
        assert max_diff < expected_max_diff

    @unittest.skip("Test is similar to test_inference_batch_single_identical")
    def test_inference_batch_consistent(self):
        pass

    def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        output = pipe(**self.get_dummy_inputs(generator_device)).frames[0]
        output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]

        max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
        self.assertLess(max_diff, expected_max_difference)

    @unittest.skip("Test is currently failing")
    def test_float16_inference(self, expected_max_diff=5e-2):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        components = self.get_dummy_components()
        pipe_fp16 = self.pipeline_class(**components)
        for component in pipe_fp16.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe_fp16.to(torch_device, torch.float16)
        pipe_fp16.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs).frames[0]

        fp16_inputs = self.get_dummy_inputs(torch_device)
        output_fp16 = pipe_fp16(**fp16_inputs).frames[0]

        max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
        self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self, expected_max_diff=1e-2):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.to(torch_device).half()

        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs).frames[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for name, component in pipe_loaded.components.items():
            if hasattr(component, "dtype"):
                self.assertTrue(
                    component.dtype == torch.float16,
                    f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.",
                )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs).frames[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(
            max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading."
        )

    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output = pipe(**inputs).frames[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(generator_device)
        output_loaded = pipe_loaded(**inputs).frames[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, expected_max_difference)

    def test_save_load_local(self, expected_max_difference=9e-4):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs).frames[0]

        logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
        logger.setLevel(diffusers.logging.INFO)

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)

            with CaptureLogger(logger) as cap_logger:
                pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

            for name in pipe_loaded.components.keys():
                if name not in pipe_loaded._optional_components:
                    assert name in str(cap_logger)

            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs).frames[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, expected_max_difference)

    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
    def test_to_device(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)

        pipe.to("cpu")
        model_devices = [
            component.device.type for component in pipe.components.values() if hasattr(component, "device")
        ]
        self.assertTrue(all(device == "cpu" for device in model_devices))

        output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0]
        self.assertTrue(np.isnan(output_cpu).sum() == 0)

        pipe.to("cuda")
        model_devices = [
            component.device.type for component in pipe.components.values() if hasattr(component, "device")
        ]
        self.assertTrue(all(device == "cuda" for device in model_devices))

        output_cuda = pipe(**self.get_dummy_inputs("cuda")).frames[0]
        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

    def test_to_dtype(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)

        model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
        self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))

        pipe.to(torch_dtype=torch.float16)
        model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
        self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))

    @unittest.skipIf(
        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
    )
    def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_offload = pipe(**inputs).frames[0]

        pipe.enable_sequential_cpu_offload()

        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload = pipe(**inputs).frames[0]

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
        self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")

    @unittest.skipIf(
        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
        reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
    )
    def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
        generator_device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(generator_device)
        output_without_offload = pipe(**inputs).frames[0]

        pipe.enable_model_cpu_offload()
        inputs = self.get_dummy_inputs(generator_device)
        output_with_offload = pipe(**inputs).frames[0]

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
        self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")

        offloaded_modules = [
            v
            for k, v in pipe.components.items()
            if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
        ]
        self.assertTrue(
            all(v.device.type == "cpu" for v in offloaded_modules),
            f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}",
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        disable_full_determinism()

        expected_max_diff = 9e-4

        if not self.test_xformers_attention:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_without_offload = pipe(**inputs).frames[0]
        output_without_offload = (
            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
        )

        pipe.enable_xformers_memory_efficient_attention()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs).frames[0]
        output_with_offload = (
            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
        )

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
        self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")

        enable_full_determinism()


@slow
@require_torch_gpu
class StableVideoDiffusionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_sd_video(self):
        pipe = StableVideoDiffusionPipeline.from_pretrained(
            "stabilityai/stable-video-diffusion-img2vid",
            variant="fp16",
            torch_dtype=torch.float16,
        )
        pipe = pipe.to(torch_device)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true"
        )

        generator = torch.Generator("cpu").manual_seed(0)
        num_frames = 3

        output = pipe(
            image=image,
            num_frames=num_frames,
            generator=generator,
            num_inference_steps=3,
            output_type="np",
        )

        image = output.frames[0]

        assert image.shape == (num_frames, 576, 1024, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.8592, 0.8645, 0.8499, 0.8722, 0.8769, 0.8421, 0.8557, 0.8528, 0.8285])
        assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
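# --- Illustrative sketch (not part of the upstream test suite) ---
# `test_inference_batch_single_identical` above batchifies image-to-video inputs by
# repeating the conditioning image along the batch axis and giving every sample its
# own identically-seeded CPU generator, so each batch element should reproduce the
# single-sample output. A minimal sketch of that batchification step:
def _batchify_inputs_sketch(inputs, batch_size=2):
    """inputs: a single-sample input dict with an `image` tensor and a `generator`."""
    batched = dict(inputs)
    # One generator per batch element, all seeded identically for reproducibility.
    batched["generator"] = [torch.Generator("cpu").manual_seed(0) for _ in range(batch_size)]
    # Repeat the conditioning image along the batch axis.
    batched["image"] = torch.cat([inputs["image"]] * batch_size, dim=0)
    return batched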
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_img2img.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    KandinskyImg2ImgPipeline,
    KandinskyPriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 32

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummies = Dummies()
        return dummies.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummies = Dummies()
        return dummies.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)


@nightly
@require_torch_gpu
class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img_ddpm(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_ddpm_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/frog.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler")
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
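# --- Illustrative sketch (not part of the upstream test suite) ---
# The integration tests above run Kandinsky in two stages: a prior pipeline maps
# the prompt to CLIP image embeddings, and the img2img decoder consumes those
# embeddings together with the init image. A minimal sketch of that flow, assuming
# `pipe_prior` and `pipeline` are already loaded and on the right device:
def _two_stage_img2img_sketch(pipe_prior, pipeline, prompt, init_image, seed=0):
    generator = torch.Generator(device="cpu").manual_seed(seed)
    # Stage 1: prompt -> (image embedding, zero/negative image embedding).
    image_emb, zero_image_emb = pipe_prior(prompt, generator=generator, negative_prompt="").to_tuple()
    # Stage 2: decode, conditioning on both the init image and the prior's embeddings.
    return pipeline(
        prompt,
        image=init_image,
        image_embeds=image_emb,
        negative_image_embeds=zero_image_emb,
        generator=generator,
        strength=0.2,
        output_type="np",
    ).images[0]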
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_numpy,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 32

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs


class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyPipeline
    params = [
        "prompt",
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = Dummies()
        return dummy.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummy = Dummies()
        return dummy.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3


@slow
@require_torch_gpu
class KandinskyPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            prompt,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies


enable_full_determinism()


class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyCombinedPipeline
    params = [
        "prompt",
    ]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = True

    def get_dummy_components(self):
        dummy = Dummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(
            {
                "height": 64,
                "width": 64,
            }
        )
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)


class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgCombinedPipeline
    params = ["prompt", "image"]
    batch_params = ["prompt", "negative_prompt", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = Img2ImgDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = Img2ImgDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)


class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintCombinedPipeline
    params = ["prompt", "image", "mask_image"]
    batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = InpaintDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = InpaintDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs


class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = Dummies()
        return dummy.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummy = Dummies()
        return dummy.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 32

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.zeros((64, 64), dtype=np.float32)
        mask[:32, :32] = 1

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummies = Dummies()
        return dummies.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummies = Dummies()
        return dummies.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)


@nightly
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        mask = np.zeros((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 1

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
# coding=utf-8
# Copyright 2023 Harutatsu Akiyama, Jinbin Bai, and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionXLControlNetInpaintPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetPipelineSDXLFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionXLControlNetInpaintPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"}))
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            # SD2-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            conditioning_embedding_out_channels=(16, 32),
            # SD2-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # SD2-specific config below
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
        }
        return components

    def get_dummy_inputs(self, device, seed=0, img_res=64):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Get random floats in [0, 1] as image
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        mask_image = torch.ones_like(image)
        controlnet_embedder_scale_factor = 2
        control_image = (
            floats_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                rng=random.Random(seed),
            )
            .to(device)
            .cpu()
        )
        control_image = control_image.cpu().permute(0, 2, 3, 1)[0]

        # Convert image and mask_image to [0, 255]
        image = 255 * image
        mask_image = 255 * mask_image
        control_image = 255 * control_image

        # Convert to PIL image
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res))
        mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res))
        control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": init_image,
            "mask_image": mask_image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    @require_torch_gpu
    def test_stable_diffusion_xl_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            pipe.unet.set_default_attn_processor()

            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_stable_diffusion_xl_multi_prompts(self):
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)

        # forward with single prompt
        inputs = self.get_dummy_inputs(torch_device)
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = inputs["prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "different prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

        # manually set a negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same negative_prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = inputs["negative_prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = "different negative prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

    def test_controlnet_sdxl_guess(self):
        device = "cpu"

        components = self.get_dummy_components()

        sd_pipe = self.pipeline_class(**components)
        sd_pipe = sd_pipe.to(device)

        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["guess_mode"] = True

        output = sd_pipe(**inputs)
        image_slice = output.images[0, -3:, -3:, -1]
        expected_slice = np.array(
            [0.5381963, 0.4836803, 0.45821992, 0.5577731, 0.51210403, 0.4794795, 0.59282357, 0.5647199, 0.43100584]
        )

        # make sure that it's equal
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4

    # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
    def test_save_load_optional_components(self):
        pass

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.models.unet_2d_blocks import UNetMidBlock2D from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) 
text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different 
prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.7330834, 0.590667, 0.5667336, 0.6029023, 0.5679491, 0.5968194, 0.4032986, 0.47612396, 0.5089609] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 def test_controlnet_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7799, 0.614, 0.6162, 0.7082, 0.6662, 0.5833, 0.4148, 0.5182, 0.4866]) assert np.abs(image_slice.flatten() - expected_slice).max() 
< 1e-2 class StableDiffusionXLMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = 
torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): return self._test_save_load_optional_components() class StableDiffusionXLMultiControlNetOneModelPipelineFastTests( PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = 
ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - 
output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() def test_negative_conditions(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slice_without_neg_cond = image[0, -3:, -3:, -1] image = pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_without_neg_cond - image_slice_with_neg_cond).max() > 1e-2) @slow @require_torch_gpu class ControlNetSDXLPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4185, 0.4127, 0.4089, 0.4046, 0.4115, 0.4096, 0.4081, 0.4112, 0.3913]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (512, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4399, 0.5112, 0.5478, 0.4314, 0.472, 0.4823, 0.4647, 0.4957, 0.4853]) assert np.allclose(original_image, expected_image, atol=1e-04) class StableDiffusionSSD1BControlNetPipelineFastTests(StableDiffusionXLControlNetPipelineFastTests): def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) 
image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.6831671, 0.5702532, 0.5459845, 0.6299793, 0.58563006, 0.6033695, 0.4493941, 0.46132287, 0.5035841] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 def test_controlnet_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6850, 0.5135, 0.5545, 0.7033, 0.6617, 0.5971, 0.4165, 0.5480, 0.5070]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_conditioning_channels(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=None, ) controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4) assert type(controlnet.mid_block) == UNetMidBlock2D assert controlnet.conditioning_channels == 4 def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # 
SD2-specific config below
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components
0
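The `from_unet` constructor checked by `test_conditioning_channels` above is also the usual way to initialize a ControlNet for training: it clones the UNet's block layout so the control residuals line up with the UNet's down/mid blocks. A minimal sketch, assuming the public `runwayml/stable-diffusion-v1-5` checkpoint is reachable (illustration only, not part of the test suite):

```python
from diffusers import ControlNetModel, UNet2DConditionModel

# Clone the architecture of an existing UNet; with load_weights_from_unet=True
# (the default) the matching encoder weights are copied over as well.
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
controlnet = ControlNetModel.from_unet(unet, conditioning_channels=3)

# The conditioning image enters through a small conv encoder whose input
# width is set by `conditioning_channels` (3 for RGB hint images).
assert controlnet.conditioning_channels == 3
```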
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTokenizer
from transformers.models.blip_2.configuration_blip_2 import Blip2Config
from transformers.models.clip.configuration_clip import CLIPTextConfig

from diffusers import (
    AutoencoderKL,
    BlipDiffusionControlNetPipeline,
    ControlNetModel,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils.testing_utils import enable_full_determinism
from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor
from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel
from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class BlipDiffusionControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = BlipDiffusionControlNetPipeline
    params = [
        "prompt",
        "reference_image",
        "source_subject_category",
        "target_subject_category",
        "condtioning_image",
    ]
    batch_params = [
        "prompt",
        "reference_image",
        "source_subject_category",
        "target_subject_category",
        "condtioning_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "neg_prompt",
        "prompt_strength",
        "prompt_reps",
    ]

    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            vocab_size=1000,
            hidden_size=16,
            intermediate_size=16,
            projection_dim=16,
            num_hidden_layers=1,
            num_attention_heads=1,
            max_position_embeddings=77,
        )
        text_encoder = ContextCLIPTextModel(text_encoder_config)

        vae = AutoencoderKL(
            in_channels=4,
            out_channels=4,
            down_block_types=("DownEncoderBlock2D",),
            up_block_types=("UpDecoderBlock2D",),
            block_out_channels=(32,),
            layers_per_block=1,
            act_fn="silu",
            latent_channels=4,
            norm_num_groups=16,
            sample_size=16,
        )

        blip_vision_config = {
            "hidden_size": 16,
            "intermediate_size": 16,
            "num_hidden_layers": 1,
            "num_attention_heads": 1,
            "image_size": 224,
            "patch_size": 14,
            "hidden_act": "quick_gelu",
        }

        blip_qformer_config = {
            "vocab_size": 1000,
            "hidden_size": 16,
            "num_hidden_layers": 1,
            "num_attention_heads": 1,
            "intermediate_size": 16,
            "max_position_embeddings": 512,
            "cross_attention_frequency": 1,
            "encoder_hidden_size": 16,
        }
        qformer_config = Blip2Config(
            vision_config=blip_vision_config,
            qformer_config=blip_qformer_config,
            num_query_tokens=16,
            tokenizer="hf-internal-testing/tiny-random-bert",
        )
        qformer = Blip2QFormerModel(qformer_config)

        unet = UNet2DConditionModel(
            block_out_channels=(4, 16),
            layers_per_block=1,
            norm_num_groups=4,
            sample_size=16,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=16,
        )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        scheduler = PNDMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            skip_prk_steps=True,
        )

        controlnet = ControlNetModel(
            block_out_channels=(4, 16),
            layers_per_block=1,
            in_channels=4,
            norm_num_groups=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=16,
            conditioning_embedding_out_channels=(8, 16),
        )

        vae.eval()
        qformer.eval()
        text_encoder.eval()

        image_processor = BlipImageProcessor()

        components = {
            "text_encoder": text_encoder,
            "vae": vae,
            "qformer": qformer,
            "unet": unet,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "controlnet": controlnet,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        np.random.seed(seed)
        reference_image = np.random.rand(32, 32, 3) * 255
        reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA")
        cond_image = np.random.rand(32, 32, 3) * 255
        cond_image = Image.fromarray(cond_image.astype("uint8")).convert("RGBA")

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "swimming underwater",
            "generator": generator,
            "reference_image": reference_image,
            "condtioning_image": cond_image,
            "source_subject_category": "dog",
            "target_subject_category": "dog",
            "height": 32,
            "width": 32,
            "guidance_scale": 7.5,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_blipdiffusion_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        image = pipe(**self.get_dummy_inputs(device))[0]
        image_slice = image[0, -3:, -3:, 0]

        assert image.shape == (1, 16, 16, 4)

        expected_slice = np.array([0.7953, 0.7136, 0.6597, 0.4779, 0.7389, 0.4111, 0.5826, 0.4150, 0.8422])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_slice.flatten()}"
0
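Outside the dummy-component harness, the same pipeline is driven with real checkpoints. A hedged sketch: the `Salesforce/blipdiffusion-controlnet` Hub id and the local image paths are assumptions, while the argument names mirror `get_dummy_inputs` above (including the `condtioning_image` spelling, which is the pipeline's actual parameter name):

```python
import torch
from PIL import Image

from diffusers.pipelines import BlipDiffusionControlNetPipeline

pipe = BlipDiffusionControlNetPipeline.from_pretrained(
    "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16  # assumed Hub id
).to("cuda")

reference_image = Image.open("dog.jpg")            # subject to preserve (your own file)
condtioning_image = Image.open("canny_edges.png")  # structural hint (your own file)

image = pipe(
    prompt="swimming underwater",
    reference_image=reference_image,
    condtioning_image=condtioning_image,
    source_subject_category="dog",
    target_subject_category="dog",
    height=512,
    width=512,
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.Generator(device="cpu").manual_seed(0),
    output_type="pil",
).images[0]
image.save("subject_driven.png")
```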
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLImg2ImgFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, skip_first_text_encoder=False): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below 
hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0): controlnet_embedder_scale_factor = 2 image = floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": image, } return inputs def test_stable_diffusion_xl_controlnet_img2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_controlnet_img2img_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] assert output.images.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = 
self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            pipe.unet.set_default_attn_processor()

            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images
            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_stable_diffusion_xl_multi_prompts(self):
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)

        # forward with single prompt
        inputs = self.get_dummy_inputs(torch_device)
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = inputs["prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "different prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

        # manually set a negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same negative_prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = inputs["negative_prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = "different negative prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

    # copied from test_stable_diffusion_xl.py
    def test_stable_diffusion_xl_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 2 * [inputs["prompt"]]
        inputs["num_images_per_prompt"] = 2
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 2 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
0
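One practical takeaway from `test_stable_diffusion_xl_prompt_embeds`: `encode_prompt` can be run once and its four tensors reused across calls, skipping the two text encoders on every generation. A sketch under the assumption that the public SDXL canny checkpoint works for this img2img pipeline; the bird images are the ones referenced elsewhere in these tests, and the settings are illustrative:

```python
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((1024, 1024))
control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((1024, 1024))

# Encode once; encode_prompt returns (prompt, negative, pooled, negative-pooled) embeddings.
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt("evil space-punk bird")

# Reuse the cached embeddings across several seeds without re-running the text encoders.
for seed in range(3):
    out = pipe(
        image=image,
        control_image=control_image,
        strength=0.7,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        num_inference_steps=30,
        generator=torch.Generator(device="cpu").manual_seed(seed),
    ).images[0]
    out.save(f"bird_{seed}.png")
```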
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": 
controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, 
intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet 
pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetImg2ImgPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
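The slow tests above download a precomputed canny map, but in practice the control image is usually derived from the init image itself. A sketch assuming `opencv-python` is installed; the checkpoints and bird image are the same ones the tests use, and the generation settings are illustrative:

```python
import cv2
import numpy as np
import torch
from PIL import Image

from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))

# Derive the canny hint from the init image instead of downloading one.
edges = cv2.Canny(np.array(init_image), 100, 200)
edges = edges[:, :, None]
control_image = Image.fromarray(np.concatenate([edges, edges, edges], axis=2))

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

out = pipe(
    "evil space-punk bird",
    image=init_image,
    control_image=control_image,
    strength=0.6,
    num_inference_steps=30,
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
out.save("space_punk_bird.png")
```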
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_flax_controlnet.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
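The replicate/shard choreography in these Flax tests is easy to misread, so here is the bookkeeping in isolation. This sketch only inspects shapes and runs on any host (one device on CPU, typically eight on a TPU host); the zero arrays stand in for tokenized prompts and model parameters:

```python
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_samples = jax.device_count()
prompt_ids = jnp.zeros((num_samples, 77), dtype=jnp.int32)  # stand-in for tokenized prompts

# shard folds the leading batch axis into (devices, per-device batch) for pmap.
sharded = shard(prompt_ids)
print(sharded.shape)  # (jax.device_count(), 1, 77)

# replicate copies every parameter leaf onto each device, adding a leading device axis.
params = {"w": jnp.ones((2, 2))}
p_params = replicate(params)
print(p_params["w"].shape)  # (jax.device_count(), 2, 2)
```

This is why the pipeline output above has shape `(jax.device_count(), 1, 768, 512, 3)` and is reshaped afterwards to merge the device and per-device batch axes.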
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily based on: import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": 
text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components class MultiControlNetInpaintPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): 
pipeline_class = StableDiffusionControlNetInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                # use the in-place initializer; `torch.nn.init.normal` is deprecated
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        init_image = init_image.cpu().permute(0, 2, 3, 1)[0]

        image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "mask_image": mask_image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = 
self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) prompt = "pitch black hole" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe( prompt, image=image, mask_image=mask_image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=3, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_inpaint(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", 
safety_checker=None, controlnet=controlnet ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(33) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" ) init_image = init_image.resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" ) mask_image = mask_image.resize((512, 512)) prompt = "a handsome man with ray-ban sunglasses" def make_inpaint_condition(image, image_mask): image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size" image[image_mask > 0.5] = -1.0 # set as masked pixel image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) image = torch.from_numpy(image) return image control_image = make_inpaint_condition(init_image, mask_image) output = pipe( prompt, image=init_image, mask_image=mask_image, control_image=control_image, guidance_scale=9.0, eta=1.0, generator=generator, num_inference_steps=20, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy" ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetInpaintPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, mask_image=mask_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
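The `make_inpaint_condition` helper that `test_inpaint` defines inline is the standard recipe for `control_v11p_sd15_inpaint`: masked pixels are flagged with -1.0 in an otherwise [0, 1]-scaled RGB tensor. A standalone copy with comments; note the shape check here compares both spatial dims, slightly stricter than the `shape[0:1]` comparison in the test:

```python
import numpy as np
import torch
from PIL import Image


def make_inpaint_condition(image: Image.Image, image_mask: Image.Image) -> torch.Tensor:
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
    assert image.shape[:2] == image_mask.shape[:2], "image and image_mask must have the same image size"

    image[image_mask > 0.5] = -1.0  # the inpaint ControlNet expects masked pixels at -1.0
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    return torch.from_numpy(image)
```

The resulting tensor is passed as `control_image`, alongside the usual `image` and `mask_image` arguments of the inpaint pipeline.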
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, load_numpy, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet.to(memory_format=torch.channels_last) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = 
UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_controlnet_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent 
torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass class StableDiffusionMultiControlNetOneModelPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, 
conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = 
self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_depth(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" ) assert np.abs(expected_image - image).max() < 8e-1 def test_hed(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "oil painting of handsome old man, masterpiece" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_mlsd(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "room" image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_normal(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "cute toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_openpose(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Chef in the kitchen" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_scribble(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "bag" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (640, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_seg(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "house" image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) _ = pipe( prompt, image, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 4 * 10**9 def test_canny_guess_mode(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_canny_guess_mode_euler(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None) def test_v11_shuffle_global_pool_conditions(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") pipe = 
StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "New York" image = load_image( "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=7.0, ) image = output.images[0] assert image.shape == (512, 640, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3 @slow @require_torch_gpu class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pose_and_canny(self): controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny] ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird and Chef" image_canny = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) image_pose = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2
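
# ---------------------------------------------------------------------------
# Illustrative addition (not part of the upstream test file): the
# `test_control_guidance_switch` tests above pass `control_guidance_start` /
# `control_guidance_end` so that each ControlNet only conditions part of the
# denoising schedule. Below is a minimal, hedged sketch of that windowing
# logic; the function name and the exact boundary handling are assumptions
# made for illustration, not the pipeline's verbatim implementation.


def controlnet_keep_schedule(num_steps, starts, ends):
    """For each denoising step, return one 0.0/1.0 keep factor per ControlNet."""
    schedule = []
    for i in range(num_steps):
        keeps = [
            0.0 if (i / num_steps < start or (i + 1) / num_steps > end) else 1.0
            for start, end in zip(starts, ends)
        ]
        schedule.append(keeps)
    return schedule


if __name__ == "__main__":
    # With a 10-step schedule and the windows used in `test_control_guidance_switch`,
    # the first ControlNet is only kept around step 1 and the second for steps 3-6,
    # which is why the four guidance-window outputs are asserted to differ.
    for i, keeps in enumerate(controlnet_keep_schedule(10, starts=[0.1, 0.3], ends=[0.2, 0.7])):
        print(f"step {i}: keep factors {keeps}")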
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/repaint/test_repaint.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    skip_mps,
    torch_device,
)

from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = RePaintPipeline
    params = IMAGE_INPAINTING_PARAMS - {"width", "height", "guidance_scale"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = IMAGE_INPAINTING_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = RePaintScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = np.random.RandomState(seed).standard_normal((1, 3, 32, 32))
        image = torch.from_numpy(image).to(device=device, dtype=torch.float32)
        mask = (image > 0).to(device=device, dtype=torch.float32)
        inputs = {
            "image": image,
            "mask_image": mask,
            "generator": generator,
            "num_inference_steps": 5,
            "eta": 0.0,
            "jump_length": 2,
            "jump_n_sample": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_repaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = RePaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0000, 0.5426, 0.5497, 0.2200, 1.0000, 1.0000, 0.5623, 1.0000, 0.6274])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    # RePaint can hardly be made deterministic since the scheduler is currently always
    # nondeterministic
    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@nightly
@require_torch_gpu
class RepaintPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_celebahq(self):
        original_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/"
            "repaint/celeba_hq_256.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/"
            "repaint/celeba_hq_256_result.npy"
        )

        model_id = "google/ddpm-ema-celebahq-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = RePaintScheduler.from_pretrained(model_id)

        repaint = RePaintPipeline(unet=unet, scheduler=scheduler).to(torch_device)
        repaint.set_progress_bar_config(disable=None)
        repaint.enable_attention_slicing()

        generator = torch.manual_seed(0)
        output = repaint(
            original_image,
            mask_image,
            num_inference_steps=250,
            eta=0.0,
            jump_length=10,
            jump_n_sample=10,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).mean() < 1e-2
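
# ---------------------------------------------------------------------------
# Illustrative addition (not part of the upstream test file): RePaint's
# `jump_length` / `jump_n_sample` arguments (exercised above with 2/2 in the
# fast test and 10/10 in the nightly test) make the sampler repeatedly jump
# back up the noise schedule and re-denoise the known region. Below is a
# hedged, self-contained sketch of how such a resampling schedule can be
# built, following the recipe described in the RePaint paper; it is
# illustrative only, not the verbatim code of diffusers' RePaintScheduler.


def repaint_jump_schedule(num_steps, jump_length, jump_n_sample):
    """Return a list of moves: -1 denoises one step, +1 re-noises one step."""
    # Each eligible timestep gets (jump_n_sample - 1) extra resampling passes.
    jumps = {j: jump_n_sample - 1 for j in range(0, num_steps - jump_length, jump_length)}
    t, moves = num_steps, []
    while t >= 1:
        t -= 1
        moves.append(-1)  # one reverse-diffusion (denoising) step
        if jumps.get(t, 0) > 0:
            jumps[t] -= 1
            for _ in range(jump_length):
                t += 1
                moves.append(+1)  # one forward-diffusion (re-noising) step
    return moves


if __name__ == "__main__":
    # The fast test's settings: 5 inference steps, jump_length=2, jump_n_sample=2.
    moves = repaint_jump_schedule(num_steps=5, jump_length=2, jump_n_sample=2)
    print(moves.count(-1), "denoising moves and", moves.count(+1), "re-noising moves")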
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
import gc import inspect import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelImg2ImgPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = LatentConsistencyModelImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "negative_prompt", "negative_prompt_embeds"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents", "negative_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 0.3664, 0.5815, 0.6080, 
0.4977]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3994, 0.3471, 0.2540, 0.7030, 0.6193, 0.3645, 0.5777, 0.5850, 0.4965]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) init_image = init_image.resize((512, 512)) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": 
generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", "image": init_image, } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1950, 0.1961, 0.2308, 0.1786, 0.1837, 0.2320, 0.1898, 0.1885, 0.2309]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.3756, 0.3816, 0.3767, 0.3718, 0.3739, 0.3735, 0.3863, 0.3803, 0.3563]) assert np.abs(image_slice - expected_slice).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
import gc import inspect import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = LatentConsistencyModelPipeline params = TEXT_TO_IMAGE_PARAMS - {"negative_prompt", "negative_prompt_embeds"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"} image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1441, 0.5304, 0.5452, 0.1361, 0.4011, 0.4370, 0.5326, 0.3492, 0.3637]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) 
pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # skip because lcm pipeline apply cfg differently def test_callback_cfg(self): pass # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = 
self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0]) assert np.abs(image_slice - expected_slice).max() < 1e-3
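
# ---------------------------------------------------------------------------
# Illustrative addition (not part of the upstream test file):
# `test_lcm_custom_timesteps` above deletes `num_inference_steps` and passes an
# explicit `timesteps=[999, 499]` grid instead. Below is a hedged sketch of the
# argument dispatch such pipelines perform; the helper name is an assumption
# made for illustration, and whether `set_timesteps` accepts an explicit
# `timesteps=` grid depends on the scheduler in use.


def resolve_timesteps(scheduler, num_inference_steps=None, timesteps=None, device=None):
    """Configure `scheduler` from exactly one of `num_inference_steps` / `timesteps`."""
    if (timesteps is None) == (num_inference_steps is None):
        raise ValueError("Pass exactly one of `num_inference_steps` or `timesteps`.")
    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device)
    return scheduler.timesteps, len(scheduler.timesteps)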
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py
import gc import random import traceback import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, GPT2Tokenizer, ) from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch_2, require_torch_gpu, run_test_in_subprocess, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_unidiffuser_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.set_progress_bar_config(disable=None) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice - expected_slice).max() < 1e-1 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class UniDiffuserPipelineFastTests( PipelineTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = UniDiffuserPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS # vae_latents, not latents, is the argument that corresponds to VAE latent inputs image_latents_params = frozenset(["vae_latents"]) def get_dummy_components(self): unet = UniDiffuserModel.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="unet", ) scheduler = DPMSolverMultistepScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", solver_order=3, ) vae = AutoencoderKL.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="vae", ) text_encoder = CLIPTextModel.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_encoder", ) clip_tokenizer = CLIPTokenizer.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="clip_tokenizer", ) image_encoder = CLIPVisionModelWithProjection.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="image_encoder", ) # From the Stable Diffusion Image Variation pipeline tests clip_image_processor = CLIPImageProcessor(crop_size=32, size=32) # image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip") text_tokenizer = GPT2Tokenizer.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_tokenizer", ) text_decoder = 
UniDiffuserTextDecoder.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_decoder", ) components = { "vae": vae, "text_encoder": text_encoder, "image_encoder": image_encoder, "clip_image_processor": clip_image_processor, "clip_tokenizer": clip_tokenizer, "text_decoder": text_decoder, "text_tokenizer": text_tokenizer, "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) generator = torch.Generator(device=device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def get_dummy_inputs_with_latents(self, device, seed=0): # image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) # image = image.cpu().permute(0, 2, 3, 1)[0] # image = Image.fromarray(np.uint8(image)).convert("RGB") image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg", ) image = image.resize((32, 32)) latents = self.get_fixed_latents(device, seed=seed) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "prompt_latents": latents.get("prompt_latents"), "vae_latents": latents.get("vae_latents"), "clip_latents": latents.get("clip_latents"), } return inputs def test_unidiffuser_default_joint_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'joint' unidiffuser_pipe.set_joint_mode() assert unidiffuser_pipe.mode == "joint" # inputs = self.get_dummy_inputs(device) inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. 
        del inputs["prompt"]
        del inputs["image"]

        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_joint_no_cfg_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        # Set guidance scale to 1.0 to turn off CFG
        inputs["guidance_scale"] = 1.0

        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_text2img_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_unidiffuser_default_image_0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img'
        unidiffuser_pipe.set_image_mode()
        assert unidiffuser_pipe.mode == "img"

        inputs = self.get_dummy_inputs(device)
        # Delete prompt and image for unconditional ("marginal") image generation.
        del inputs["prompt"]
        del inputs["image"]
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_unidiffuser_default_text_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text'
        unidiffuser_pipe.set_text_mode()
        assert unidiffuser_pipe.mode == "text"

        inputs = self.get_dummy_inputs(device)
        # Delete prompt and image for unconditional ("marginal") text generation.
        del inputs["prompt"]
        del inputs["image"]
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_img2text_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_joint_v1(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
del inputs["prompt"] del inputs["image"] inputs["data_type"] = 1 sample = unidiffuser_pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_text2img_v1(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs_with_latents(device) # Delete image for text-conditioned image generation del inputs["image"] image = unidiffuser_pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_unidiffuser_default_img2text_v1(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete text for image-conditioned text generation del inputs["prompt"] text = unidiffuser_pipe(**inputs).text expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_text2img_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs(device) # Delete image for text-conditioned image generation del inputs["image"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 image = unidiffuser_pipe(**inputs).images assert image.shape == (2, 32, 32, 3) def test_unidiffuser_img2text_multiple_prompts(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs(device) # Delete text for image-conditioned text generation del inputs["prompt"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 text = unidiffuser_pipe(**inputs).text assert len(text) == 3 def test_unidiffuser_text2img_multiple_images_with_latents(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = 
self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (2, 32, 32, 3)

    def test_unidiffuser_img2text_multiple_prompts_with_latents(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        text = unidiffuser_pipe(**inputs).text
        assert len(text) == 3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=2e-4)

    @require_torch_gpu
    def test_unidiffuser_default_joint_v1_cuda_fp16(self):
        device = "cuda"
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
            "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
        )
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        inputs["data_type"] = 1
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text

        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = '" This This'
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix

    @require_torch_gpu
    def test_unidiffuser_default_text2img_v1_cuda_fp16(self):
        device = "cuda"
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
            "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
        )
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
del inputs["image"] inputs["data_type"] = 1 sample = unidiffuser_pipe(**inputs) image = sample.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 @require_torch_gpu def test_unidiffuser_default_img2text_v1_cuda_fp16(self): device = "cuda" unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 ) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. del inputs["prompt"] inputs["data_type"] = 1 text = unidiffuser_pipe(**inputs).text expected_text_prefix = '" This This' assert text[0][: len(expected_text_prefix)] == expected_text_prefix @nightly @require_torch_gpu class UniDiffuserPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, seed=0, generate_latents=False): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" ) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 8.0, "output_type": "numpy", } if generate_latents: latents = self.get_fixed_latents(device, seed=seed) for latent_name, latent_tensor in latents.items(): inputs[latent_name] = latent_tensor return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) latent_device = torch.device("cpu") generator = torch.Generator(device=latent_device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32) # Move latents onto desired device. prompt_latents = prompt_latents.to(device) vae_latents = vae_latents.to(device) clip_latents = clip_latents.to(device) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def test_unidiffuser_default_joint_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # inputs = self.get_dummy_inputs(device) inputs = self.get_inputs(device=torch_device, generate_latents=True) # Delete prompt and image for joint inference. 
del inputs["prompt"] del inputs["image"] sample = pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1 expected_text_prefix = "a living room" assert text[0][: len(expected_text_prefix)] == expected_text_prefix def test_unidiffuser_default_text2img_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["image"] sample = pipe(**inputs) image = sample.images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_unidiffuser_default_img2text_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["prompt"] sample = pipe(**inputs) text = sample.text expected_text_prefix = "An astronaut" assert text[0][: len(expected_text_prefix)] == expected_text_prefix @unittest.skip(reason="Skip torch.compile test to speed up the slow test suite.") @require_torch_2 def test_unidiffuser_compile(self, seed=0): inputs = self.get_inputs(torch_device, seed=seed, generate_latents=True) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_unidiffuser_compile, inputs=inputs) @nightly @require_torch_gpu class UniDiffuserPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, seed=0, generate_latents=False): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" ) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 8.0, "output_type": "numpy", } if generate_latents: latents = self.get_fixed_latents(device, seed=seed) for latent_name, latent_tensor in latents.items(): inputs[latent_name] = latent_tensor return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) latent_device = torch.device("cpu") generator = torch.Generator(device=latent_device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32) # Move latents onto desired device. 
prompt_latents = prompt_latents.to(device) vae_latents = vae_latents.to(device) clip_latents = clip_latents.to(device) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def test_unidiffuser_default_joint_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # inputs = self.get_dummy_inputs(device) inputs = self.get_inputs(device=torch_device, generate_latents=True) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] sample = pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1 expected_text_prefix = "a living room" assert text[0][: len(expected_text_prefix)] == expected_text_prefix def test_unidiffuser_default_text2img_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["image"] sample = pipe(**inputs) image = sample.images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_unidiffuser_default_img2text_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["prompt"] sample = pipe(**inputs) text = sample.text expected_text_prefix = "An astronaut" assert text[0][: len(expected_text_prefix)] == expected_text_prefix
0
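For orientation between the test files: a minimal sketch, not part of the test file above, of the three sampling modes the UniDiffuser tests exercise. Every method and output field shown (`set_joint_mode`, `set_text_to_image_mode`, `set_image_to_text_mode`, `.images`, `.text`) and the `thu-ml/unidiffuser-v1` checkpoint come from those tests; device placement and step counts are illustrative assumptions.

```python
# Sketch of UniDiffuserPipeline's mode-switching API, mirroring the tests above.
import torch
from diffusers import UniDiffuserPipeline

pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to("cuda")  # illustrative device choice

# Joint mode: sample an (image, caption) pair with no conditioning.
pipe.set_joint_mode()
sample = pipe(num_inference_steps=20, guidance_scale=8.0)
image, caption = sample.images[0], sample.text[0]

# Text-to-image mode: condition on a prompt only.
pipe.set_text_to_image_mode()
image = pipe(prompt="an elephant under the sea", num_inference_steps=20).images[0]

# Image-to-text mode: condition on an image only.
pipe.set_image_to_text_mode()
caption = pipe(image=image, num_inference_steps=20).text[0]
```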
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/musicldm/test_musicldm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, MusicLDMPipeline, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = MusicLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=32 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], 
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "feature_extractor": feature_extractor,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_musicldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        musicldm_pipe = MusicLDMPipeline(**components)
        musicldm_pipe = musicldm_pipe.to(torch_device)
        musicldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = musicldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0027, -0.0036, -0.0037, -0.0020, -0.0035, -0.0019, -0.0037, -0.0020, -0.0038, -0.0019]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-4

    def test_musicldm_prompt_embeds(self):
        components = self.get_dummy_components()
        musicldm_pipe = MusicLDMPipeline(**components)
        musicldm_pipe = musicldm_pipe.to(torch_device)
        musicldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = musicldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = musicldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=musicldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = musicldm_pipe.text_encoder.get_text_features(text_inputs)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = musicldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_musicldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        musicldm_pipe = MusicLDMPipeline(**components)
        musicldm_pipe = musicldm_pipe.to(torch_device)
        musicldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = musicldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = musicldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=musicldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = musicldm_pipe.text_encoder.get_text_features(
                text_inputs,
            )
            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = musicldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_musicldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = musicldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0019, -0.0035, -0.0018, -0.0037, -0.0021, -0.0038, -0.0018] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = musicldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = musicldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = musicldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = musicldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_musicldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = musicldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = musicldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = musicldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_musicldm_vocoder_model_in_dim(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = musicldm_pipe.vocoder.config config.model_in_dim *= 2 musicldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): 
self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) @nightly @require_torch_gpu class MusicLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_musicldm(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8680:8690] expected_slice = np.array( [-0.1042, -0.1068, -0.1235, -0.1387, -0.1428, -0.136, -0.1213, -0.1097, -0.0967, -0.0945] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_musicldm_lms(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe.scheduler = LMSDiscreteScheduler.from_config(musicldm_pipe.scheduler.config) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[58020:58030] expected_slice = np.array([0.3592, 0.3477, 0.4084, 0.4665, 0.5048, 0.5891, 0.6461, 0.5579, 0.4595, 0.4403]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
0
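A minimal sketch, assuming only what the MusicLDM tests above establish (the `cvssp/musicldm` checkpoint, the `num_waveforms_per_prompt` and `audio_length_in_s` arguments, and the `.audios` output); the concrete step count and length are illustrative.

```python
# Sketch of the MusicLDM inference path covered by the tests above.
from diffusers import MusicLDMPipeline

pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm").to("cuda")
audios = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=25,
    audio_length_in_s=5.0,       # the tests check len(audio) / vocoder sampling_rate
    num_waveforms_per_prompt=2,  # yields shape (2, num_samples) for one prompt
).audios
```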
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, TextToVideoSDPipeline, UNet3DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = TextToVideoSDPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=4, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(8,), in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=4, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = TextToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][-3:, -3:, -1] assert 
frames[0].shape == (32, 32, 3)
        expected_slice = np.array([192.0, 44.0, 157.0, 140.0, 108.0, 104.0, 123.0, 144.0, 129.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.")
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
@require_torch_gpu
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model_with_freeu(self):
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        video = video[0, 0, -3:, -3:, -1].flatten()

        expected_video = [-0.3102, -0.2477, -0.1772, -0.648, -0.6176, -0.5484, -0.0217, -0.056, -0.0177]

        assert np.abs(expected_video - video).mean() < 5e-2
0
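A minimal sketch of the text-to-video path the slow tests above drive, including the optional FreeU re-weighting with the same coefficients as `test_two_step_model_with_freeu`; the step count and device are illustrative.

```python
# Sketch of TextToVideoSDPipeline usage, mirroring the slow tests above.
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # optional, same values as the test

generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames
```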
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNet3DConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, is_flaky, nightly, numpy_cosine_similarity_distance, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = VideoToVideoSDPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} test_attention_slicing = False # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=True, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[ 8, ], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): # 3 frames video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def 
test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = VideoToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) expected_slice = np.array([162.0, 136.0, 132.0, 140.0, 139.0, 137.0, 169.0, 134.0, 132.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @is_flaky() def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=0.001) @is_flaky() def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent() @is_flaky() def test_save_load_local(self): super().test_save_load_local() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @nightly @skip_mps class VideoToVideoSDPipelineSlowTests(unittest.TestCase): def test_two_step_model(self): pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() # 10 frames generator = torch.Generator(device="cpu").manual_seed(0) video = torch.randn((1, 10, 3, 320, 576), generator=generator) prompt = "Spiderman is surfing" video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames expected_array = np.array([-0.9770508, -0.8027344, -0.62646484, -0.8334961, -0.7573242]) output_array = video_frames.cpu().numpy()[0, 0, 0, 0, -5:] assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-2
0
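The corresponding video-to-video call, as a sketch restating `test_two_step_model` above; a real use would pass actual input frames, the random tensor only mirrors the test's `(batch, frames, channels, height, width)` shape.

```python
# Sketch of VideoToVideoSDPipeline usage from the slow test above.
import torch
from diffusers import VideoToVideoSDPipeline

pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

video = torch.randn((1, 10, 3, 320, 576))  # placeholder input video, as in the test
frames = pipe(
    "Spiderman is surfing", video=video, num_inference_steps=3, output_type="pt"
).frames
```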
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import DDIMScheduler, TextToVideoZeroPipeline from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu from ..test_pipelines_common import assert_mean_pixel_difference @nightly @require_torch_gpu class TextToVideoZeroPipelineSlowTests(unittest.TestCase): def test_full_model(self): model_id = "runwayml/stable-diffusion-v1-5" pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(0) prompt = "A bear is playing a guitar on Times Square" result = pipe(prompt=prompt, generator=generator).images expected_result = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/A bear is playing a guitar on Times Square.pt" ) assert_mean_pixel_difference(result, expected_result)
0
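For reference, the zero-shot call checked by `test_full_model` above, restated as a standalone sketch; the DDIM scheduler swap is taken directly from the test.

```python
# Sketch of TextToVideoZeroPipeline usage, mirroring test_full_model above.
import torch
from diffusers import DDIMScheduler, TextToVideoZeroPipeline

pipe = TextToVideoZeroPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(prompt="A bear is playing a guitar on Times Square", generator=generator).images
```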
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import inspect import io import re import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class TextToVideoZeroSDXLPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = TextToVideoZeroSDXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS generator_device = "cpu" def get_dummy_components(self, seed=0): torch.manual_seed(seed) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, sample_size=2, norm_num_groups=2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = DDIMScheduler( num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, clip_sample=True, set_alpha_to_one=True, steps_offset=0, prediction_type="epsilon", thresholding=False, dynamic_thresholding_ratio=0.995, clip_sample_range=1.0, sample_max_value=1.0, timestep_spacing="leading", rescale_betas_zero_snr=False, ) torch.manual_seed(seed) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(seed) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, 
"scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A panda dancing in Antarctica", "generator": generator, "num_inference_steps": 5, "t0": 1, "t1": 3, "height": 64, "width": 64, "video_length": 3, "output_type": "np", } return inputs def get_generator(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) return generator def test_text_to_video_zero_sdxl(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) inputs = self.get_dummy_inputs(self.generator_device) result = pipe(**inputs).images first_frame_slice = result[0, -3:, -3:, -1] last_frame_slice = result[-1, -3:, -3:, 0] expected_slice1 = np.array([0.48, 0.58, 0.53, 0.59, 0.50, 0.44, 0.60, 0.65, 0.52]) expected_slice2 = np.array([0.66, 0.49, 0.40, 0.70, 0.47, 0.51, 0.73, 0.65, 0.52]) assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2 assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2 @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_attention_slicing_forward_pass(self): pass def test_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) inputs["guidance_scale"] = 1.0 out_no_cfg = pipe(**inputs)[0] inputs["guidance_scale"] = 7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(self.generator_device))[0] output_tuple = pipe(**self.get_dummy_inputs(self.generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) # # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(self.generator_device) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(self.generator_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: 
fp16_inputs["generator"] = self.get_generator(self.generator_device) output_fp16 = pipe_fp16(**fp16_inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_inference_batch_single_identical(self): pass @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(self.generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_pipeline_call_signature(self): pass def test_progress_bar(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(self.generator_device) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) 
", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(self.generator_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_save_load_local(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_save_load_optional_components(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_sequential_cpu_offload_forward_pass(self): pass @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." 
) def test_xformers_attention_forwardGenerator_pass(self): pass @nightly @require_torch_gpu class TextToVideoZeroSDXLPipelineSlowTests(unittest.TestCase): def test_full_model(self): model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipe = self.pipeline_class.from_pretrained( model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.enable_model_cpu_offload() pipe.enable_vae_slicing() pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "A panda dancing in Antarctica" result = pipe(prompt=prompt, generator=generator).images first_frame_slice = result[0, -3:, -3:, -1] last_frame_slice = result[-1, -3:, -3:, 0] expected_slice1 = np.array([0.57, 0.57, 0.57, 0.57, 0.57, 0.56, 0.55, 0.56, 0.56]) expected_slice2 = np.array([0.54, 0.53, 0.53, 0.53, 0.53, 0.52, 0.53, 0.53, 0.53]) assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2 assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2
0
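As with the other files, a sketch of the SDXL variant from `test_full_model` above; the memory savers (model CPU offload, VAE slicing) and loading flags are exactly the ones the slow test enables, everything else is illustrative.

```python
# Sketch of TextToVideoZeroSDXLPipeline usage, mirroring the slow test above.
import torch
from diffusers import DDIMScheduler, TextToVideoZeroSDXLPipeline

pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe(prompt="A panda dancing in Antarctica", generator=generator).images
```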
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/animatediff/test_animatediff.py
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import logging from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AnimateDiffPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in 
case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) pipe(**inputs) @slow @require_torch_gpu class AnimateDiffPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_animatediff(self): adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) pipe = pipe.to(torch_device) pipe.scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", steps_offset=1, clip_sample=False, ) pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain" negative_prompt = "bad quality, worse quality" generator = torch.Generator("cpu").manual_seed(0) output = pipe( prompt, negative_prompt=negative_prompt, num_frames=16, generator=generator, guidance_scale=7.5, num_inference_steps=3, output_type="np", ) image = output.frames[0] assert image.shape == (16, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array( [ 0.11357737, 0.11285847, 0.11180121, 0.11084166, 0.11414117, 0.09785956, 0.10742754, 0.10510018, 0.08045256, ] ) assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
0
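The AnimateDiff slow test above gates its slice comparison on numpy_cosine_similarity_distance rather than a max absolute difference. A hedged sketch of what such a distance computes (this reimplementation is an assumption, not the actual helper from diffusers.utils.testing_utils):

import numpy as np


def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 0.0 for identical directions, up to 2.0 for opposite ones; unlike a
    # max-difference check, it tolerates small uniform brightness shifts.
    a, b = a.flatten(), b.flatten()
    similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float(1.0 - similarity)


reference = np.array([0.1135, 0.1128, 0.1118])
assert cosine_similarity_distance(reference, reference) < 1e-12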
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ddim/test_ddim.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DDIMPipeline params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "latents", "callback", "callback_steps", } batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) scheduler = DDIMScheduler() components = {"unet": unet, "scheduler": scheduler} return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 32, 3)) expected_slice = np.array( [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class DDIMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = DDIMScheduler() ddim = DDIMPipeline(unet=unet, scheduler=scheduler) ddim.to(torch_device) ddim.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddim(generator=generator, eta=0.0, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) 
expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
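Every get_dummy_inputs in these files repeats the same device-dependent generator construction. The pattern, isolated as a standalone sketch (the helper name is ours, not a diffusers API):

import torch


def make_generator(device, seed: int = 0):
    # MPS does not support device-local Generators the way CPU/CUDA do, so the
    # tests fall back to seeding the global RNG there; both branches return a
    # torch.Generator usable as the pipelines' generator argument.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=0)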
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm2/test_audioldm2.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, GPT2Config, GPT2Model, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, T5Tokenizer, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDM2Pipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = AudioLDM2UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=([None, 16, 32], [None, 16, 32]), ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=16, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, projection_dim=16, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=16 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) text_encoder_2_config = T5Config( vocab_size=32100, d_model=32, d_ff=37, d_kv=8, num_heads=2, num_layers=2, ) text_encoder_2 = T5EncoderModel(text_encoder_2_config) tokenizer_2 = 
T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) torch.manual_seed(0) language_model_config = GPT2Config( n_embd=16, n_head=2, n_layer=2, vocab_size=1000, n_ctx=99, n_positions=99, ) language_model = GPT2Model(language_model_config) language_model.config.max_new_tokens = 8 torch.manual_seed(0) projection_model = AudioLDM2ProjectionModel(text_encoder_dim=16, text_encoder_1_dim=32, langauge_model_dim=16) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "feature_extractor": feature_extractor, "language_model": language_model, "projection_model": projection_model, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm2_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) inputs["prompt_embeds"] = t5_prompt_embeds inputs["generated_prompt_embeds"] = generated_prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = 
output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] generated_embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True if len(embeds) == 0 else embeds[0].shape[1], truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) embeds.append(t5_prompt_embeds) generated_embeds.append(generated_prompt_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt 
num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm2_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm2_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) @unittest.skip("Raises a not implemented error in AudioLDM2") def test_xformers_attention_forwardGenerator_pass(self): pass def test_dict_tuple_outputs_equivalent(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_dict_tuple_outputs_equivalent(expected_max_difference=2e-4) def test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model self._test_inference_batch_single_identical(expected_max_diff=2e-4) def test_save_load_local(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_local(expected_max_difference=2e-4) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_optional_components(expected_max_difference=2e-4) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. 
In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) def test_sequential_cpu_offload_forward_pass(self): pass @nightly class AudioLDM2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm2(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[17275:17285] expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_lms(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[31390:31400] expected_slice = np.array( [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_large(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - 
audio_slice).max()
        assert max_diff < 1e-3
0
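The audio-length assertions in test_audioldm2_audio_length_in_s above reduce to sample-count arithmetic at the dummy vocoder's 16 kHz sampling rate. A worked restatement of those numbers (no new test logic):

sampling_rate = 16_000  # SpeechT5HifiGanConfig(sampling_rate=16000) above

for audio_length_in_s, expected_samples in [(0.016, 256), (0.032, 512)]:
    num_samples = int(audio_length_in_s * sampling_rate)
    assert num_samples == expected_samples
    # and, inversely, len(audio) / sampling_rate recovers the requested length
    assert num_samples / sampling_rate == audio_length_in_s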
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorEmb2EmbPipeline params = ["prompt", "image"] batch_params = ["prompt", "image"] required_optional_params = [ "num_images_per_prompt", "strength", "generator", "num_inference_steps", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder 
text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) inputs = { "prompt": "horse", "image": init_image, "strength": 0.5, "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_kandinsky_prior_emb2emb(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [ 0.1071284, 1.3330271, 0.61260223, -0.6691065, -0.3846852, -1.0303661, 0.22716111, 0.03348901, 0.30040675, -0.24805029, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
0
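The dummy init image in get_dummy_inputs above round-trips a float tensor through PIL. Just that conversion, sketched (assuming the [0, 1) value range that floats_tensor produces):

import numpy as np
import torch
from PIL import Image

image = torch.rand(1, 3, 64, 64)            # stand-in for floats_tensor(...)
image = image.cpu().permute(0, 2, 3, 1)[0]  # NCHW -> HWC, drop the batch dim

# Values in [0, 1) truncate to 0 under uint8, so the dummy image is close to
# black -- acceptable here, since the fast tests exercise shapes and
# determinism rather than image content.
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
assert init_image.size == (256, 256)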
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq ddim_config = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = 
Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgPipeline params = ["image_embeds", "negative_image_embeds", "image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", ] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) @slow @require_torch_gpu class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) prompt = "A red cartoon frog, 4k" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Img2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) image = output.images[0] assert image.shape 
== (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
0
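The img2img integration test above finishes with assert_mean_pixel_difference instead of a max-difference check. A hedged reimplementation of the idea (the real helper lives in ..test_pipelines_common and may differ in details such as PIL round-tripping):

import numpy as np


def mean_pixel_difference_ok(image, expected_image, expected_max_diff=10):
    # Averaging the absolute difference over 0-255 pixel values tolerates
    # scattered per-pixel noise that would trip a max check on a 768x768 image.
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    return np.abs(image - expected_image).mean() < expected_max_diff


assert mean_pixel_difference_ok(np.zeros((8, 8, 3)), np.zeros((8, 8, 3)))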
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22ControlnetPipeline params = ["image_embeds", "negative_image_embeds", "hint"] batch_params = ["image_embeds", "negative_image_embeds", "hint"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) 
components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create hint hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_kandinsky_controlnet(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) @nightly @require_torch_gpu class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_controlnet(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) hint = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) hint = torch.from_numpy(np.array(hint)).float() / 255.0 hint = hint.permute(2, 0, 1).unsqueeze(0) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22ControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "A robot, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, 
"width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from diffusers import (
    KandinskyV22CombinedPipeline,
    KandinskyV22Img2ImgCombinedPipeline,
    KandinskyV22InpaintCombinedPipeline,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies


enable_full_determinism()


class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22CombinedPipeline
    params = [
        "prompt",
    ]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = True
    callback_cfg_params = ["image_embeds"]

    def get_dummy_components(self):
        dummy = Dummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(
            {
                "height": 64,
                "width": 64,
            }
        )
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-3)

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass


class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgCombinedPipeline
    params = ["prompt", "image"]
    batch_params = ["prompt", "negative_prompt", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False
    callback_cfg_params = ["image_embeds"]

    def get_dummy_components(self):
        dummy = Img2ImgDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = Img2ImgDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass


class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintCombinedPipeline
    params = ["prompt", "image", "mask_image"]
    batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummy = InpaintDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        prior_dummy = PriorDummies()
        dummy = InpaintDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    def test_sequential_cpu_offload_forward_pass(self):
        super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass
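

# --- Illustrative usage sketch (not part of the original test file) ---
# The combined pipelines bundle the prior and the decoder behind a single
# text-to-image call. A minimal sketch, assuming (as documented for diffusers'
# auto pipelines) that the combined pipeline can be resolved from the decoder
# checkpoint:
#
#     import torch
#     from diffusers import AutoPipelineForText2Image
#
#     pipe = AutoPipelineForText2Image.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#     ).to("cuda")
#     image = pipe("red cat, 4k photo", height=512, width=512).images[0]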
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0; set clip_std to 1 so it doesn't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"]
    test_xformers_attention = False

    def get_dummy_components(self):
        dummies = Dummies()
        return dummies.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummies = Dummies()
        return dummies.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    # override default test because no output_type "latent", use "pt" instead
    def test_callback_inputs(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_test(pipe, i, t, callback_kwargs):
            missing_callback_inputs = set()
            for v in pipe._callback_tensor_inputs:
                if v not in callback_kwargs:
                    missing_callback_inputs.add(v)
            self.assertTrue(
                len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
            )
            last_i = pipe.num_timesteps - 1
            if i == last_i:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)
        inputs["callback_on_step_end"] = callback_inputs_test
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["num_inference_steps"] = 2
        inputs["output_type"] = "pt"

        output = pipe(**inputs)[0]

        assert output.abs().sum() == 0
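

# --- Illustrative usage sketch (not part of the original test file) ---
# The prior pipeline maps a text prompt to the CLIP image embeddings that the
# Kandinsky decoder consumes. A minimal sketch, assuming the public prior
# checkpoint exercised elsewhere in this test suite:
#
#     from diffusers import KandinskyV22PriorPipeline
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior"
#     ).to("cuda")
#     out = prior("horse", negative_prompt="low quality")
#     image_embeds = out.image_embeds
#     negative_image_embeds = out.negative_image_embeds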
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 32

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.zeros((64, 64), dtype=np.float32)
        mask[:32, :32] = 1

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False
    callback_cfg_params = ["image_embeds", "masked_image", "mask_image"]

    def get_dummy_components(self):
        dummies = Dummies()
        return dummies.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummies = Dummies()
        return dummies.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    def test_sequential_cpu_offload_forward_pass(self):
        super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)

    # override default test because we need to zero out mask too in order to make sure final latent is all zero
    def test_callback_inputs(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_test(pipe, i, t, callback_kwargs):
            missing_callback_inputs = set()
            for v in pipe._callback_tensor_inputs:
                if v not in callback_kwargs:
                    missing_callback_inputs.add(v)
            self.assertTrue(
                len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
            )
            last_i = pipe.num_timesteps - 1
            if i == last_i:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
                callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"])
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)
        inputs["callback_on_step_end"] = callback_inputs_test
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["output_type"] = "latent"

        output = pipe(**inputs)[0]

        assert output.abs().sum() == 0


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.zeros((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 1

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1.75e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)


@nightly
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
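

# --- Illustrative usage sketch (not part of the original test file) ---
# The controlnet img2img pipeline expects the depth hint as a float tensor of
# shape (batch, 3, H, W) scaled to [0, 1], which is exactly how the
# integration test above prepares it. A minimal sketch of that conversion
# (`hint_pil`, a PIL depth map, is a hypothetical placeholder):
#
#     import numpy as np
#     import torch
#
#     hint = torch.from_numpy(np.array(hint_pil)).float() / 255.0
#     hint = hint.permute(2, 0, 1).unsqueeze(0)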
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e_img2img.py
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils.testing_utils import (
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 16

    @property
    def time_input_dim(self):
        return 16

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=32,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=24,
            num_attention_heads=2,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        shap_e_renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "shap_e_renderer": shap_e_renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "latent",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[-3:, -3:].cpu().numpy()

        assert image.shape == (32, 16)

        expected_slice = np.array(
            [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            batch_size=2,
            expected_max_diff=6e-3,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    @unittest.skip("Key error is raised with accelerate")
    def test_sequential_cpu_offload_forward_pass(self):
        pass


@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e.py
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 16

    @property
    def time_input_dim(self):
        return 16

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        shap_e_renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "shap_e_renderer": shap_e_renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "latent",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image = image.cpu().numpy()
        image_slice = image[-3:, -3:]

        assert image.shape == (32, 16)

        expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=6e-3)

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    @unittest.skip("Key error is raised with accelerate")
    def test_sequential_cpu_offload_forward_pass(self):
        pass


@nightly
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
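

# --- Illustrative usage sketch (not part of the original test file) ---
# Shap-E renders a batch of view frames per prompt, and diffusers ships an
# export_to_gif helper for turning those frames into an animation. A minimal
# sketch, assuming the "openai/shap-e" checkpoint used above:
#
#     import torch
#     from diffusers import ShapEPipeline
#     from diffusers.utils import export_to_gif
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     frames = pipe("a shark", guidance_scale=15.0, frame_size=64).images[0]
#     export_to_gif(frames, "shark_3d.gif")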
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    nightly,
    require_torch_gpu,
    torch_device,
)


enable_full_determinism()


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_semantic_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5753, 0.6114, 0.5001, 0.5034, 0.5470, 0.4729, 0.4971, 0.4867, 0.4867])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_semantic_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5122, 0.5712, 0.4825, 0.5053, 0.5646, 0.4769, 0.5179, 0.4894, 0.4994])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_semantic_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_semantic_diffusion_fp16(self):
        """Test that stable diffusion works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_positive_guidance(self):
        torch_device = "cuda"
        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "a photo of a cat"
        edit = {
            "editing_prompt": ["sunglasses"],
            "reverse_editing_direction": [False],
            "edit_warmup_steps": 10,
            "edit_guidance_scale": 6,
            "edit_threshold": 0.95,
            "edit_momentum_scale": 0.5,
            "edit_mom_beta": 0.6,
        }

        seed = 3
        guidance_scale = 7

        # no sega enabled
        generator = torch.Generator(torch_device)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.34673113,
            0.38492733,
            0.37597352,
            0.34086335,
            0.35650748,
            0.35579205,
            0.3384763,
            0.34340236,
            0.3573271,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with sega enabled
        # generator = torch.manual_seed(seed)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            **edit,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.41887826,
            0.37728766,
            0.30138272,
            0.41416335,
            0.41664985,
            0.36283392,
            0.36191246,
            0.43364465,
            0.43001732,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_negative_guidance(self):
        torch_device = "cuda"
        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "an image of a crowded boulevard, realistic, 4k"
        edit = {
            "editing_prompt": "crowd, crowded, people",
            "reverse_editing_direction": True,
            "edit_warmup_steps": 10,
            "edit_guidance_scale": 8.3,
            "edit_threshold": 0.9,
            "edit_momentum_scale": 0.5,
            "edit_mom_beta": 0.6,
        }

        seed = 9
        guidance_scale = 7

        # no sega enabled
        generator = torch.Generator(torch_device)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.43497998,
            0.91814065,
            0.7540739,
            0.55580205,
            0.8467265,
            0.5389691,
            0.62574506,
            0.58897763,
            0.50926757,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with sega enabled
        # generator = torch.manual_seed(seed)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            **edit,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.3089719,
            0.30500144,
            0.29016042,
            0.30630964,
            0.325687,
            0.29419225,
            0.2908091,
            0.28723598,
            0.27696294,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_multi_cond_guidance(self):
        torch_device = "cuda"
        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "a castle next to a river"
        edit = {
            "editing_prompt": ["boat on a river, boat", "monet, impression, sunrise"],
            "reverse_editing_direction": False,
            "edit_warmup_steps": [15, 18],
            "edit_guidance_scale": 6,
            "edit_threshold": [0.9, 0.8],
            "edit_momentum_scale": 0.5,
            "edit_mom_beta": 0.6,
        }

        seed = 48
        guidance_scale = 7

        # no sega enabled
        generator = torch.Generator(torch_device)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.75163555,
            0.76037145,
            0.61785,
            0.9189673,
            0.8627701,
            0.85189694,
            0.8512813,
            0.87012076,
            0.8312857,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with sega enabled
        # generator = torch.manual_seed(seed)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            **edit,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.73553365,
            0.7537271,
            0.74341905,
            0.66480356,
            0.6472925,
            0.63039416,
            0.64812905,
            0.6749717,
            0.6517102,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_guidance_fp16(self):
        torch_device = "cuda"
        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "a photo of a cat"
        edit = {
            "editing_prompt": ["sunglasses"],
            "reverse_editing_direction": [False],
            "edit_warmup_steps": 10,
            "edit_guidance_scale": 6,
            "edit_threshold": 0.95,
            "edit_momentum_scale": 0.5,
            "edit_mom_beta": 0.6,
        }

        seed = 3
        guidance_scale = 7

        # no sega enabled
        generator = torch.Generator(torch_device)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.34887695,
            0.3876953,
            0.375,
            0.34423828,
            0.3581543,
            0.35717773,
            0.3383789,
            0.34570312,
            0.359375,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with sega enabled
        # generator = torch.manual_seed(seed)
        generator.manual_seed(seed)
        output = pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            **edit,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [
            0.42285156,
            0.36914062,
            0.29077148,
            0.42041016,
            0.41918945,
            0.35498047,
            0.3618164,
            0.4423828,
            0.43115234,
        ]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/blipdiffusion/test_blipdiffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from transformers.models.clip.configuration_clip import CLIPTextConfig from diffusers import AutoencoderKL, BlipDiffusionPipeline, PNDMScheduler, UNet2DConditionModel from diffusers.utils.testing_utils import enable_full_determinism from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class BlipDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = BlipDiffusionPipeline params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", ] batch_params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "neg_prompt", "prompt_strength", "prompt_reps", ] def get_dummy_components(self): torch.manual_seed(0) text_encoder_config = CLIPTextConfig( vocab_size=1000, hidden_size=16, intermediate_size=16, projection_dim=16, num_hidden_layers=1, num_attention_heads=1, max_position_embeddings=77, ) text_encoder = ContextCLIPTextModel(text_encoder_config) vae = AutoencoderKL( in_channels=4, out_channels=4, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(32,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=16, sample_size=16, ) blip_vision_config = { "hidden_size": 16, "intermediate_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "image_size": 224, "patch_size": 14, "hidden_act": "quick_gelu", } blip_qformer_config = { "vocab_size": 1000, "hidden_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "intermediate_size": 16, "max_position_embeddings": 512, "cross_attention_frequency": 1, "encoder_hidden_size": 16, } qformer_config = Blip2Config( vision_config=blip_vision_config, qformer_config=blip_qformer_config, num_query_tokens=16, tokenizer="hf-internal-testing/tiny-random-bert", ) qformer = Blip2QFormerModel(qformer_config) unet = UNet2DConditionModel( block_out_channels=(16, 32), norm_num_groups=16, layers_per_block=1, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=16, ) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) vae.eval() 
qformer.eval() text_encoder.eval() image_processor = BlipImageProcessor() components = { "text_encoder": text_encoder, "vae": vae, "qformer": qformer, "unet": unet, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): np.random.seed(seed) reference_image = np.random.rand(32, 32, 3) * 255 reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "swimming underwater", "generator": generator, "reference_image": reference_image, "source_subject_category": "dog", "target_subject_category": "dog", "height": 32, "width": 32, "guidance_scale": 7.5, "num_inference_steps": 2, "output_type": "np", } return inputs def test_blipdiffusion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) image = pipe(**self.get_dummy_inputs(device))[0] image_slice = image[0, -3:, -3:, 0] assert image.shape == (1, 16, 16, 4) expected_slice = np.array([0.7096, 0.5900, 0.6703, 0.4032, 0.7766, 0.3629, 0.5447, 0.4149, 0.8172]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnetxs/test_controlnetxs.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetXSModel, DDIMScheduler, LCMScheduler, StableDiffusionControlNetXSPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, load_numpy, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetXSModel.from_pretrained("UmerHA/ConrolNetXS-SD2.1-canny") pipe = StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, controlnet=controlnet ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet.to(memory_format=torch.channels_last) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetXSPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetXSPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, 
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetXSModel.from_unet( unet=unet, time_embedding_mix=0.95, learn_embedding=True, size_ratio=0.5, conditioning_embedding_out_channels=(16, 32), num_attention_heads=2, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetXSPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class ControlNetXSPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetXSModel.from_pretrained("UmerHA/ConrolNetXS-SD2.1-canny") pipe = 
StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) original_image = image[-3:, -3:, -1].flatten() expected_image = np.array([0.1274, 0.1401, 0.147, 0.1185, 0.1555, 0.1492, 0.1565, 0.1474, 0.1701]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetXSModel.from_pretrained("UmerHA/ConrolNetXS-SD2.1-depth") pipe = StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) original_image = image[-3:, -3:, -1].flatten() expected_image = np.array([0.1098, 0.1025, 0.1211, 0.1129, 0.1165, 0.1262, 0.1185, 0.1261, 0.1703]) assert np.allclose(original_image, expected_image, atol=1e-04) @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnetxs/test_controlnetxs_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetXSModel, EulerDiscreteScheduler, StableDiffusionXLControlNetXSPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLControlNetXSPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetXSPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) controlnet = ControlNetXSModel.from_unet( unet, time_embedding_mix=0.95, learn_embedding=True, size_ratio=0.5, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components # copied from test_controlnet_sdxl.py def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs # copied from test_controlnet_sdxl.py def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) # copied from test_controlnet_sdxl.py @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) # copied from test_controlnet_sdxl.py def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # copied from test_controlnet_sdxl.py def test_save_load_optional_components(self): self._test_save_load_optional_components() # copied from test_controlnet_sdxl.py @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 # copied from test_controlnet_sdxl.py def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) 
inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1.1e-4 @slow @require_torch_gpu class ControlNetSDXLPipelineXSSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetXSModel.from_pretrained("UmerHA/ConrolNetXS-SDXL-canny") pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4359, 0.4335, 0.4609, 0.4515, 0.4669, 0.4494, 0.452, 0.4493, 0.4382]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetXSModel.from_pretrained("UmerHA/ConrolNetXS-SDXL-depth") pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (512, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4411, 0.3617, 0.2654, 0.266, 0.3449, 0.3898, 0.3745, 0.353, 0.326]) assert np.allclose(original_image, expected_image, atol=1e-04)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audio_diffusion/test_audio_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNet2DConditionModel, UNet2DModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device enable_full_determinism() class PipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_unet(self): torch.manual_seed(0) model = UNet2DModel( sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), ) return model @property def dummy_unet_condition(self): torch.manual_seed(0) model = UNet2DConditionModel( sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, ) return model @property def dummy_vqvae_and_unet(self): torch.manual_seed(0) vqvae = AutoencoderKL( sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), ) unet = UNet2DModel( sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), ) return vqvae, unet @nightly def test_audio_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator mel = Mel( x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], ) scheduler = DDPMScheduler() pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator, steps=4) audio = output.audios[0] image = output.images[0] generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator, steps=4, return_dict=False) image_from_tuple = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10] expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0 mel = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], ) scheduler = DDIMScheduler() dummy_vqvae_and_unet = self.dummy_vqvae_and_unet pipe = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) np.random.seed(0) raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,)) generator = torch.Generator(device=device).manual_seed(42) output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10) image = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 dummy_unet_condition = self.dummy_unet_condition pipe = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) np.random.seed(0) encoding = torch.rand((1, 1, 10)) output = pipe(generator=generator, encoding=encoding) image = output.images[0] image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_audio_diffusion(self): device = torch_device pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator) audio = output.audios[0] image = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenCombinedPipeline from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "prior_num_inference_steps", "output_type", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = {"c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2} model = WuerstchenPrior(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_prior_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "c_cond": self.text_embedder_hidden_size, "c_hidden": [320], "nhead": [-1], "blocks": [4], "level_config": ["CT"], "clip_embd": self.text_embedder_hidden_size, "inject_effnet": [False], } model = WuerstchenDiffNeXt(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior prior_text_encoder = self.dummy_prior_text_encoder scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan components = { "tokenizer": tokenizer, "text_encoder": text_encoder, "decoder": decoder, "vqgan": vqgan, "scheduler": scheduler, "prior_prior": prior, "prior_text_encoder": 
prior_text_encoder, "prior_tokenizer": tokenizer, "prior_scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_wuerstchen(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @unittest.skip(reason="flakey and float16 requires CUDA") def test_float16_inference(self): super().test_float16_inference() def test_callback_inputs(self): pass def test_callback_cfg(self): pass
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenDecoderPipeline from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenDecoderPipeline params = ["prompt"] batch_params = ["image_embeddings", "prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "c_cond": self.text_embedder_hidden_size, "c_hidden": [320], "nhead": [-1], "blocks": [4], "level_config": ["CT"], "clip_embd": self.text_embedder_hidden_size, "inject_effnet": [False], } model = WuerstchenDiffNeXt(**model_kwargs) return model.eval() def get_dummy_components(self): decoder = self.dummy_decoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer vqgan = self.dummy_vqgan scheduler = DDPMWuerstchenScheduler() components = { "decoder": decoder, "vqgan": vqgan, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "latent_dim_scale": 4.0, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeddings": torch.ones((1, 4, 4, 4), device=device), "prompt": "horse", "generator": generator, "guidance_scale": 1.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_decoder(self): device = "cpu" 
components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0000, 0.0000, 0.0089, 1.0000, 1.0000, 0.3927, 1.0000, 1.0000, 1.0000]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-5) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="bf16 not supported and requires CUDA") def test_float16_inference(self): super().test_float16_inference()
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenPriorPipeline from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import ( LoRAAttnProcessor, LoRAAttnProcessor2_0, ) from diffusers.pipelines.wuerstchen import WuerstchenPrior from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import enable_full_determinism, require_peft_backend, skip_mps, torch_device if is_peft_available(): from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def create_prior_lora_layers(unet: nn.Module): lora_attn_procs = {} for name in unet.attn_processors.keys(): lora_attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor ) lora_attn_procs[name] = lora_attn_processor_class( hidden_size=unet.config.c, ) unet_lora_layers = AttnProcsLayers(lora_attn_procs) return lora_attn_procs, unet_lora_layers class WuerstchenPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2, } model = WuerstchenPrior(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer scheduler = DDPMWuerstchenScheduler() components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: 
generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeddings image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, 0, 0, -10:] image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:] assert image.shape == (1, 2, 24, 24) expected_slice = np.array( [ -7172.837, -3438.855, -1093.312, 388.8835, -7471.467, -7998.1206, -5328.259, 218.00089, -2731.5745, -8056.734, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=2e-1, ) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="flaky for now") def test_float16_inference(self): super().test_float16_inference() # override because we need to make sure latent_mean and latent_std to be 0 def test_callback_inputs(self): components = self.get_dummy_components() components["latent_mean"] = 0 components["latent_std"] = 0 pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def check_if_lora_correctly_set(self, model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def get_lora_components(self): prior = self.dummy_prior prior_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False ) prior_lora_attn_procs, prior_lora_layers = create_prior_lora_layers(prior) lora_components = { "prior_lora_layers": prior_lora_layers, "prior_lora_attn_procs": prior_lora_attn_procs, } return prior, prior_lora_config, lora_components @require_peft_backend def test_inference_with_prior_lora(self): _, prior_lora_config, _ = self.get_lora_components() device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output_no_lora = pipe(**self.get_dummy_inputs(device)) image_embed = output_no_lora.image_embeddings self.assertTrue(image_embed.shape == (1, 2, 24, 24)) pipe.prior.add_adapter(prior_lora_config) self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior") output_lora = pipe(**self.get_dummy_inputs(device)) lora_image_embed = output_lora.image_embeddings self.assertTrue(image_embed.shape == lora_image_embed.shape)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor, image_encoder=None,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=True)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor, image_encoder=None,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device

from ..pipeline_params import (
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"

        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, torch_device


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm/test_audioldm.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical()

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@nightly
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2


@nightly
class AudioLDMPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    nightly,
    require_note_seq,
    require_onnxruntime,
    require_torch_gpu,
    skip_mps,
    torch_device,
)

from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


MIDI_FILE = "./tests/fixtures/elise_format0.mid"


# The note-seq package throws an error on import because the default installed version of Ipython
# is not compatible with python 3.8 which we run in the CI.
# https://github.com/huggingface/diffusers/actions/runs/4830121056/jobs/8605954838#step:7:98
@unittest.skip("The note-seq package currently throws an error on import")
class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = SpectrogramDiffusionPipeline
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    test_attention_slicing = False
    params = TOKENS_TO_AUDIO_GENERATION_PARAMS
    batch_params = TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        notes_encoder = SpectrogramNotesEncoder(
            max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1, num_layers=1, num_heads=1,
            d_kv=4, d_ff=2048, feed_forward_proj="gated-gelu",
        )
        continuous_encoder = SpectrogramContEncoder(
            input_dims=128, targets_context_length=256, d_model=768, dropout_rate=0.1, num_layers=1,
            num_heads=1, d_kv=4, d_ff=2048, feed_forward_proj="gated-gelu",
        )
        decoder = T5FilmDecoder(
            input_dims=128, targets_length=256, max_decoder_noise_time=20000.0, d_model=768, num_layers=1,
            num_heads=1, d_kv=4, d_ff=2048, dropout_rate=0.1,
        )
        scheduler = DDPMScheduler()

        components = {
            "notes_encoder": notes_encoder.eval(),
            "continuous_encoder": continuous_encoder.eval(),
            "decoder": decoder.eval(),
            "scheduler": scheduler,
            "melgan": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "input_tokens": [
                [1134, 90, 1135, 1133, 1080, 112, 1132, 1080, 1133, 1079, 133, 1132, 1079, 1133, 1] + [0] * 2033
            ],
            "generator": generator,
            "num_inference_steps": 4,
            "output_type": "mel",
        }
        return inputs

    def test_spectrogram_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = SpectrogramDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        mel = output.audios

        mel_slice = mel[0, -3:, -3:]

        assert mel_slice.shape == (3, 3)
        expected_slice = np.array(
            [-11.512925, -4.788215, -0.46172905, -2.051715, -10.539147, -10.970963, -9.091634, 4.0, 4.0]
        )
        assert np.abs(mel_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        pass

    def test_inference_batch_consistent(self):
        pass

    @skip_mps
    def test_progress_bar(self):
        return super().test_progress_bar()


@nightly
@require_torch_gpu
@require_onnxruntime
@require_note_seq
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_callback(self):
        # TODO - test that pipeline can decode tokens in a callback
        # so that music can be played live
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        melgan = pipe.melgan
        pipe.melgan = None

        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        def callback(step, mel_output):
            # decode mel to audio
            audio = melgan(input_features=mel_output.astype(np.float32))[0]
            assert len(audio[0]) == 81920 * (step + 1)
            # simulate that audio is played
            return audio

        processor = MidiProcessor()
        input_tokens = processor(MIDI_FILE)

        input_tokens = input_tokens[:3]
        generator = torch.manual_seed(0)
        pipe(input_tokens, num_inference_steps=5, generator=generator, callback=callback, output_type="mel")

    def test_spectrogram_fast(self):
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        processor = MidiProcessor()

        input_tokens = processor(MIDI_FILE)
        # just run two denoising loops
        input_tokens = input_tokens[:2]

        generator = torch.manual_seed(0)
        output = pipe(input_tokens, num_inference_steps=2, generator=generator)

        audio = output.audios[0]

        assert abs(np.abs(audio).sum() - 3612.841) < 1e-1

    def test_spectrogram(self):
        device = torch_device

        pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        processor = MidiProcessor()
        input_tokens = processor(MIDI_FILE)

        # just run 4 denoising loops
        input_tokens = input_tokens[:4]

        generator = torch.manual_seed(0)
        output = pipe(input_tokens, num_inference_steps=100, generator=generator)

        audio = output.audios[0]

        assert abs(np.abs(audio).sum() - 9389.1111) < 5e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/pixart/test_pixart.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import tempfile
import unittest

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PixArtAlphaPipeline,
    Transformer2DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np


enable_full_determinism()


class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = PixArtAlphaPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    required_optional_params = PipelineTesterMixin.required_optional_params

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=8, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3,
            caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True,
            activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single",
            norm_elementwise_affine=False, norm_eps=1e-6,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        components = {
            "transformer": transformer.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "use_resolution_binning": False,
            "output_type": "np",
        }
        return inputs

    def test_sequential_cpu_offload_forward_pass(self):
        # TODO(PVP, Sayak) need to fix later
        return

    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt": None,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attention_mask,
            "negative_prompt": None,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": negative_prompt_attention_mask,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "use_resolution_binning": False,
        }

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 8, 8, 3))
        expected_slice = np.array([0.5303, 0.2658, 0.7979, 0.1182, 0.3304, 0.4608, 0.5195, 0.4261, 0.4675])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_non_square_images(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs, height=32, width=48).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 48, 3))
        expected_slice = np.array([0.3859, 0.2987, 0.2333, 0.5243, 0.6721, 0.4436, 0.5292, 0.5373, 0.4416])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_with_embeddings_and_multiple_images(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attn_mask,
            "negative_prompt": None,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": neg_prompt_attn_mask,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "num_images_per_prompt": 2,
            "use_resolution_binning": False,
        }

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "prompt_attention_mask": prompt_attn_mask,
            "negative_prompt": None,
            "negative_prompt_embeds": negative_prompt_embeds,
            "negative_prompt_attention_mask": neg_prompt_attn_mask,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
            "num_images_per_prompt": 2,
            "use_resolution_binning": False,
        }

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def test_inference_with_multiple_images_per_prompt(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_images_per_prompt"] = 2
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (2, 8, 8, 3))
        expected_slice = np.array([0.5303, 0.2658, 0.7979, 0.1182, 0.3304, 0.4608, 0.5195, 0.4261, 0.4675])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_raises_warning_for_mask_feature(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"mask_feature": True})

        with self.assertWarns(FutureWarning) as warning_ctx:
            _ = pipe(**inputs).images

        assert "mask_feature" in str(warning_ctx.warning)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class PixArtAlphaPipelineIntegrationTests(unittest.TestCase):
    ckpt_id_1024 = "PixArt-alpha/PixArt-XL-2-1024-MS"
    ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512"
    prompt = "A small cactus with a happy face in the Sahara desert."

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_pixart_1024(self):
        generator = torch.manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        prompt = self.prompt

        image = pipe(prompt, generator=generator, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1941, 0.2117, 0.2188, 0.1946, 0.218, 0.2124, 0.199, 0.2437, 0.2583])

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_pixart_512(self):
        generator = torch.manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        prompt = self.prompt

        image = pipe(prompt, generator=generator, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.2637, 0.291, 0.2939, 0.207, 0.2512, 0.2783, 0.2168, 0.2324, 0.2817])

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_pixart_1024_without_resolution_binning(self):
        generator = torch.manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        prompt = self.prompt
        height, width = 1024, 768
        num_inference_steps = 10

        image = pipe(
            prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps,
            output_type="np",
        ).images
        image_slice = image[0, -3:, -3:, -1]

        generator = torch.manual_seed(0)
        no_res_bin_image = pipe(
            prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps,
            output_type="np", use_resolution_binning=False,
        ).images
        no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1]

        assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)

    def test_pixart_512_without_resolution_binning(self):
        generator = torch.manual_seed(0)

        pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        prompt = self.prompt
        height, width = 512, 768
        num_inference_steps = 10

        image = pipe(
            prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps,
            output_type="np",
        ).images
        image_slice = image[0, -3:, -3:, -1]

        generator = torch.manual_seed(0)
        no_res_bin_image = pipe(
            prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps,
            output_type="np", use_resolution_binning=False,
        ).images
        no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1]

        assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/vq_diffusion/test_vq_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
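# Editor's note -- not part of the original test file: the slow and nightly tests above
# pin the initial latents with a seeded NumPy RNG and hand them to the pipeline, so the
# denoising trajectory is reproducible across devices. A minimal sketch of that pattern,
# mirroring get_inputs() above:
if __name__ == "__main__":
    _latents = np.random.RandomState(0).standard_normal((1, 4, 32, 32))  # fixed initial noise
    _latents = torch.from_numpy(_latents).to(device="cpu", dtype=torch.float32)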
0