Dataset schema:
- text: string, length 5 to 631k
- id: string, length 14 to 178
- metadata: dict
- __index_level_0__: int64, range 0 to 647
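The header above describes the columns of each record shown below. As a rough illustration of how a dataset with this schema might be inspected, here is a minimal sketch using the `datasets` library; the dataset path and split name are placeholders rather than the actual identifiers of this dump.

```python
# Minimal sketch: loading and inspecting a dataset with the schema above.
# The path "user/code-files-dataset" and the "train" split are placeholders.
from datasets import load_dataset

ds = load_dataset("user/code-files-dataset", split="train")

# Each row carries the raw file text, a path-based id, and per-file metadata.
print(ds.column_names)   # ['text', 'id', 'metadata', '__index_level_0__']
row = ds[0]
print(row["id"])          # e.g. "candle/candle-wasm-examples/.../worker.rs/0"
print(row["metadata"])    # {'file_path': ..., 'repo_id': ..., 'token_count': ...}
print(len(row["text"]))   # raw source length, between 5 and ~631k characters
```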
use yew_agent::PublicWorker;

fn main() {
    console_error_panic_hook::set_once();
    candle_wasm_example_llama2::Worker::register();
}
candle/candle-wasm-examples/llama2-c/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/bin/worker.rs", "repo_id": "candle", "token_count": 54 }
66
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "phi-mixformer-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } async function concatenateArrayBuffers(urls) { const arrayBuffers = await Promise.all(urls.map(url => fetchArrayBuffer(url))); let totalLength = arrayBuffers.reduce((acc, arrayBuffer) => acc + arrayBuffer.byteLength, 0); let concatenatedBuffer = new Uint8Array(totalLength); let offset = 0; arrayBuffers.forEach(buffer => { concatenatedBuffer.set(new Uint8Array(buffer), offset); offset += buffer.byteLength; }); return concatenatedBuffer; } class Phi { static instance = {}; static async getInstance( weightsURL, modelID, tokenizerURL, configURL, quantized ) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ weightsURL instanceof Array ? concatenateArrayBuffers(weightsURL) : fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, configArrayU8, quantized ); } return this.instance[modelID]; } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, configURL, quantized, prompt, temp, top_p, repeatPenalty, seed, maxSeqLen, } = data; try { self.postMessage({ status: "loading", message: "Starting Phi" }); const model = await Phi.getInstance( weightsURL, modelID, tokenizerURL, configURL, quantized ); self.postMessage({ status: "loading", message: "Initializing model" }); const firstToken = model.init_with_prompt( prompt, temp, top_p, repeatPenalty, 64, BigInt(seed) ); const seq_len = 2048; let sentence = firstToken; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const token = await model.next_token(); if (token === "<|endoftext|>") { self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); return; } const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
candle/candle-wasm-examples/phi/phiWorker.js/0
{ "file_path": "candle/candle-wasm-examples/phi/phiWorker.js", "repo_id": "candle", "token_count": 1667 }
67
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::quantized_t5::{ Config, T5EncoderModel, T5ForConditionalGeneration, VarBuilder, }; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; const DEVICE: Device = Device::Cpu; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &DEVICE; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &DEVICE; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/t5/src/bin/m-quantized.rs/0
{ "file_path": "candle/candle-wasm-examples/t5/src/bin/m-quantized.rs", "repo_id": "candle", "token_count": 3555 }
68
pub const WITH_TIMER: bool = true;

mod app;
mod audio;
pub mod languages;
pub mod worker;

pub use app::App;
pub use worker::Worker;
candle/candle-wasm-examples/whisper/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/lib.rs", "repo_id": "candle", "token_count": 47 }
69
//load the candle yolo wasm module import init, { Model, ModelPose } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "yolo-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Yolo { static instance = {}; // Retrieve the YOLO model. When called for the first time, // this will load the model and save it for future use. static async getInstance(modelID, modelURL, modelSize) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: `loading model ${modelID}:${modelSize}` }); const weightsArrayU8 = await fetchArrayBuffer(modelURL); if (/pose/.test(modelID)) { // if pose model, use ModelPose this.instance[modelID] = new ModelPose(weightsArrayU8, modelSize); } else { this.instance[modelID] = new Model(weightsArrayU8, modelSize); } } else { self.postMessage({ status: "model already loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { imageURL, modelID, modelURL, modelSize, confidence, iou_threshold } = event.data; try { self.postMessage({ status: "detecting" }); const yolo = await Yolo.getInstance(modelID, modelURL, modelSize); self.postMessage({ status: "loading image" }); const imgRes = await fetch(imageURL); const imgData = await imgRes.arrayBuffer(); const imageArrayU8 = new Uint8Array(imgData); self.postMessage({ status: `running inference ${modelID}:${modelSize}` }); const bboxes = yolo.run(imageArrayU8, confidence, iou_threshold); // Send the output back to the main thread as JSON self.postMessage({ status: "complete", output: JSON.parse(bboxes), }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/yolo/yoloWorker.js/0
{ "file_path": "candle/candle-wasm-examples/yolo/yoloWorker.js", "repo_id": "candle", "token_count": 756 }
70
## Privacy

> Last updated: Feb 14, 2025

Users of HuggingChat are authenticated through their HF user account.

We endorse Privacy by Design. As such, your conversations are private to you and will not be shared with anyone, including model authors, for any purpose, including for research or model training purposes.

Your conversation data will only be stored to let you access past conversations. You can click on the Delete icon to delete any past conversation at any moment.

🗓 Please also consult huggingface.co's main privacy policy at <https://huggingface.co/privacy>. To exercise any of your legal privacy rights, please send an email to <privacy@huggingface.co>.

## About available LLMs

The goal of this app is to showcase that it is now possible to build an open source alternative to ChatGPT. 💪

We aim to always provide a diverse set of state of the art open LLMs, hence we rotate the available models over time. Discuss available models and request new ones on the [models discussion page](https://huggingface.co/spaces/huggingchat/chat-ui/discussions/372).

Check the [models](https://huggingface.co/chat/models/) page for an up-to-date list of the best available LLMs.

## Technical details

[![chat-ui](https://img.shields.io/github/stars/huggingface/chat-ui)](https://github.com/huggingface/chat-ui)

The app is completely open source, and further development takes place on the [huggingface/chat-ui](https://github.com/huggingface/chat-ui) GitHub repo. We're always open to contributions!

You can find the production configuration for HuggingChat [here](https://github.com/huggingface/chat-ui/blob/main/chart/env/prod.yaml).

The inference backend is running the optimized [text-generation-inference](https://github.com/huggingface/text-generation-inference) on HuggingFace's Inference API infrastructure.

It is possible to deploy a copy of this app to a Space and customize it (swap model, add some UI elements, or store user messages according to your own Terms and conditions). You can also 1-click deploy your own instance using the [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).

We welcome any feedback on this app: please participate in the public discussion at <https://huggingface.co/spaces/huggingchat/chat-ui/discussions>

<a target="_blank" href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-discussion-xl.svg" title="open a discussion"></a>
chat-ui/PRIVACY.md/0
{ "file_path": "chat-ui/PRIVACY.md", "repo_id": "chat-ui", "token_count": 700 }
71
image:
  repository: ghcr.io/huggingface
  name: chat-ui
  tag: 0.0.0-latest
  pullPolicy: IfNotPresent

replicas: 3

domain: huggingface.co

networkPolicy:
  enabled: false
  allowedBlocks: []

service:
  type: NodePort
  annotations: { }

serviceAccount:
  enabled: false
  create: false
  name: ""
  automountServiceAccountToken: true
  annotations: { }

ingress:
  enabled: true
  path: "/"
  annotations: { }
  # className: "nginx"
  tls: { }
  # secretName: XXX

ingressInternal:
  enabled: false
  path: "/"
  annotations: { }
  # className: "nginx"
  tls: { }

resources:
  requests:
    cpu: 2
    memory: 4Gi
  limits:
    cpu: 2
    memory: 4Gi

nodeSelector: {}

tolerations: []

envVars: { }

infisical:
  enabled: false
  env: ""
  project: "huggingchat-v2-a1"
  url: ""
  resyncInterval: 60
  operatorSecretName: "huggingchat-operator-secrets"
  operatorSecretNamespace: "hub-utils"

# Allow to environment injections on top or instead of infisical
extraEnvFrom: []
extraEnv: []

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 2
  targetMemoryUtilizationPercentage: ""
  targetCPUUtilizationPercentage: ""

monitoring:
  enabled: false
chat-ui/chart/values.yaml/0
{ "file_path": "chat-ui/chart/values.yaml", "repo_id": "chat-ui", "token_count": 432 }
72
# Text Generation Inference (TGI)

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | Yes\*     |
| [Multimodal](../multimodal) | Yes\*     |

\* Tools are only supported with the Cohere Command R+ model with the Xenova tokenizers. Please see the [Tools](../tools) section.

\* Multimodal is only supported with the IDEFICS model. Please see the [Multimodal](../multimodal) section.

By default, if `endpoints` are left unspecified, Chat UI will look for the model on the hosted Hugging Face inference API using the model name, and use your `HF_TOKEN`. Refer to the [overview](../overview) for more information about model configuration.

```ini
MODELS=`[
  {
    "name": "mistralai/Mistral-7B-Instruct-v0.2",
    "displayName": "mistralai/Mistral-7B-Instruct-v0.2",
    "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.",
    "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
    "preprompt": "",
    "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.3,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "promptExamples": [
      {
        "title": "Write an email",
        "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)"
      },
      {
        "title": "Code a game",
        "prompt": "Code a basic snake game in python, give explanations for each step."
      },
      {
        "title": "Recipe help",
        "prompt": "How do I make a delicious lemon cheesecake?"
      }
    ]
  }
]`
```

## Running your own models using a custom endpoint

If you want to, instead of hitting models on the Hugging Face Inference API, you can run your own models locally. A good option is to hit a [text-generation-inference](https://github.com/huggingface/text-generation-inference) endpoint. This is what is done in the official [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) for instance: both this app and a text-generation-inference server run inside the same container.

To do this, you can add your own endpoints to the `MODELS` variable in `.env.local`, by adding an `"endpoints"` key for each model in `MODELS`.

```ini
MODELS=`[{
  "name": "your-model-name",
  "displayName": "Your Model Name",
  ... other model config
  "endpoints": [{
    "type" : "tgi",
    "url": "https://HOST:PORT",
  }]
}]`
```
chat-ui/docs/source/configuration/models/providers/tgi.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/tgi.md", "repo_id": "chat-ui", "token_count": 1056 }
73
{ "name": "chat-ui", "version": "0.10.0", "private": true, "packageManager": "npm@9.5.0", "scripts": { "dev": "vite dev", "build": "vite build", "preview": "vite preview", "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch", "lint": "prettier --check . && eslint .", "format": "prettier --write .", "test": "vitest", "updateLocalEnv": "vite-node --options.transformMode.ssr='/.*/' scripts/updateLocalEnv.ts", "populate": "vite-node --options.transformMode.ssr='/.*/' scripts/populate.ts", "config": "vite-node --options.transformMode.ssr='/.*/' scripts/config.ts", "prepare": "husky" }, "devDependencies": { "@elysiajs/cors": "^1.3.3", "@elysiajs/eden": "^1.3.2", "@elysiajs/node": "^1.2.6", "@faker-js/faker": "^8.4.1", "@iconify-json/carbon": "^1.1.16", "@iconify-json/eos-icons": "^1.1.6", "@sveltejs/adapter-node": "^5.2.12", "@sveltejs/kit": "^2.21.1", "@sveltejs/vite-plugin-svelte": "^5.0.3", "@tailwindcss/typography": "^0.5.9", "@types/dompurify": "^3.0.5", "@types/express": "^4.17.21", "@types/fs-extra": "^11.0.4", "@types/js-yaml": "^4.0.9", "@types/jsdom": "^21.1.1", "@types/jsonpath": "^0.2.4", "@types/katex": "^0.16.7", "@types/mime-types": "^2.1.4", "@types/minimist": "^1.2.5", "@types/node": "^22.1.0", "@types/parquetjs": "^0.10.3", "@types/sbd": "^1.0.5", "@types/uuid": "^9.0.8", "@types/yazl": "^3.3.0", "@typescript-eslint/eslint-plugin": "^6.x", "@typescript-eslint/parser": "^6.x", "bson-objectid": "^2.0.4", "dompurify": "^3.2.4", "elysia": "^1.3.2", "eslint": "^8.28.0", "eslint-config-prettier": "^8.5.0", "eslint-plugin-svelte": "^2.45.1", "fs-extra": "^11.3.0", "isomorphic-dompurify": "^2.13.0", "js-yaml": "^4.1.0", "jsonrepair": "^3.12.0", "minimist": "^1.2.8", "mongodb-memory-server": "^10.1.2", "node-llama-cpp": "^3.6.0", "prettier": "^3.5.3", "prettier-plugin-svelte": "^3.2.6", "prettier-plugin-tailwindcss": "^0.6.11", "prom-client": "^15.1.2", "sade": "^1.8.1", "superjson": "^2.2.2", "svelte": "^5.33.3", "svelte-check": "^4.0.0", "svelte-gestures": "^5.1.3", "ts-node": "^10.9.1", "tslib": "^2.4.1", "typescript": "^5.5.0", "unplugin-icons": "^0.16.1", "vite": "^6.3.5", "vite-node": "^3.0.9", "vitest": "^3.1.4", "yazl": "^3.3.1" }, "type": "module", "dependencies": { "@aws-sdk/credential-providers": "^3.592.0", "@cliqz/adblocker-playwright": "^1.34.0", "@elysiajs/swagger": "^1.3.0", "@gradio/client": "^1.8.0", "@huggingface/hub": "^2.2.0", "@huggingface/inference": "^3.12.1", "@huggingface/mcp-client": "^0.1.1", "@huggingface/tasks": "^0.19.1", "@huggingface/transformers": "^3.1.1", "@iconify-json/bi": "^1.1.21", "@playwright/browser-chromium": "^1.52.0", "@resvg/resvg-js": "^2.6.2", "autoprefixer": "^10.4.14", "aws-sigv4-fetch": "^4.0.1", "aws4": "^1.13.0", "date-fns": "^2.29.3", "dotenv": "^16.5.0", "express": "^4.21.2", "file-type": "^21.0.0", "google-auth-library": "^9.13.0", "handlebars": "^4.7.8", "highlight.js": "^11.7.0", "husky": "^9.0.11", "image-size": "^1.2.1", "ip-address": "^9.0.5", "jose": "^5.3.0", "jsdom": "^22.0.0", "json5": "^2.2.3", "jsonpath": "^1.1.1", "katex": "^0.16.21", "lint-staged": "^15.2.7", "marked": "^12.0.1", "mongodb": "^5.8.0", "nanoid": "^5.0.9", "natural": "^8.1.0", "openid-client": "^5.4.2", "parquetjs": "^0.11.2", "pino": "^9.0.0", "pino-pretty": "^11.0.0", "playwright": "^1.52.0", "postcss": "^8.4.31", "saslprep": "^1.0.3", "satori": "^0.10.11", "satori-html": "^0.3.2", "sbd": "^1.0.19", "serpapi": "^1.1.1", "sharp": "^0.33.4", 
"tailwind-scrollbar": "^3.0.0", "tailwindcss": "^3.4.0", "uuid": "^10.0.0", "vitest-browser-svelte": "^0.1.0", "zod": "^3.22.3" }, "optionalDependencies": { "@anthropic-ai/sdk": "^0.32.1", "@anthropic-ai/vertex-sdk": "^0.4.1", "@aws-sdk/client-bedrock-runtime": "^3.631.0", "@google-cloud/vertexai": "^1.1.0", "@google/generative-ai": "^0.24.0", "aws4fetch": "^1.0.17", "cohere-ai": "^7.9.0", "openai": "^4.44.0" }, "overrides": { "@reflink/reflink": "file:stub/@reflink/reflink" } }
chat-ui/package.json/0
{ "file_path": "chat-ui/package.json", "repo_id": "chat-ui", "token_count": 2488 }
74
import type { EndpointParameters } from "./server/endpoints/endpoints"; import type { BackendModel } from "./server/models"; import type { Tool, ToolResult } from "./types/Tool"; type buildPromptOptions = Pick<EndpointParameters, "messages" | "preprompt" | "continueMessage"> & { model: BackendModel; tools?: Tool[]; toolResults?: ToolResult[]; }; export async function buildPrompt({ messages, model, preprompt, continueMessage, tools, toolResults, }: buildPromptOptions): Promise<string> { const filteredMessages = messages; if (filteredMessages[0].from === "system" && preprompt) { filteredMessages[0].content = preprompt; } let prompt = model .chatPromptRender({ messages: filteredMessages.map((m) => ({ ...m, role: m.from, })), preprompt, tools, toolResults, continueMessage, }) // Not super precise, but it's truncated in the model's backend anyway .split(" ") .slice(-(model.parameters?.truncate ?? 0)) .join(" "); if (continueMessage && model.parameters?.stop) { let trimmedPrompt = prompt.trimEnd(); let hasRemovedStop = true; while (hasRemovedStop) { hasRemovedStop = false; for (const stopToken of model.parameters.stop) { if (trimmedPrompt.endsWith(stopToken)) { trimmedPrompt = trimmedPrompt.slice(0, -stopToken.length); hasRemovedStop = true; break; } } trimmedPrompt = trimmedPrompt.trimEnd(); } prompt = trimmedPrompt; } return prompt; }
chat-ui/src/lib/buildPrompt.ts/0
{ "file_path": "chat-ui/src/lib/buildPrompt.ts", "repo_id": "chat-ui", "token_count": 545 }
75
<script lang="ts" module> export const titles: { [key: string]: string } = { today: "Today", week: "This week", month: "This month", older: "Older", } as const; </script> <script lang="ts"> import { base } from "$app/paths"; import Logo from "$lib/components/icons/Logo.svelte"; import { switchTheme } from "$lib/switchTheme"; import { isAborted } from "$lib/stores/isAborted"; import NavConversationItem from "./NavConversationItem.svelte"; import type { LayoutData } from "../../routes/$types"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; import type { Model } from "$lib/types/Model"; import { page } from "$app/stores"; import InfiniteScroll from "./InfiniteScroll.svelte"; import { CONV_NUM_PER_PAGE } from "$lib/constants/pagination"; import { goto } from "$app/navigation"; import { browser } from "$app/environment"; import { toggleSearch } from "./chat/Search.svelte"; import CarbonSearch from "~icons/carbon/search"; import { closeMobileNav } from "./MobileNav.svelte"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; import { isVirtualKeyboard } from "$lib/utils/isVirtualKeyboard"; import { useAPIClient, handleResponse } from "$lib/APIClient"; const publicConfig = usePublicConfig(); const client = useAPIClient(); interface Props { conversations: ConvSidebar[]; canLogin: boolean; user: LayoutData["user"]; p?: number; } let { conversations = $bindable(), canLogin, user, p = $bindable(0) }: Props = $props(); let hasMore = $state(true); function handleNewChatClick() { isAborted.set(true); } const dateRanges = [ new Date().setDate(new Date().getDate() - 1), new Date().setDate(new Date().getDate() - 7), new Date().setMonth(new Date().getMonth() - 1), ]; let groupedConversations = $derived({ today: conversations.filter(({ updatedAt }) => updatedAt.getTime() > dateRanges[0]), week: conversations.filter( ({ updatedAt }) => updatedAt.getTime() > dateRanges[1] && updatedAt.getTime() < dateRanges[0] ), month: conversations.filter( ({ updatedAt }) => updatedAt.getTime() > dateRanges[2] && updatedAt.getTime() < dateRanges[1] ), older: conversations.filter(({ updatedAt }) => updatedAt.getTime() < dateRanges[2]), }); const nModels: number = $page.data.models.filter((el: Model) => !el.unlisted).length; async function handleVisible() { p++; const newConvs = await client.conversations .get({ query: { p, }, }) .then(handleResponse) .then((r) => r.conversations) .catch(() => []); if (newConvs.length === 0) { hasMore = false; } conversations = [...conversations, ...newConvs]; } $effect(() => { if (conversations.length <= CONV_NUM_PER_PAGE) { // reset p to 0 if there's only one page of content // that would be caused by a data loading invalidation p = 0; } }); let theme = $state(browser ? 
localStorage.theme : "light"); </script> <div class="sticky top-0 flex flex-none touch-none items-center justify-between px-1.5 py-3.5 max-sm:pt-0" > <a class="flex items-center rounded-xl text-lg font-semibold" href="{publicConfig.PUBLIC_ORIGIN}{base}/" > <Logo classNames="mr-1" /> {publicConfig.PUBLIC_APP_NAME} </a> {#if $page.url.pathname !== base + "/"} <a href={`${base}/`} onclick={handleNewChatClick} class="flex rounded-lg border bg-white px-2 py-0.5 text-center shadow-sm hover:shadow-none dark:border-gray-600 dark:bg-gray-700 sm:text-smd" > New Chat </a> {/if} </div> <div class="scrollbar-custom flex touch-pan-y flex-col gap-1 overflow-y-auto rounded-r-xl from-gray-50 px-3 pb-3 pt-2 text-[.9rem] dark:from-gray-800/30 max-sm:bg-gradient-to-t md:bg-gradient-to-l" > <button class="group mx-auto flex w-full flex-row items-center justify-stretch gap-x-2 rounded-xl px-2 py-1 align-middle text-gray-600 hover:bg-gray-500/20 dark:text-gray-400" onclick={() => { closeMobileNav(); toggleSearch(); }} > <CarbonSearch class="text-xs" /> <span class="block">Search chats</span> {#if !isVirtualKeyboard()} <span class="invisible ml-auto text-xs text-gray-500 group-hover:visible" ><kbd>ctrl</kbd>+<kbd>k</kbd></span > {/if} </button> {#await groupedConversations} {#if $page.data.nConversations > 0} <div class="overflow-y-hidden"> <div class="flex animate-pulse flex-col gap-4"> <div class="h-4 w-24 rounded bg-gray-200 dark:bg-gray-700"></div> {#each Array(100) as _} <div class="ml-2 h-5 w-4/5 gap-5 rounded bg-gray-200 dark:bg-gray-700"></div> {/each} </div> </div> {/if} {:then groupedConversations} <div class="flex flex-col gap-1"> {#each Object.entries(groupedConversations) as [group, convs]} {#if convs.length} <h4 class="mb-1.5 mt-4 pl-0.5 text-sm text-gray-400 first:mt-0 dark:text-gray-500"> {titles[group]} </h4> {#each convs as conv} <NavConversationItem on:editConversationTitle on:deleteConversation {conv} /> {/each} {/if} {/each} </div> {#if hasMore} <InfiniteScroll on:visible={handleVisible} /> {/if} {/await} </div> <div class="flex touch-none flex-col gap-1 rounded-r-xl p-3 text-sm md:mt-3 md:bg-gradient-to-l md:from-gray-50 md:dark:from-gray-800/30" > {#if user?.username || user?.email} <button onclick={async () => { await fetch(`${base}/logout`, { method: "POST", }); await goto(base + "/", { invalidateAll: true }); }} class="group flex items-center gap-1.5 rounded-lg pl-2.5 pr-2 hover:bg-gray-100 dark:hover:bg-gray-700" > <span class="flex h-9 flex-none shrink items-center gap-1.5 truncate pr-2 text-gray-500 dark:text-gray-400" >{user?.username || user?.email}</span > {#if !user.logoutDisabled} <span class="ml-auto h-6 flex-none items-center gap-1.5 rounded-md border bg-white px-2 text-gray-700 shadow-sm group-hover:flex hover:shadow-none dark:border-gray-600 dark:bg-gray-600 dark:text-gray-400 dark:hover:text-gray-300 md:hidden" > Sign Out </span> {/if} </button> {/if} {#if canLogin} <a href="{base}/login" class="flex h-9 w-full flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Login </a> {/if} {#if nModels > 1} <a href="{base}/models" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Models <span class="ml-auto rounded-full border border-gray-300 px-2 py-0.5 text-xs text-gray-500 dark:border-gray-500 dark:text-gray-400" >{nModels}</span > </a> {/if} {#if $page.data.enableAssistants} <a href="{base}/assistants" 
class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Assistants </a> {/if} {#if $page.data.enableCommunityTools} <a href="{base}/tools" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Tools <span class="ml-auto rounded-full border border-purple-300 px-2 py-0.5 text-xs text-purple-500 dark:border-purple-500 dark:text-purple-400" >New</span > </a> {/if} <span class="flex flex-row-reverse gap-1 md:flex-row"> <a href="{base}/settings" class="flex h-9 flex-none flex-grow items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Settings </a> <button onclick={() => { switchTheme(); theme = localStorage.theme; }} aria-label="Toggle theme" class="flex h-9 min-w-[1.5em] flex-none items-center rounded-lg p-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > {#if browser} {#if theme === "dark"} <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" stroke-width="1.5" ><path d="M16 12.005a4 4 0 1 1-4 4a4.005 4.005 0 0 1 4-4m0-2a6 6 0 1 0 6 6a6 6 0 0 0-6-6z" fill="currentColor" stroke="currentColor" stroke-width="0.5" ></path><path d="M5.394 6.813l1.414-1.415l3.506 3.506L8.9 10.318z" fill="currentColor" ></path><path d="M2 15.005h5v2H2z" fill="currentColor"></path><path stroke="currentColor" stroke-width="0.5" d="M5.394 25.197L8.9 21.691l1.414 1.415l-3.506 3.505z" fill="currentColor" ></path><path d="M15 25.005h2v5h-2z" fill="currentColor"></path><path stroke="currentColor" stroke-width="0.5" d="M21.687 23.106l1.414-1.415l3.506 3.506l-1.414 1.414z" fill="currentColor" ></path><path d="M25 15.005h5v2h-5z" fill="currentColor"></path><path stroke="currentColor" stroke-width="0.5" d="M21.687 8.904l3.506-3.506l1.414 1.415l-3.506 3.505z" fill="currentColor" ></path><path d="M15 2.005h2v5h-2z" fill="currentColor"></path></svg > {:else} <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" stroke-width="1.5" ><path d="M13.502 5.414a15.075 15.075 0 0 0 11.594 18.194a11.113 11.113 0 0 1-7.975 3.39c-.138 0-.278.005-.418 0a11.094 11.094 0 0 1-3.2-21.584M14.98 3a1.002 1.002 0 0 0-.175.016a13.096 13.096 0 0 0 1.825 25.981c.164.006.328 0 .49 0a13.072 13.072 0 0 0 10.703-5.555a1.01 1.01 0 0 0-.783-1.565A13.08 13.08 0 0 1 15.89 4.38A1.015 1.015 0 0 0 14.98 3z" fill="currentColor" stroke="currentColor" stroke-width="0.5" ></path></svg > {/if} {/if} </button> </span> </div>
chat-ui/src/lib/components/NavMenu.svelte/0
{ "file_path": "chat-ui/src/lib/components/NavMenu.svelte", "repo_id": "chat-ui", "token_count": 4550 }
76
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/state"; import { clickOutside } from "$lib/actions/clickOutside"; import { useSettingsStore } from "$lib/stores/settings"; import type { ToolFront } from "$lib/types/Tool"; import IconTool from "./icons/IconTool.svelte"; import CarbonInformation from "~icons/carbon/information"; import CarbonGlobe from "~icons/carbon/earth-filled"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); interface Props { loading?: boolean; } let { loading = false }: Props = $props(); const settings = useSettingsStore(); let detailsEl: HTMLDetailsElement | undefined = $state(); // active tools are all the checked tools, either from settings or on by default let activeToolCount = $derived( page.data.tools.filter( (tool: ToolFront) => // community tools are always on by default tool.type === "community" || $settings?.tools?.includes(tool._id) ).length ); async function setAllTools(value: boolean) { const configToolsIds = page.data.tools .filter((t: ToolFront) => t.type === "config") .map((t: ToolFront) => t._id); if (value) { await settings.instantSet({ tools: Array.from(new Set([...configToolsIds, ...($settings?.tools ?? [])])), }); } else { await settings.instantSet({ tools: [], }); } } let allToolsEnabled = $derived(activeToolCount === page.data.tools.length); let tools = $derived(page.data.tools); </script> <details class="group relative bottom-0 h-full min-h-8" bind:this={detailsEl} use:clickOutside={() => { if (detailsEl?.hasAttribute("open")) { detailsEl.removeAttribute("open"); } }} > <summary class="absolute bottom-0 flex h-8 cursor-pointer select-none items-center gap-1 rounded-lg border bg-white px-2 py-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900" > <IconTool classNames="dark:text-purple-600" /> Tools <span class="text-gray-400 dark:text-gray-500"> ({activeToolCount}) </span> </summary> <div class="absolute bottom-10 h-max w-max select-none items-center gap-1 rounded-lg border bg-white p-0.5 shadow-sm dark:border-gray-800 dark:bg-gray-900" > <div class="grid grid-cols-2 gap-x-6 gap-y-1 p-3"> <div class="col-span-2 flex items-center gap-1.5 text-sm text-gray-500"> Available tools {#if publicConfig.isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/470" target="_blank" class="hover:brightness-0 dark:hover:brightness-200" ><CarbonInformation class="text-xs" /></a > {/if} <button class="ml-auto text-xs underline" onclick={(e) => { e.stopPropagation(); setAllTools(!allToolsEnabled); }} > {#if allToolsEnabled} Disable all {:else} Enable all {/if} </button> </div> {#if page.data.enableCommunityTools} <a href="{base}/tools" class="col-span-2 my-1 h-fit w-fit items-center justify-center rounded-full bg-purple-500/20 px-2.5 py-1.5 text-sm hover:bg-purple-500/30" > <span class="mr-1 rounded-full bg-purple-700 px-1.5 py-1 text-xs font-bold uppercase"> new </span> Browse community tools ({page.data.communityToolCount ?? 0}) </a> {/if} {#each tools as tool} {@const isChecked = $settings?.tools?.includes(tool._id)} <div class="flex items-center gap-1.5"> {#if tool.type === "community"} <input type="checkbox" id={tool._id} checked={true} class="rounded-xs font-semibold accent-purple-500 hover:accent-purple-600" onclick={async (e) => { e.preventDefault(); e.stopPropagation(); await settings.instantSet({ tools: $settings?.tools?.filter((t) => t !== tool._id) ?? 
[], }); }} /> {:else} <input type="checkbox" id={tool._id} checked={isChecked} disabled={loading} onclick={async (e) => { e.preventDefault(); e.stopPropagation(); if (isChecked) { await settings.instantSet({ tools: ($settings?.tools ?? []).filter((t) => t !== tool._id), }); } else { await settings.instantSet({ tools: [...($settings?.tools ?? []), tool._id], }); } }} /> {/if} <label class="cursor-pointer" for={tool._id}>{tool.displayName}</label> {#if tool.type === "community"} <a href="{base}/tools/{tool._id}" class="text-purple-600 hover:text-purple-700"> <CarbonGlobe /> </a> {/if} </div> {/each} </div> </div> </details> <style> details summary::-webkit-details-marker { display: none; } </style>
chat-ui/src/lib/components/ToolsMenu.svelte/0
{ "file_path": "chat-ui/src/lib/components/ToolsMenu.svelte", "repo_id": "chat-ui", "token_count": 2098 }
77
<script lang="ts"> import { MessageToolUpdateType, type MessageToolUpdate } from "$lib/types/MessageUpdate"; import { isMessageToolCallUpdate, isMessageToolErrorUpdate, isMessageToolResultUpdate, } from "$lib/utils/messageUpdates"; import CarbonTools from "~icons/carbon/tools"; import { ToolResultStatus, type ToolFront } from "$lib/types/Tool"; import { page } from "$app/state"; import { onDestroy } from "svelte"; import { browser } from "$app/environment"; interface Props { tool: MessageToolUpdate[]; loading?: boolean; } let { tool, loading = false }: Props = $props(); const toolFnName = tool.find(isMessageToolCallUpdate)?.call.name; let toolError = $derived(tool.some(isMessageToolErrorUpdate)); let toolDone = $derived(tool.some(isMessageToolResultUpdate)); let eta = $derived(tool.find((el) => el.subtype === MessageToolUpdateType.ETA)?.eta); const availableTools: ToolFront[] = page.data.tools; let loadingBarEl: HTMLDivElement | undefined = $state(); let animation: Animation | undefined = $state(undefined); let isShowingLoadingBar = $state(false); $effect(() => { !toolError && !toolDone && loading && loadingBarEl && eta && (() => { loadingBarEl.classList.remove("hidden"); isShowingLoadingBar = true; animation = loadingBarEl.animate([{ width: "0%" }, { width: "calc(100%+1rem)" }], { duration: eta * 1000, fill: "forwards", }); })(); }); onDestroy(() => { if (animation) { animation.cancel(); } }); // go to 100% quickly if loading is done $effect(() => { (!loading || toolDone || toolError) && browser && loadingBarEl && isShowingLoadingBar && (() => { isShowingLoadingBar = false; loadingBarEl.classList.remove("hidden"); animation?.cancel(); animation = loadingBarEl.animate( [{ width: loadingBarEl.style.width }, { width: "calc(100%+1rem)" }], { duration: 300, fill: "forwards", } ); setTimeout(() => { loadingBarEl?.classList.add("hidden"); }, 300); })(); }); </script> {#if toolFnName && toolFnName !== "websearch"} <details class="group/tool my-2.5 w-fit cursor-pointer rounded-lg border border-gray-200 bg-white pl-1 pr-2.5 text-sm shadow-sm transition-all open:mb-3 open:border-purple-500/10 open:bg-purple-600/5 open:shadow-sm dark:border-gray-800 dark:bg-gray-900 open:dark:border-purple-800/40 open:dark:bg-purple-800/10" > <summary class="relative flex select-none list-none items-center gap-1.5 py-1 group-open/tool:text-purple-700 group-open/tool:dark:text-purple-300" > <div bind:this={loadingBarEl} class="absolute -m-1 hidden h-full w-[calc(100%+1rem)] rounded-lg bg-purple-500/5 transition-all dark:bg-purple-500/10" ></div> <div class="relative grid size-[22px] place-items-center rounded bg-purple-600/10 dark:bg-purple-600/20" > <svg class="absolute inset-0 text-purple-500/40 transition-opacity" class:invisible={toolDone || toolError} width="22" height="22" viewBox="0 0 38 38" fill="none" xmlns="http://www.w3.org/2000/svg" > <path class="loading-path" d="M8 2.5H30C30 2.5 35.5 2.5 35.5 8V30C35.5 30 35.5 35.5 30 35.5H8C8 35.5 2.5 35.5 2.5 30V8C2.5 8 2.5 2.5 8 2.5Z" stroke="currentColor" stroke-width="1" stroke-linecap="round" id="shape" /> </svg> <CarbonTools class="text-xs text-purple-700 dark:text-purple-500" /> </div> <span> {toolError ? "Error calling" : toolDone ? "Called" : "Calling"} tool <span class="font-semibold" >{availableTools.find((tool) => tool.name === toolFnName)?.displayName ?? 
toolFnName}</span > </span> </summary> {#each tool as toolUpdate} {#if toolUpdate.subtype === MessageToolUpdateType.Call} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Parameters</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div> </div> <ul class="py-1 text-sm"> {#each Object.entries(toolUpdate.call.parameters ?? {}) as [k, v]} {#if v !== null} <li> <span class="font-semibold">{k}</span>: <span>{v}</span> </li> {/if} {/each} </ul> {:else if toolUpdate.subtype === MessageToolUpdateType.Error} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Error</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div> </div> <p class="text-sm">{toolUpdate.message}</p> {:else if isMessageToolResultUpdate(toolUpdate) && toolUpdate.result.status === ToolResultStatus.Success && toolUpdate.result.display} <div class="mt-1 flex items-center gap-2 opacity-80"> <h3 class="text-sm">Result</h3> <div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div> </div> <ul class="py-1 text-sm"> {#each toolUpdate.result.outputs as output} {#each Object.entries(output) as [k, v]} {#if v !== null} <li> <span class="font-semibold">{k}</span>: <span>{v}</span> </li> {/if} {/each} {/each} </ul> {/if} {/each} </details> {/if} <style> details summary::-webkit-details-marker { display: none; } .loading-path { stroke-dasharray: 61.45; animation: loading 2s linear infinite; } </style>
chat-ui/src/lib/components/chat/ToolUpdate.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ToolUpdate.svelte", "repo_id": "chat-ui", "token_count": 2368 }
78
export const CONV_NUM_PER_PAGE = 30;
chat-ui/src/lib/constants/pagination.ts/0
{ "file_path": "chat-ui/src/lib/constants/pagination.ts", "repo_id": "chat-ui", "token_count": 15 }
79
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { Collection, FindCursor, ObjectId } from "mongodb"; import { logger } from "$lib/server/logger"; import type { Conversation } from "$lib/types/Conversation"; const BATCH_SIZE = 1000; const DELETE_THRESHOLD_MS = 60 * 60 * 1000; async function deleteBatch(conversations: Collection<Conversation>, ids: ObjectId[]) { if (ids.length === 0) return 0; const deleteResult = await conversations.deleteMany({ _id: { $in: ids } }); return deleteResult.deletedCount; } async function processCursor<T>( cursor: FindCursor<T>, processBatchFn: (batch: T[]) => Promise<void> ) { let batch = []; while (await cursor.hasNext()) { const doc = await cursor.next(); if (doc) { batch.push(doc); } if (batch.length >= BATCH_SIZE) { await processBatchFn(batch); batch = []; } } if (batch.length > 0) { await processBatchFn(batch); } } export async function deleteConversations( collections: typeof import("$lib/server/database").collections ) { let deleteCount = 0; const { conversations, sessions } = collections; // First criteria: Delete conversations with no user/assistant messages older than 1 hour const emptyConvCursor = conversations .find({ "messages.from": { $not: { $in: ["user", "assistant"] } }, createdAt: { $lt: new Date(Date.now() - DELETE_THRESHOLD_MS) }, }) .batchSize(BATCH_SIZE); await processCursor(emptyConvCursor, async (batch) => { const ids = batch.map((doc) => doc._id); deleteCount += await deleteBatch(conversations, ids); }); // Second criteria: Process conversations without users in batches and check sessions const noUserCursor = conversations.find({ userId: { $exists: false } }).batchSize(BATCH_SIZE); await processCursor(noUserCursor, async (batch) => { const sessionIds = [ ...new Set(batch.map((conv) => conv.sessionId).filter((id): id is string => !!id)), ]; const existingSessions = await sessions.find({ sessionId: { $in: sessionIds } }).toArray(); const validSessionIds = new Set(existingSessions.map((s) => s.sessionId)); const invalidConvs = batch.filter( (conv) => !conv.sessionId || !validSessionIds.has(conv.sessionId) ); const idsToDelete = invalidConvs.map((conv) => conv._id); deleteCount += await deleteBatch(conversations, idsToDelete); }); logger.info(`[MIGRATIONS] Deleted ${deleteCount} conversations in total.`); return deleteCount; } const deleteEmptyConversations: Migration = { _id: new ObjectId("000000000000000000000009"), name: "Delete conversations with no user or assistant messages or valid sessions", up: async () => { await deleteConversations(collections); return true; }, runEveryTime: false, runForHuggingChat: "only", }; export default deleteEmptyConversations;
chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.ts", "repo_id": "chat-ui", "token_count": 950 }
80
import { z } from "zod"; import { embeddingEndpointTei, embeddingEndpointTeiParametersSchema, } from "./tei/embeddingEndpoints"; import { embeddingEndpointTransformersJS, embeddingEndpointTransformersJSParametersSchema, } from "./transformersjs/embeddingEndpoints"; import { embeddingEndpointOpenAI, embeddingEndpointOpenAIParametersSchema, } from "./openai/embeddingEndpoints"; import { embeddingEndpointHfApi, embeddingEndpointHfApiSchema } from "./hfApi/embeddingHfApi"; // parameters passed when generating text interface EmbeddingEndpointParameters { inputs: string[]; } export type Embedding = number[]; // type signature for the endpoint export type EmbeddingEndpoint = (params: EmbeddingEndpointParameters) => Promise<Embedding[]>; export const embeddingEndpointSchema = z.discriminatedUnion("type", [ embeddingEndpointTeiParametersSchema, embeddingEndpointTransformersJSParametersSchema, embeddingEndpointOpenAIParametersSchema, embeddingEndpointHfApiSchema, ]); type EmbeddingEndpointTypeOptions = z.infer<typeof embeddingEndpointSchema>["type"]; // generator function that takes in type discrimantor value for defining the endpoint and return the endpoint export type EmbeddingEndpointGenerator<T extends EmbeddingEndpointTypeOptions> = ( inputs: Extract<z.infer<typeof embeddingEndpointSchema>, { type: T }> ) => EmbeddingEndpoint | Promise<EmbeddingEndpoint>; // list of all endpoint generators export const embeddingEndpoints: { [Key in EmbeddingEndpointTypeOptions]: EmbeddingEndpointGenerator<Key>; } = { tei: embeddingEndpointTei, transformersjs: embeddingEndpointTransformersJS, openai: embeddingEndpointOpenAI, hfapi: embeddingEndpointHfApi, }; export default embeddingEndpoints;
chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 544 }
81
import { VertexAI, HarmCategory, HarmBlockThreshold, type Content, type TextPart, } from "@google-cloud/vertexai"; import type { Endpoint, TextGenerationStreamOutputWithToolsAndWebSources } from "../endpoints"; import { z } from "zod"; import type { Message } from "$lib/types/Message"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import { createDocumentProcessorOptionsValidator, makeDocumentProcessor } from "../document"; export const endpointVertexParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), // allow optional and validate against emptiness type: z.literal("vertex"), location: z.string().default("europe-west1"), extraBody: z.object({ model_version: z.string() }).optional(), project: z.string(), apiEndpoint: z.string().optional(), safetyThreshold: z .enum([ HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED, HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, HarmBlockThreshold.BLOCK_NONE, HarmBlockThreshold.BLOCK_ONLY_HIGH, ]) .optional(), tools: z.array(z.any()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: 20, maxWidth: 4096, maxHeight: 4096, }), document: createDocumentProcessorOptionsValidator({ supportedMimeTypes: ["application/pdf", "text/plain"], maxSizeInMB: 20, }), }) .default({}), }); export function endpointVertex(input: z.input<typeof endpointVertexParametersSchema>): Endpoint { const { project, location, model, apiEndpoint, safetyThreshold, tools, multimodal, extraBody } = endpointVertexParametersSchema.parse(input); const vertex_ai = new VertexAI({ project, location, apiEndpoint, }); return async ({ messages, preprompt, generateSettings }) => { const parameters = { ...model.parameters, ...generateSettings }; const hasFiles = messages.some((message) => message.files && message.files.length > 0); const generativeModel = vertex_ai.getGenerativeModel({ model: extraBody?.model_version ?? model.id ?? model.name, safetySettings: safetyThreshold ? [ { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_UNSPECIFIED, threshold: safetyThreshold, }, ] : undefined, generationConfig: { maxOutputTokens: parameters?.max_new_tokens ?? 4096, stopSequences: parameters?.stop, temperature: parameters?.temperature ?? 1, }, // tools and multimodal are mutually exclusive tools: !hasFiles ? tools : undefined, }); // Preprompt is the same as the first system message. let systemMessage = preprompt; if (messages[0].from === "system") { systemMessage = messages[0].content; messages.shift(); } const vertexMessages = await Promise.all( messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => { const imageProcessor = makeImageProcessor(multimodal.image); const documentProcessor = makeDocumentProcessor(multimodal.document); const processedFilesWithNull = files && files.length > 0 ? 
await Promise.all( files.map(async (file) => { if (file.mime.includes("image")) { const { image, mime } = await imageProcessor(file); return { file: image, mime }; } else if (file.mime === "application/pdf" || file.mime === "text/plain") { return documentProcessor(file); } return null; }) ) : []; const processedFiles = processedFilesWithNull.filter((file) => file !== null); return { role: from === "user" ? "user" : "model", parts: [ ...processedFiles.map((processedFile) => ({ inlineData: { data: processedFile.file.toString("base64"), mimeType: processedFile.mime, }, })), { text: content, }, ], }; }) ); const result = await generativeModel.generateContentStream({ contents: vertexMessages, systemInstruction: systemMessage ? { role: "system", parts: [ { text: systemMessage, }, ], } : undefined, }); let tokenId = 0; return (async function* () { let generatedText = ""; const webSources = []; for await (const data of result.stream) { if (!data?.candidates?.length) break; // Handle case where no candidates are present const candidate = data.candidates[0]; if (!candidate.content?.parts?.length) continue; // Skip if no parts are present const firstPart = candidate.content.parts.find((part) => "text" in part) as | TextPart | undefined; if (!firstPart) continue; // Skip if no text part is found const isLastChunk = !!candidate.finishReason; const candidateWebSources = candidate.groundingMetadata?.groundingChunks ?.map((chunk) => { const uri = chunk.web?.uri ?? chunk.retrievedContext?.uri; const title = chunk.web?.title ?? chunk.retrievedContext?.title; if (!uri || !title) { return null; } return { uri, title, }; }) .filter((source) => source !== null); if (candidateWebSources) { webSources.push(...candidateWebSources); } const content = firstPart.text; generatedText += content; const output: TextGenerationStreamOutputWithToolsAndWebSources = { token: { id: tokenId++, text: content, logprob: 0, special: isLastChunk, }, generated_text: isLastChunk ? generatedText : null, details: null, webSources, }; yield output; if (isLastChunk) break; } })(); }; } export default endpointVertex;
chat-ui/src/lib/server/endpoints/google/endpointVertex.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/google/endpointVertex.ts", "repo_id": "chat-ui", "token_count": 2595 }
82
import { dot } from "@huggingface/transformers";
import type { EmbeddingBackendModel } from "$lib/server/embeddingModels";
import type { Embedding } from "$lib/server/embeddingEndpoints/embeddingEndpoints";

// see here: https://github.com/nmslib/hnswlib/blob/359b2ba87358224963986f709e593d799064ace6/README.md?plain=1#L34
export function innerProduct(embeddingA: Embedding, embeddingB: Embedding) {
  return 1.0 - dot(embeddingA, embeddingB);
}

export async function getSentenceSimilarity(
  embeddingModel: EmbeddingBackendModel,
  query: string,
  sentences: string[]
): Promise<{ distance: number; embedding: Embedding; idx: number }[]> {
  const inputs = [
    `${embeddingModel.preQuery}${query}`,
    ...sentences.map((sentence) => `${embeddingModel.prePassage}${sentence}`),
  ];

  const embeddingEndpoint = await embeddingModel.getEndpoint();
  const output = await embeddingEndpoint({ inputs }).catch((err) => {
    throw Error("Failed to generate embeddings for sentence similarity", { cause: err });
  });

  const queryEmbedding: Embedding = output[0];
  const sentencesEmbeddings: Embedding[] = output.slice(1);

  return sentencesEmbeddings.map((sentenceEmbedding, idx) => ({
    distance: innerProduct(queryEmbedding, sentenceEmbedding),
    embedding: sentenceEmbedding,
    idx,
  }));
}
chat-ui/src/lib/server/sentenceSimilarity.ts/0
{ "file_path": "chat-ui/src/lib/server/sentenceSimilarity.ts", "repo_id": "chat-ui", "token_count": 433 }
83
import { z } from "zod";
import { config } from "$lib/server/config";
import JSON5 from "json5";

// RATE_LIMIT is the legacy way to define messages per minute limit
export const usageLimitsSchema = z
  .object({
    conversations: z.coerce.number().optional(), // how many conversations
    messages: z.coerce.number().optional(), // how many messages in a conversation
    assistants: z.coerce.number().optional(), // how many assistants
    messageLength: z.coerce.number().optional(), // how long can a message be before we cut it off
    messagesPerMinute: z
      .preprocess((val) => {
        if (val === undefined) {
          return config.RATE_LIMIT;
        }
        return val;
      }, z.coerce.number().optional())
      .optional(), // how many messages per minute
    tools: z.coerce.number().optional(), // how many tools
  })
  .optional();

export const usageLimits = usageLimitsSchema.parse(JSON5.parse(config.USAGE_LIMITS));
chat-ui/src/lib/server/usageLimits.ts/0
{ "file_path": "chat-ui/src/lib/server/usageLimits.ts", "repo_id": "chat-ui", "token_count": 308 }
84
import type { WebSearchSource } from "$lib/types/WebSearch";
import { config } from "$lib/server/config";

export default async function search(query: string): Promise<WebSearchSource[]> {
  // const params = {
  //   q: query,
  //   // You can add other parameters if needed, like 'count', 'offset', etc.
  // };
  const response = await fetch(
    "https://api.bing.microsoft.com/v7.0/search" + "?q=" + encodeURIComponent(query),
    {
      method: "GET",
      headers: {
        "Ocp-Apim-Subscription-Key": config.BING_SUBSCRIPTION_KEY,
        "Content-type": "application/json",
      },
    }
  );

  /* eslint-disable @typescript-eslint/no-explicit-any */
  const data = (await response.json()) as Record<string, any>;

  if (!response.ok) {
    throw new Error(
      data["message"] ??
        `Bing API returned error code ${response.status} - ${response.statusText}`
    );
  }

  // Adapt the data structure from the Bing response to match the WebSearchSource type
  const webPages = data["webPages"]?.["value"] ?? [];
  return webPages.map((page: any) => ({
    title: page.name,
    link: page.url,
    text: page.snippet,
    displayLink: page.displayUrl,
  }));
}
chat-ui/src/lib/server/websearch/search/endpoints/bing.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/bing.ts", "repo_id": "chat-ui", "token_count": 407 }
85
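The response mapping at the end of the endpoint above is easy to check in isolation. The sketch below runs it against a made-up Bing payload, with a local stand-in for the `WebSearchSource` type (the real one lives in `$lib/types/WebSearch`):

```ts
// Local stand-in for the WebSearchSource type imported in the endpoint above.
type WebSearchSourceSketch = { title: string; link: string; text: string; displayLink: string };

// Invented response fragment shaped like Bing's `webPages.value` array.
const fakeBingData: Record<string, any> = {
	webPages: {
		value: [{ name: "Example", url: "https://example.com", snippet: "An example page.", displayUrl: "example.com" }],
	},
};

const webPages = fakeBingData["webPages"]?.["value"] ?? [];
const sources: WebSearchSourceSketch[] = webPages.map((page: any) => ({
	title: page.name,
	link: page.url,
	text: page.snippet,
	displayLink: page.displayUrl,
}));

console.log(sources[0].link); // "https://example.com"
```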
import { browser } from "$app/environment";
import { invalidate } from "$app/navigation";
import { base } from "$app/paths";
import { UrlDependency } from "$lib/types/UrlDependency";
import type { ObjectId } from "mongodb";
import { getContext, setContext } from "svelte";
import { type Writable, writable, get } from "svelte/store";

type SettingsStore = {
	shareConversationsWithModelAuthors: boolean;
	hideEmojiOnSidebar: boolean;
	ethicsModalAccepted: boolean;
	ethicsModalAcceptedAt: Date | null;
	activeModel: string;
	customPrompts: Record<string, string>;
	recentlySaved: boolean;
	assistants: Array<ObjectId | string>;
	tools?: Array<string>;
	disableStream: boolean;
	directPaste: boolean;
};

type SettingsStoreWritable = Writable<SettingsStore> & {
	instantSet: (settings: Partial<SettingsStore>) => Promise<void>;
};

export function useSettingsStore() {
	return getContext<SettingsStoreWritable>("settings");
}

export function createSettingsStore(initialValue: Omit<SettingsStore, "recentlySaved">) {
	const baseStore = writable({ ...initialValue, recentlySaved: false });

	let timeoutId: NodeJS.Timeout;

	async function setSettings(settings: Partial<SettingsStore>) {
		baseStore.update((s) => ({
			...s,
			...settings,
		}));

		if (browser) {
			clearTimeout(timeoutId);
			timeoutId = setTimeout(async () => {
				await fetch(`${base}/settings`, {
					method: "POST",
					headers: {
						"Content-Type": "application/json",
					},
					body: JSON.stringify({
						...get(baseStore),
						...settings,
					}),
				});

				invalidate(UrlDependency.ConversationList);

				// set savedRecently to true for 3s
				baseStore.update((s) => ({
					...s,
					recentlySaved: true,
				}));

				setTimeout(() => {
					baseStore.update((s) => ({
						...s,
						recentlySaved: false,
					}));
				}, 3000);
			}, 300); // debounce server calls by 300ms
		}
	}

	async function instantSet(settings: Partial<SettingsStore>) {
		baseStore.update((s) => ({
			...s,
			...settings,
		}));

		if (browser) {
			await fetch(`${base}/settings`, {
				method: "POST",
				headers: {
					"Content-Type": "application/json",
				},
				body: JSON.stringify({
					...get(baseStore),
					...settings,
				}),
			});
			invalidate(UrlDependency.ConversationList);
		}
	}

	const newStore = {
		subscribe: baseStore.subscribe,
		set: setSettings,
		instantSet,
		update: (fn: (s: SettingsStore) => SettingsStore) => {
			setSettings(fn(get(baseStore)));
		},
	} satisfies SettingsStoreWritable;

	setContext("settings", newStore);

	return newStore;
}
chat-ui/src/lib/stores/settings.ts/0
{ "file_path": "chat-ui/src/lib/stores/settings.ts", "repo_id": "chat-ui", "token_count": 989 }
86
import type { ObjectId } from "mongodb";
import type { User } from "./User";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";

export interface Report extends Timestamps {
	_id: ObjectId;
	createdBy: User["_id"] | string;
	object: "assistant" | "tool";
	contentId: Assistant["_id"];
	reason?: string;
}
chat-ui/src/lib/types/Report.ts/0
{ "file_path": "chat-ui/src/lib/types/Report.ts", "repo_id": "chat-ui", "token_count": 112 }
87
/**
 * A debounce function that works in both browser and Nodejs.
 * For pure Nodejs work, prefer the `Debouncer` class.
 */
export function debounce<T extends unknown[]>(
	callback: (...rest: T) => unknown,
	limit: number
): (...rest: T) => void {
	let timer: ReturnType<typeof setTimeout>;

	return function (...rest) {
		clearTimeout(timer);

		timer = setTimeout(() => {
			callback(...rest);
		}, limit);
	};
}
chat-ui/src/lib/utils/debounce.ts/0
{ "file_path": "chat-ui/src/lib/utils/debounce.ts", "repo_id": "chat-ui", "token_count": 138 }
88
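A hypothetical usage example of the helper above (the `$lib/utils/debounce` import path assumes SvelteKit's `$lib` alias): rapid calls collapse into a single invocation once they stop arriving for the configured delay.

```ts
import { debounce } from "$lib/utils/debounce";

const search = (query: string) => console.log(`searching for "${query}"`);
const debouncedSearch = debounce(search, 200);

// Only the last call's arguments reach `search`, roughly 200 ms after the burst ends.
debouncedSearch("h");
debouncedSearch("he");
debouncedSearch("hello");
```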
export function parseStringToList(links: unknown): string[] {
	if (typeof links !== "string") {
		throw new Error("Expected a string");
	}

	return links
		.split(",")
		.map((link) => link.trim())
		.filter((link) => link.length > 0);
}
chat-ui/src/lib/utils/parseStringToList.ts/0
{ "file_path": "chat-ui/src/lib/utils/parseStringToList.ts", "repo_id": "chat-ui", "token_count": 86 }
89
import { v4 } from "uuid"; import type { Tree, TreeId, NewNode, TreeNode } from "./tree"; export function addSibling<T>(conv: Tree<T>, message: NewNode<T>, siblingId: TreeId): TreeId { if (conv.messages.length === 0) { throw new Error("Cannot add a sibling to an empty conversation"); } if (!conv.rootMessageId) { throw new Error("Cannot add a sibling to a legacy conversation"); } const sibling = conv.messages.find((m) => m.id === siblingId); if (!sibling) { throw new Error("The sibling message doesn't exist"); } if (!sibling.ancestors || sibling.ancestors?.length === 0) { throw new Error("The sibling message is the root message, therefore we can't add a sibling"); } const messageId = v4(); conv.messages.push({ ...message, id: messageId, ancestors: sibling.ancestors, children: [], } as TreeNode<T>); const nearestAncestorId = sibling.ancestors[sibling.ancestors.length - 1]; const nearestAncestor = conv.messages.find((m) => m.id === nearestAncestorId); if (nearestAncestor) { if (nearestAncestor.children) { nearestAncestor.children.push(messageId); } else nearestAncestor.children = [messageId]; } return messageId; }
chat-ui/src/lib/utils/tree/addSibling.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addSibling.ts", "repo_id": "chat-ui", "token_count": 409 }
90
import { json } from "@sveltejs/kit";
import { logger } from "$lib/server/logger";
import { computeAllStats } from "$lib/jobs/refresh-conversation-stats";

// Trigger like this:
// curl -X POST "http://localhost:5173/chat/admin/stats/compute" -H "Authorization: Bearer <ADMIN_API_SECRET>"

export async function POST() {
	computeAllStats().catch((e) => logger.error(e));

	return json(
		{
			message: "Stats job started",
		},
		{ status: 202 }
	);
}
chat-ui/src/routes/admin/stats/compute/+server.ts/0
{ "file_path": "chat-ui/src/routes/admin/stats/compute/+server.ts", "repo_id": "chat-ui", "token_count": 161 }
91
import { config } from "$lib/server/config"; import { collections } from "$lib/server/database.js"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import type { BaseTool, CommunityToolDB } from "$lib/types/Tool.js"; import { generateQueryTokens, generateSearchTokens } from "$lib/utils/searchTokens.js"; import type { Filter } from "mongodb"; import { ReviewStatus } from "$lib/types/Review"; export async function GET({ url }) { if (config.COMMUNITY_TOOLS !== "true") { return new Response("Community tools are not enabled", { status: 403 }); } const query = url.searchParams.get("q")?.trim() ?? null; const queryTokens = !!query && generateQueryTokens(query); const filter: Filter<CommunityToolDB> = { ...(queryTokens && { searchTokens: { $all: queryTokens } }), review: ReviewStatus.APPROVED, }; const matchingCommunityTools = await collections.tools .find(filter) .project<Pick<BaseTool, "_id" | "displayName" | "color" | "icon">>({ _id: 1, displayName: 1, color: 1, icon: 1, createdByName: 1, }) .sort({ useCount: -1 }) .limit(5) .toArray(); const matchingConfigTools = toolFromConfigs .filter((tool) => !tool?.isHidden) .filter((tool) => tool.name !== "websearch") // filter out websearch tool from config tools since its added separately .filter((tool) => { if (queryTokens) { return generateSearchTokens(tool.displayName).some((token) => queryTokens.some((queryToken) => queryToken.test(token)) ); } return true; }) .map((tool) => ({ _id: tool._id, displayName: tool.displayName, color: tool.color, icon: tool.icon, createdByName: undefined, })); const tools = [...matchingConfigTools, ...matchingCommunityTools] satisfies Array< Pick<BaseTool, "_id" | "displayName" | "color" | "icon"> & { createdByName?: string } >; return Response.json(tools.map((tool) => ({ ...tool, _id: tool._id.toString() })).slice(0, 5)); }
chat-ui/src/routes/api/tools/search/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/tools/search/+server.ts", "repo_id": "chat-ui", "token_count": 682 }
92
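The config-tool branch above matches display names by testing query tokens against search tokens. The `generateQueryTokens` and `generateSearchTokens` helpers are not shown in this entry, so the sketch below uses simplified stand-ins (lower-cased words and prefix regexes, with no escaping) purely to illustrate the matching step:

```ts
// Simplified stand-ins for the real searchTokens helpers (assumed behavior: prefix matching on words).
const generateSearchTokens = (name: string) => name.toLowerCase().split(/\s+/);
const generateQueryTokens = (query: string) =>
	query.toLowerCase().split(/\s+/).map((word) => new RegExp(`^${word}`));

const queryTokens = generateQueryTokens("image gen");
const displayNames = ["Image Generation", "Web Search", "Calculator"];

// Keep a tool if any of its name tokens starts with any query token, mirroring the endpoint above.
const matches = displayNames.filter((displayName) =>
	generateSearchTokens(displayName).some((token) => queryTokens.some((queryToken) => queryToken.test(token)))
);

console.log(matches); // ["Image Generation"]
```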
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { MetricsServer } from "$lib/server/metrics.js";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";

export async function POST({ params, request, locals }) {
	const { score } = z
		.object({
			score: z.number().int().min(-1).max(1),
		})
		.parse(await request.json());
	const conversationId = new ObjectId(params.id);
	const messageId = params.messageId;

	// aggregate votes per model in order to detect model performance degradation
	const model = await collections.conversations
		.findOne(
			{
				_id: conversationId,
				...authCondition(locals),
			},
			{ projection: { model: 1 } }
		)
		.then((c) => c?.model);

	if (model) {
		if (score === 1) {
			MetricsServer.getMetrics().model.votesPositive.inc({ model });
		} else {
			MetricsServer.getMetrics().model.votesNegative.inc({ model });
		}
	}

	const document = await collections.conversations.updateOne(
		{
			_id: conversationId,
			...authCondition(locals),
			"messages.id": messageId,
		},
		{
			...(score !== 0
				? {
						$set: {
							"messages.$.score": score,
						},
					}
				: { $unset: { "messages.$.score": "" } }),
		}
	);

	if (!document.matchedCount) {
		error(404, "Message not found");
	}

	return new Response();
}
chat-ui/src/routes/conversation/[id]/message/[messageId]/vote/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/message/[messageId]/vote/+server.ts", "repo_id": "chat-ui", "token_count": 524 }
93
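The update document in the handler above switches between `$set` and `$unset` depending on the score. A standalone sketch of just that branch (the helper name is invented for illustration; the real endpoint builds the object inline):

```ts
type VoteScore = -1 | 0 | 1;

// Hypothetical helper mirroring the conditional spread in the endpoint above.
function buildVoteUpdate(score: VoteScore) {
	return score !== 0
		? { $set: { "messages.$.score": score } }
		: { $unset: { "messages.$.score": "" } };
}

console.log(buildVoteUpdate(1)); // { '$set': { 'messages.$.score': 1 } }
console.log(buildVoteUpdate(0)); // { '$unset': { 'messages.$.score': '' } }
```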
import { redirect, type LoadEvent } from "@sveltejs/kit";

export const load = async ({ params, url }: LoadEvent) => {
	const leafId = url.searchParams.get("leafId");

	redirect(302, "../conversation/" + params.id + `?leafId=${leafId}`);
};
chat-ui/src/routes/r/[id]/+page.ts/0
{ "file_path": "chat-ui/src/routes/r/[id]/+page.ts", "repo_id": "chat-ui", "token_count": 84 }
94
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ parent }) { const { enableCommunityTools } = await parent(); if (enableCommunityTools) { return {}; } redirect(302, `${base}/`); }
chat-ui/src/routes/tools/+layout.ts/0
{ "file_path": "chat-ui/src/routes/tools/+layout.ts", "repo_id": "chat-ui", "token_count": 82 }
95
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": "point", "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "quantitative", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "quantitative", "title": "<DVC_METRIC_Y_LABEL>", "scale": { "zero": false } }, "color": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/scatter.json/0
{ "file_path": "datasets/.dvc/plots/scatter.json", "repo_id": "datasets", "token_count": 402 }
96
cff-version: 1.2.0 message: "If you use this software, please cite it as below." title: "huggingface/datasets" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément doi: 10.5281/zenodo.4817768 repository-code: "https://github.com/huggingface/datasets" license: Apache-2.0 preferred-citation: type: conference-paper title: "Datasets: A Community Library for Natural Language Processing" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément collection-title: "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations" collection-type: proceedings month: 11 year: 2021 publisher: name: "Association for Computational Linguistics" url: "https://aclanthology.org/2021.emnlp-demo.21" start: 175 end: 184 identifiers: - type: other value: "arXiv:2109.02846" description: 
"The arXiv preprint of the paper"
datasets/CITATION.cff/0
{ "file_path": "datasets/CITATION.cff", "repo_id": "datasets", "token_count": 1428 }
97
# Load audio data You can load an audio dataset using the [`Audio`] feature that automatically decodes and resamples the audio files when you access the examples. Audio decoding is based on the [`soundfile`](https://github.com/bastibe/python-soundfile) python package, which uses the [`libsndfile`](https://github.com/libsndfile/libsndfile) C library under the hood. ## Installation To work with audio datasets, you need to have the `audio` dependencies installed. Check out the [installation](./installation#audio) guide to learn how to install it. ## Local files You can load your own dataset using the paths to your audio files. Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature: ```py >>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio()) >>> audio_dataset[0]["audio"] <datasets.features._torchcodec.AudioDecoder object at 0x11642b6a0> ``` ## AudioFolder You can also load a dataset with an `AudioFolder` dataset builder. It does not require writing a custom dataloader, making it useful for quickly creating and loading audio datasets with several thousand audio files. ## AudioFolder with metadata To link your audio files with metadata information, make sure your dataset has a `metadata.csv` file. Your dataset structure might look like: ``` folder/train/metadata.csv folder/train/first_audio_file.mp3 folder/train/second_audio_file.mp3 folder/train/third_audio_file.mp3 ``` Your `metadata.csv` file must have a `file_name` column which links audio files with their metadata. An example `metadata.csv` file might look like: ```text file_name,transcription first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali ``` `AudioFolder` will load audio data and create a `transcription` column containing texts from `metadata.csv`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("username/dataset_name") >>> # OR locally: >>> dataset = load_dataset("/path/to/folder") ``` For local datasets, this is equivalent to passing `audiofolder` manually in [`load_dataset`] and the directory in `data_dir`: ```py >>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder") ``` Metadata can also be specified as JSON Lines, in which case use `metadata.jsonl` as the name of the metadata file. This format is helpful in scenarios when one of the columns is complex, e.g. a list of floats, to avoid parsing errors or reading the complex values as strings. To ignore the information in the metadata file, set `drop_metadata=True` in [`load_dataset`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("username/dataset_with_metadata", drop_metadata=True) ``` If you don't have a metadata file, `AudioFolder` automatically infers the label name from the directory name. If you want to drop automatically created labels, set `drop_labels=True`. 
In this case, your dataset will only contain an audio column:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("username/dataset_without_metadata", drop_labels=True)
```

Finally, the `filters` argument lets you load only a subset of the dataset, based on a condition on the label or the metadata. This is especially useful if the metadata is in Parquet format, since this format enables fast filtering. It is also recommended to use this argument with `streaming=True`, because by default the dataset is fully downloaded before filtering.

```python
>>> filters = [("label", "=", 0)]
>>> dataset = load_dataset("username/dataset_name", streaming=True, filters=filters)
```

<Tip>

For more information about creating your own `AudioFolder` dataset, take a look at the [Create an audio dataset](./audio_dataset) guide.

</Tip>

For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.

## Audio decoding

By default, audio files are decoded sequentially as torchcodec [`AudioDecoder`](https://docs.pytorch.org/torchcodec/stable/generated/torchcodec.decoders.AudioDecoder.html#torchcodec.decoders.AudioDecoder) objects when you iterate on a dataset. However it is possible to speed up the dataset significantly using multithreaded decoding:

```python
>>> import os
>>> num_threads = min(32, (os.cpu_count() or 1) + 4)
>>> dataset = dataset.decode(num_threads=num_threads)
>>> for example in dataset:  # up to 20 times faster !
...     ...
```

You can enable multithreading using `num_threads`. This is especially useful to speed up remote data streaming. However it can be slower than `num_threads=0` for local data on fast disks.

If you are not interested in the audio decoded as `AudioDecoder` objects and would like to access the path/bytes instead, you can disable decoding:

```python
>>> dataset = dataset.decode(False)
```

Note: [`IterableDataset.decode`] is only available for streaming datasets at the moment.
datasets/docs/source/audio_load.mdx/0
{ "file_path": "datasets/docs/source/audio_load.mdx", "repo_id": "datasets", "token_count": 1760 }
98
# Structure your repository To host and share your dataset, create a dataset repository on the Hugging Face Hub and upload your data files. This guide will show you how to structure your dataset repository when you upload it. A dataset with a supported structure and file format (`.txt`, `.csv`, `.parquet`, `.jsonl`, `.mp3`, `.jpg`, `.zip` etc.) are loaded automatically with [`~datasets.load_dataset`], and it'll have a dataset viewer on its dataset page on the Hub. ## Main use-case The simplest dataset structure has two files: `train.csv` and `test.csv` (this works with any supported file format). Your repository will also contain a `README.md` file, the [dataset card](dataset_card) displayed on your dataset page. ``` my_dataset_repository/ ├── README.md ├── train.csv └── test.csv ``` In this simple case, you'll get a dataset with two splits: `train` (containing examples from `train.csv`) and `test` (containing examples from `test.csv`). ## Define your splits and subsets in YAML ## Splits If you have multiple files and want to define which file goes into which split, you can use the YAML `configs` field at the top of your README.md. For example, given a repository like this one: ``` my_dataset_repository/ ├── README.md ├── data.csv └── holdout.csv ``` You can define your splits by adding the `configs` field in the YAML block at the top of your README.md: ```yaml --- configs: - config_name: default data_files: - split: train path: "data.csv" - split: test path: "holdout.csv" --- ``` You can select multiple files per split using a list of paths: ``` my_dataset_repository/ ├── README.md ├── data/ │ ├── abc.csv │ └── def.csv └── holdout/ └── ghi.csv ``` ```yaml --- configs: - config_name: default data_files: - split: train path: - "data/abc.csv" - "data/def.csv" - split: test path: "holdout/ghi.csv" --- ``` Or you can use glob patterns to automatically list all the files you need: ```yaml --- configs: - config_name: default data_files: - split: train path: "data/*.csv" - split: test path: "holdout/*.csv" --- ``` <Tip warning={true}> Note that `config_name` field is required even if you have a single configuration. </Tip> ## Configurations Your dataset might have several subsets of data that you want to be able to load separately. In that case you can define a list of configurations inside the `configs` field in YAML: ``` my_dataset_repository/ ├── README.md ├── main_data.csv └── additional_data.csv ``` ```yaml --- configs: - config_name: main_data data_files: "main_data.csv" - config_name: additional_data data_files: "additional_data.csv" --- ``` Each configuration is shown separately on the Hugging Face Hub, and can be loaded by passing its name as a second parameter: ```python from datasets import load_dataset main_data = load_dataset("my_dataset_repository", "main_data") additional_data = load_dataset("my_dataset_repository", "additional_data") ``` ## Builder parameters Not only `data_files`, but other builder-specific parameters can be passed via YAML, allowing for more flexibility on how to load the data while not requiring any custom code. For example, define which separator to use in which configuration to load your `csv` files: ```yaml --- configs: - config_name: tab data_files: "main_data.csv" sep: "\t" - config_name: comma data_files: "additional_data.csv" sep: "," --- ``` Refer to [specific builders' documentation](./package_reference/builder_classes) to see what configuration parameters they have. <Tip> You can set a default configuration using `default: true`, e.g. 
you can run `main_data = load_dataset("my_dataset_repository")` if you set ```yaml - config_name: main_data data_files: "main_data.csv" default: true ``` </Tip> ## Automatic splits detection If no YAML is provided, 🤗 Datasets searches for certain patterns in the dataset repository to automatically infer the dataset splits. There is an order to the patterns, beginning with the custom filename split format to treating all files as a single split if no pattern is found. ### Directory name Your data files may also be placed into different directories named `train`, `test`, and `validation` where each directory contains the data files for that split: ``` my_dataset_repository/ ├── README.md └── data/ ├── train/ │ └── bees.csv ├── test/ │ └── more_bees.csv └── validation/ └── even_more_bees.csv ``` ### Filename splits If you don't have any non-traditional splits, then you can place the split name anywhere in the data file and it is automatically inferred. The only rule is that the split name must be delimited by non-word characters, like `test-file.csv` for example instead of `testfile.csv`. Supported delimiters include underscores, dashes, spaces, dots, and numbers. For example, the following file names are all acceptable: - train split: `train.csv`, `my_train_file.csv`, `train1.csv` - validation split: `validation.csv`, `my_validation_file.csv`, `validation1.csv` - test split: `test.csv`, `my_test_file.csv`, `test1.csv` Here is an example where all the files are placed into a directory named `data`: ``` my_dataset_repository/ ├── README.md └── data/ ├── train.csv ├── test.csv └── validation.csv ``` ### Custom filename split If your dataset splits have custom names that aren't `train`, `test`, or `validation`, then you can name your data files like `data/<split_name>-xxxxx-of-xxxxx.csv`. Here is an example with three splits, `train`, `test`, and `random`: ``` my_dataset_repository/ ├── README.md └── data/ ├── train-00000-of-00003.csv ├── train-00001-of-00003.csv ├── train-00002-of-00003.csv ├── test-00000-of-00001.csv ├── random-00000-of-00003.csv ├── random-00001-of-00003.csv └── random-00002-of-00003.csv ``` ### Single split When 🤗 Datasets can't find any of the above patterns, then it'll treat all the files as a single train split. If your dataset splits aren't loading as expected, it may be due to an incorrect pattern. ### Split name keywords There are several ways to name splits. Validation splits are sometimes called "dev", and test splits may be referred to as "eval". These other split names are also supported, and the following keywords are equivalent: - train, training - validation, valid, val, dev - test, testing, eval, evaluation The structure below is a valid repository: ``` my_dataset_repository/ ├── README.md └── data/ ├── training.csv ├── eval.csv └── valid.csv ``` ### Multiple files per split If one of your splits comprises several files, 🤗 Datasets can still infer whether it is the train, validation, and test split from the file name. For example, if your train and test splits span several files: ``` my_dataset_repository/ ├── README.md ├── train_0.csv ├── train_1.csv ├── train_2.csv ├── train_3.csv ├── test_0.csv └── test_1.csv ``` Make sure all the files of your `train` set have *train* in their names (same for test and validation). Even if you add a prefix or suffix to `train` in the file name (like `my_train_file_00001.csv` for example), 🤗 Datasets can still infer the appropriate split. For convenience, you can also place your data files into different directories. 
In this case, the split name is inferred from the directory name. ``` my_dataset_repository/ ├── README.md └── data/ ├── train/ │ ├── shard_0.csv │ ├── shard_1.csv │ ├── shard_2.csv │ └── shard_3.csv └── test/ ├── shard_0.csv └── shard_1.csv ```
datasets/docs/source/repository_structure.mdx/0
{ "file_path": "datasets/docs/source/repository_structure.mdx", "repo_id": "datasets", "token_count": 2555 }
99
# Using Datasets with TensorFlow

This document is a quick introduction to using `datasets` with TensorFlow, with a particular focus on how to get `tf.Tensor` objects out of our datasets, and how to stream data from Hugging Face `Dataset` objects to Keras methods like `model.fit()`.

## Dataset format

By default, datasets return regular Python objects: integers, floats, strings, lists, etc.

To get TensorFlow tensors instead, you can set the format of the dataset to `tf`:

```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>}
>>> ds[:2]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
```

<Tip>

A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to TensorFlow tensors.

</Tip>

This can be useful for converting your dataset to a dict of `Tensor` objects, or for writing a generator to load TF samples from it. If you wish to convert the entire dataset to `Tensor`, simply query the full dataset:

```py
>>> ds[:]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
```

### N-dimensional arrays

If your dataset consists of N-dimensional arrays, you will see that by default they are considered as the same tensor if the shape is fixed:

```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]  # fixed shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
```

Otherwise, a TensorFlow formatted dataset outputs a `RaggedTensor` instead of a single tensor:

```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3]],[[4, 5, 6],[7, 8]]]  # varying shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.RaggedTensor [[1, 2], [3]]>}
```

However this logic often requires slow shape comparisons and data copies.
To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors: ```py >>> from datasets import Dataset, Features, Array2D >>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] >>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')}) >>> ds = Dataset.from_dict({"data": data}, features=features) >>> ds = ds.with_format("tf") >>> ds[0] {'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[1, 2], [3, 4]])>} >>> ds[:2] {'data': <tf.Tensor: shape=(2, 2, 2), dtype=int64, numpy= array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])>} ``` ### Other feature types [`ClassLabel`] data are properly converted to tensors: ```py >>> from datasets import Dataset, Features, ClassLabel >>> labels = [0, 0, 1] >>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) >>> ds = Dataset.from_dict({"label": labels}, features=features) >>> ds = ds.with_format("tf") >>> ds[:3] {'label': <tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 0, 1])>} ``` Strings and binary objects are also supported: ```py >>> from datasets import Dataset, Features >>> text = ["foo", "bar"] >>> data = [0, 1] >>> ds = Dataset.from_dict({"text": text, "data": data}) >>> ds = ds.with_format("tf") >>> ds[:2] {'text': <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'foo', b'bar'], dtype=object)>, 'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>} ``` You can also explicitly format certain columns and leave the other columns unformatted: ```py >>> ds = ds.with_format("tf", columns=["data"], output_all_columns=True) >>> ds[:2] {'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>, 'text': ['foo', 'bar']} ``` String and binary objects are unchanged, since PyTorch only supports numbers. The [`Image`] and [`Audio`] feature types are also supported. <Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("tf") >>> ds[0] {'image': <tf.Tensor: shape=(512, 512, 4), dtype=uint8, numpy= array([[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]], dtype=uint8)>} >>> ds[:2] {'image': <tf.Tensor: shape=(2, 512, 512, 4), dtype=uint8, numpy= array([[[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]]], dtype=uint8)>} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("tf") >>> ds[0]["audio"]["array"] <tf.Tensor: shape=(202311,), dtype=float32, numpy= array([ 6.1035156e-05, 1.5258789e-05, 1.6784668e-04, ..., -1.5258789e-05, -1.5258789e-05, 1.5258789e-05], dtype=float32)> >>> ds[0]["audio"]["sampling_rate"] <tf.Tensor: shape=(), dtype=int32, numpy=44100> ``` ## Data loading Although you can load individual samples and batches just by indexing into your dataset, this won't work if you want to use Keras methods like `fit()` and `predict()`. 
You could write a generator function that shuffles and loads batches from your dataset and `fit()` on that, but that sounds like a lot of unnecessary work. Instead, if you want to stream data from your dataset on-the-fly, we recommend converting your dataset to a `tf.data.Dataset` using the `to_tf_dataset()` method. The `tf.data.Dataset` class covers a wide range of use-cases - it is often created from Tensors in memory, or using a load function to read files on disc or external storage. The dataset can be transformed arbitrarily with the `map()` method, or methods like `batch()` and `shuffle()` can be used to create a dataset that's ready for training. These methods do not modify the stored data in any way - instead, the methods build a data pipeline graph that will be executed when the dataset is iterated over, usually during model training or inference. This is different from the `map()` method of Hugging Face `Dataset` objects, which runs the map function immediately and saves the new or changed columns. Since the entire data preprocessing pipeline can be compiled in a `tf.data.Dataset`, this approach allows for massively parallel, asynchronous data loading and training. However, the requirement for graph compilation can be a limitation, particularly for Hugging Face tokenizers, which are usually not (yet!) compilable as part of a TF graph. As a result, we usually advise pre-processing the dataset as a Hugging Face dataset, where arbitrary Python functions can be used, and then converting to `tf.data.Dataset` afterwards using `to_tf_dataset()` to get a batched dataset ready for training. To see examples of this approach, please see the [examples](https://github.com/huggingface/transformers/tree/main/examples) or [notebooks](https://huggingface.co/docs/transformers/notebooks) for `transformers`. ### Using `to_tf_dataset()` Using `to_tf_dataset()` is straightforward. Once your dataset is preprocessed and ready, simply call it like so: ```py >>> from datasets import Dataset >>> data = {"inputs": [[1, 2],[3, 4]], "labels": [0, 1]} >>> ds = Dataset.from_dict(data) >>> tf_ds = ds.to_tf_dataset( columns=["inputs"], label_cols=["labels"], batch_size=2, shuffle=True ) ``` The returned `tf_ds` object here is now fully ready to train on, and can be passed directly to `model.fit()`. Note that you set the batch size when creating the dataset, and so you don't need to specify it when calling `fit()`: ```py >>> model.fit(tf_ds, epochs=2) ``` For a full description of the arguments, please see the [`~Dataset.to_tf_dataset`] documentation. In many cases, you will also need to add a `collate_fn` to your call. This is a function that takes multiple elements of the dataset and combines them into a single batch. When all elements have the same length, the built-in default collator will suffice, but for more complex tasks a custom collator may be necessary. In particular, many tasks have samples with varying sequence lengths which will require a [data collator](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator) that can pad batches correctly. You can see examples of this in the `transformers` NLP [examples](https://github.com/huggingface/transformers/tree/main/examples) and [notebooks](https://huggingface.co/docs/transformers/notebooks), where variable sequence lengths are very common. If you find that loading with `to_tf_dataset` is slow, you can also use the `num_workers` argument. This spins up multiple subprocesses to load data in parallel. 
This feature is recent and still somewhat experimental - please file an issue if you encounter any bugs while using it! ### When to use to_tf_dataset The astute reader may have noticed at this point that we have offered two approaches to achieve the same goal - if you want to pass your dataset to a TensorFlow model, you can either convert the dataset to a `Tensor` or `dict` of `Tensors` using `.with_format('tf')`, or you can convert the dataset to a `tf.data.Dataset` with `to_tf_dataset()`. Either of these can be passed to `model.fit()`, so which should you choose? The key thing to recognize is that when you convert the whole dataset to `Tensor`s, it is static and fully loaded into RAM. This is simple and convenient, but if any of the following apply, you should probably use `to_tf_dataset()` instead: - Your dataset is too large to fit in RAM. `to_tf_dataset()` streams only one batch at a time, so even very large datasets can be handled with this method. - You want to apply random transformations using `dataset.with_transform()` or the `collate_fn`. This is common in several modalities, such as image augmentations when training vision models, or random masking when training masked language models. Using `to_tf_dataset()` will apply those transformations at the moment when a batch is loaded, which means the same samples will get different augmentations each time they are loaded. This is usually what you want. - Your data has a variable dimension, such as input texts in NLP that consist of varying numbers of tokens. When you create a batch with samples with a variable dimension, the standard solution is to pad the shorter samples to the length of the longest one. When you stream samples from a dataset with `to_tf_dataset`, you can apply this padding to each batch via your `collate_fn`. However, if you want to convert such a dataset to dense `Tensor`s, then you will have to pad samples to the length of the longest sample in *the entire dataset!* This can result in huge amounts of padding, which wastes memory and reduces your model's speed. ### Caveats and limitations Right now, `to_tf_dataset()` always returns a batched dataset - we will add support for unbatched datasets soon!
datasets/docs/source/use_with_tensorflow.mdx/0
{ "file_path": "datasets/docs/source/use_with_tensorflow.mdx", "repo_id": "datasets", "token_count": 3825 }
100
import logging import os from argparse import ArgumentParser from collections.abc import Generator from shutil import rmtree import datasets.config from datasets.builder import DatasetBuilder from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_manager import DownloadMode from datasets.info import DatasetInfosDict from datasets.load import dataset_module_factory, get_dataset_builder_class from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import ERROR, get_logger logger = get_logger(__name__) def _test_command_factory(args): return TestCommand( args.dataset, args.name, args.cache_dir, args.data_dir, args.all_configs, args.save_info or args.save_infos, args.ignore_verifications, args.force_redownload, args.clear_cache, args.num_proc, ) class TestCommand(BaseDatasetsCLICommand): __test__ = False # to tell pytest it's not a test class @staticmethod def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("test", help="Test dataset loading.") test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.", ) test_parser.add_argument( "--data_dir", type=str, default=None, help="Can be used to specify a manual directory to get the files from.", ) test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") test_parser.add_argument( "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" ) test_parser.add_argument( "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks.", ) test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") test_parser.add_argument( "--clear_cache", action="store_true", help="Remove downloaded files and cached datasets after each config test", ) test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes") # aliases test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") test_parser.set_defaults(func=_test_command_factory) def __init__( self, dataset: str, name: str, cache_dir: str, data_dir: str, all_configs: bool, save_infos: bool, ignore_verifications: bool, force_redownload: bool, clear_cache: bool, num_proc: int, ): self._dataset = dataset self._name = name self._cache_dir = cache_dir self._data_dir = data_dir self._all_configs = all_configs self._save_infos = save_infos self._ignore_verifications = ignore_verifications self._force_redownload = force_redownload self._clear_cache = clear_cache self._num_proc = num_proc if clear_cache and not cache_dir: print( "When --clear_cache is used, specifying a cache directory is mandatory.\n" "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" "Please provide a --cache_dir that will be used to test the dataset." 
) exit(1) if save_infos: self._ignore_verifications = True def run(self): logging.getLogger("filelock").setLevel(ERROR) if self._name is not None and self._all_configs: print("Both parameters `config` and `all_configs` can't be used at once.") exit(1) path, config_name = self._dataset, self._name module = dataset_module_factory(path) builder_cls = get_dataset_builder_class(module) n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 def get_builders() -> Generator[DatasetBuilder, None, None]: if self._all_configs and builder_cls.BUILDER_CONFIGS: for i, config in enumerate(builder_cls.BUILDER_CONFIGS): if "config_name" in module.builder_kwargs: yield builder_cls( cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: yield builder_cls( config_name=config.name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: if "config_name" in module.builder_kwargs: yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) else: yield builder_cls( config_name=config_name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) for j, builder in enumerate(get_builders()): print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") builder._record_infos = os.path.exists( os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) ) # record checksums only if we need to update a (deprecated) dataset_infos.json builder.download_and_prepare( download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS if not self._force_redownload else DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS if self._ignore_verifications else VerificationMode.ALL_CHECKS, num_proc=self._num_proc, ) builder.as_dataset() # If save_infos=True, we create the dataset card (README.md) # The dataset_infos are saved in the YAML part of the README.md # This is to allow the user to upload them on HF afterwards. if self._save_infos: save_infos_dir = os.path.basename(path) if not os.path.isdir(path) else path os.makedirs(save_infos_dir, exist_ok=True) DatasetInfosDict(**{builder.config.name: builder.info}).write_to_directory(save_infos_dir) print(f"Dataset card saved at {os.path.join(save_infos_dir, datasets.config.REPOCARD_FILENAME)}") # If clear_cache=True, the download folder and the dataset builder cache directory are deleted if self._clear_cache: if os.path.isdir(builder._cache_dir): logger.warning(f"Clearing cache at {builder._cache_dir}") rmtree(builder._cache_dir) download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) if os.path.isdir(download_dir): logger.warning(f"Clearing cache at {download_dir}") rmtree(download_dir) print("Test successful.")
datasets/src/datasets/commands/test.py/0
{ "file_path": "datasets/src/datasets/commands/test.py", "repo_id": "datasets", "token_count": 3682 }
101
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class Translation: """`Feature` for translations with fixed languages per example. Here for compatibility with tfds. Args: languages (`dict`): A dictionary for each example mapping string language codes to string translations. Example: ```python >>> # At construction time: >>> datasets.features.Translation(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': 'le chat', ... 'de': 'die katze' ... } ``` """ languages: list[str] id: Optional[str] = field(default=None, repr=False) # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="Translation", init=False, repr=False) def __call__(self): return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]: """Flatten the Translation feature into a dictionary.""" from .features import Value return {k: Value("string") for k in sorted(self.languages)} @dataclass class TranslationVariableLanguages: """`Feature` for translations with variable languages per example. Here for compatibility with tfds. Args: languages (`dict`): A dictionary for each example mapping string language codes to one or more string translations. The languages present may vary from example to example. Returns: - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`): Language codes sorted in ascending order or plain text translations, sorted to align with language codes. Example: ```python >>> # At construction time: >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de']) >>> # During data generation: >>> yield { ... 'en': 'the cat', ... 'fr': ['le chat', 'la chatte,'] ... 'de': 'die katze' ... } >>> # Tensor returned : >>> { ... 'language': ['en', 'de', 'fr', 'fr'], ... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'], ... } ``` """ languages: Optional[list] = None num_languages: Optional[int] = None id: Optional[str] = field(default=None, repr=False) # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = None _type: str = field(default="TranslationVariableLanguages", init=False, repr=False) def __post_init__(self): self.languages = sorted(set(self.languages)) if self.languages else None self.num_languages = len(self.languages) if self.languages else None def __call__(self): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def encode_example(self, translation_dict): lang_set = set(self.languages) if set(translation_dict) == {"language", "translation"}: return translation_dict elif self.languages and set(translation_dict) - lang_set: raise ValueError( f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})." ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. translation_tuples = [] for lang, text in translation_dict.items(): if isinstance(text, str): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. 
languages, translations = zip(*sorted(translation_tuples)) return {"language": languages, "translation": translations} def flatten(self) -> Union["FeatureType", dict[str, "FeatureType"]]: """Flatten the TranslationVariableLanguages feature into a dictionary.""" from .features import List, Value return { "language": List(Value("string")), "translation": List(Value("string")), }
datasets/src/datasets/features/translation.py/0
{ "file_path": "datasets/src/datasets/features/translation.py", "repo_id": "datasets", "token_count": 1684 }
102
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
datasets/src/datasets/io/abc.py/0
{ "file_path": "datasets/src/datasets/io/abc.py", "repo_id": "datasets", "token_count": 721 }
103
import datasets

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None

    def __post_init__(self):
        super().__post_init__()


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: list[str]  # definition at the bottom of the script


# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
    ".3gp",
    ".3g2",
    ".avi",
    ".asf",
    ".flv",
    ".mp4",
    ".mov",
    ".m4v",
    ".mkv",
    ".mpg",
    ".webm",
    ".f4v",
    ".wmv",
    ".wma",
    ".ogg",
    ".ogm",
    ".mxf",
    ".nut",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
{ "file_path": "datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py", "repo_id": "datasets", "token_count": 726 }
104
import itertools
import warnings
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None

    def __post_init__(self):
        super().__post_init__()


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        warnings.warn(
            "The Pandas builder is deprecated and will be removed in the next major version of datasets.",
            FutureWarning,
        )
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/pandas/pandas.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/pandas/pandas.py", "repo_id": "datasets", "token_count": 1040 }
105
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import asyncio import glob import io import json import multiprocessing import os import posixpath import re import shutil import tarfile import time import xml.dom.minidom import zipfile from collections.abc import Generator from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Optional, TypeVar, Union from unittest.mock import patch from urllib.parse import urlparse from xml.etree import ElementTree as ET import fsspec import huggingface_hub import huggingface_hub.errors import requests from fsspec.core import strip_protocol, url_to_fs from fsspec.utils import can_be_local from huggingface_hub.utils import EntryNotFoundError, get_session, insecure_hashlib from packaging import version from .. import __version__, config from ..download.download_config import DownloadConfig from ..filesystems import COMPRESSION_FILESYSTEMS from . import _tqdm, logging from ._filelock import FileLock from .extract import ExtractManager from .track import TrackedIterableFromGenerator try: from aiohttp.client_exceptions import ClientError as _AiohttpClientError except ImportError: # aiohttp is not available; synthesize an exception type # that will never be raised by any actual code for use in the `except` # clause only. class _AiohttpClientError(Exception): pass logger = logging.get_logger(__name__) # pylint: disable=invalid-name INCOMPLETE_SUFFIX = ".incomplete" T = TypeVar("T", str, Path) def is_remote_url(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_local_path(url_or_filename: str) -> bool: # On unix the scheme of a local path is empty (for both absolute and relative), # while on windows the scheme is the drive name (ex: "c") for absolute paths. # for details on the windows behavior, see https://bugs.python.org/issue42215 return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_relative_path(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) def relative_to_absolute_path(path: T) -> T: """Convert relative path to absolute path.""" abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) return Path(abs_path_str) if isinstance(path, Path) else abs_path_str def url_or_path_join(base_name: str, *pathnames: str) -> str: if is_remote_url(base_name): return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) else: return Path(base_name, *pathnames).as_posix() def url_or_path_parent(url_or_path: str) -> str: if is_remote_url(url_or_path): return url_or_path[: url_or_path.rindex("/")] else: return os.path.dirname(url_or_path) def hash_url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. 
If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".py"): filename += ".py" return filename def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """ if download_config is None: download_config = DownloadConfig(**download_kwargs) cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) # Convert fsspec URL in the format "file://local/path" to "local/path" if can_be_local(url_or_filename): url_or_filename = strip_protocol(url_or_filename) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) url_or_filename, storage_options = _prepare_path_and_storage_options( url_or_filename, download_config=download_config ) # Download files from Hugging Face. # Note: no need to check for https://huggingface.co file URLs since _prepare_path_and_storage_options # prepares Hugging Face HTTP URLs as hf:// paths already if url_or_filename.startswith("hf://"): resolved_path = huggingface_hub.HfFileSystem( endpoint=config.HF_ENDPOINT, token=download_config.token ).resolve_path(url_or_filename) try: output_path = huggingface_hub.HfApi( endpoint=config.HF_ENDPOINT, token=download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(download_config.user_agent), ).hf_hub_download( repo_id=resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision, filename=resolved_path.path_in_repo, force_download=download_config.force_download, proxies=download_config.proxies, ) except ( huggingface_hub.utils.RepositoryNotFoundError, huggingface_hub.utils.EntryNotFoundError, huggingface_hub.utils.RevisionNotFoundError, huggingface_hub.utils.GatedRepoError, ) as e: raise FileNotFoundError(str(e)) from e # Download external files else: output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=download_config.force_download, user_agent=download_config.user_agent, use_etag=download_config.use_etag, token=download_config.token, storage_options=storage_options, download_desc=download_config.download_desc, disable_tqdm=download_config.disable_tqdm, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif is_local_path(url_or_filename): # File, but it doesn't exist. 
raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if output_path is None: return output_path if download_config.extract_compressed_file: if download_config.extract_on_the_fly: # Add a compression prefix to the compressed file so that it can be extracted # as it's being read using xopen. protocol = _get_extraction_protocol(output_path, download_config=download_config) extension = _get_path_extension(url_or_filename.split("::")[0]) if ( protocol and extension not in ["tgz", "tar"] and not url_or_filename.split("::")[0].endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): output_path = relative_to_absolute_path(output_path) if protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(output_path) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file output_path = f"{protocol}://{inner_file}::{output_path}" else: output_path = f"{protocol}://::{output_path}" return output_path # Eager extraction output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( output_path, force_extract=download_config.force_extract ) return relative_to_absolute_path(output_path) def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: ua = f"datasets/{__version__}" ua += f"; python/{config.PY_VERSION}" ua += f"; huggingface_hub/{huggingface_hub.__version__}" ua += f"; pyarrow/{config.PYARROW_VERSION}" if config.TORCH_AVAILABLE: ua += f"; torch/{config.TORCH_VERSION}" if config.TF_AVAILABLE: ua += f"; tensorflow/{config.TF_VERSION}" if config.JAX_AVAILABLE: ua += f"; jax/{config.JAX_VERSION}" if isinstance(user_agent, dict): ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" elif isinstance(user_agent, str): ua += "; " + user_agent return ua def get_authentication_headers_for_url(url: str, token: Optional[Union[str, bool]] = None) -> dict: """Handle the HF authentication""" if url.startswith(config.HF_ENDPOINT): return huggingface_hub.utils.build_hf_headers( token=token, library_name="datasets", library_version=__version__ ) else: return {} def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_HUB_OFFLINE is True.""" if config.HF_HUB_OFFLINE: raise huggingface_hub.errors.OfflineModeIsEnabled( "Offline mode is enabled." if msg is None else "Offline mode is enabled. 
" + str(msg) ) def fsspec_head(url, storage_options=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, path = url_to_fs(url, **(storage_options or {})) return fs.info(path) def stack_multiprocessing_download_progress_bars(): # Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1 # We use environment variables since the download may happen in a subprocess return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"}) class TqdmCallback(fsspec.callbacks.TqdmCallback): def __init__(self, tqdm_kwargs=None, *args, **kwargs): if config.FSSPEC_VERSION < version.parse("2024.2.0"): super().__init__(tqdm_kwargs, *args, **kwargs) self._tqdm = _tqdm # replace tqdm module by datasets.utils.tqdm module else: kwargs["tqdm_cls"] = _tqdm.tqdm super().__init__(tqdm_kwargs, *args, **kwargs) def fsspec_get(url, temp_file, storage_options=None, desc=None, disable_tqdm=False): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, path = url_to_fs(url, **(storage_options or {})) callback = TqdmCallback( tqdm_kwargs={ "desc": desc or "Downloading", "unit": "B", "unit_scale": True, "position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" and multiprocessing.current_process()._identity else None, "disable": disable_tqdm, } ) fs.get_file(path, temp_file.name, callback=callback) def get_from_cache( url, cache_dir=None, force_download=False, user_agent=None, use_etag=True, token=None, storage_options=None, download_desc=None, disable_tqdm=False, ) -> str: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk """ if storage_options is None: storage_options = {} if cache_dir is None: cache_dir = config.HF_DATASETS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) response = None etag = None # Try a first time to file the file on the local file system without eTag (None) # if we don't ask for 'force_download' then we spare a request filename = hash_url_to_filename(url, etag=None) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download and not use_etag: return cache_path # Prepare headers for authentication headers = get_authentication_headers_for_url(url, token=token) if user_agent is not None: headers["user-agent"] = user_agent response = fsspec_head(url, storage_options=storage_options) etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None # Try a second time filename = hash_url_to_filename(url, etag) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # Retry in case previously locked processes just enter after the precedent process releases the lock if os.path.exists(cache_path) and not force_download: return cache_path incomplete_path = cache_path + ".incomplete" # Download to temporary file, then copy to cache path once finished. 
# Otherwise, you get corrupt cache entries if the download gets interrupted. with open(incomplete_path, "w+b") as temp_file: logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") # GET file object fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc, disable_tqdm=disable_tqdm) logger.info(f"storing {url} in cache at {cache_path}") shutil.move(temp_file.name, cache_path) logger.info(f"creating metadata file for {cache_path}") meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w", encoding="utf-8") as meta_file: json.dump(meta, meta_file) return cache_path def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) return fn return docstring_decorator def estimate_dataset_size(paths): return sum(path.stat().st_size for path in paths) def readline(f: io.RawIOBase): # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 res = bytearray() while True: b = f.read(1) if not b: break res += b if res.endswith(b"\n"): break return bytes(res) ####################### # Streaming utilities # ####################### BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", "arrow", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{ extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS for extension in fs_class.extensions }, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL = { extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS for extension in fs_class.extensions } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in 
range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. 
download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: optional """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) try: size = fs.size(main_hop) except EntryNotFoundError: raise FileNotFoundError(f"No such file: {path}") if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except ( _AiohttpClientError, asyncio.TimeoutError, requests.exceptions.ConnectionError, requests.exceptions.Timeout, ) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out try: file_obj.read = read_with_retries except AttributeError: # read-only attribute orig_file_obj = file_obj file_obj = io.RawIOBase() file_obj.read = read_with_retries file_obj.__getattr__ = lambda _, attr: getattr(orig_file_obj, attr) return file_obj def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> tuple[str, dict[str, dict[str, Any]]]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> tuple[str, dict[str, dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or HfFileSystem In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. Storage options are formatted in the form {protocol: storage_options_for_protocol} """ token = None if download_config is None else download_config.token if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath: urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = download_config.storage_options[protocol].copy() elif download_config is not None and protocol not in download_config.storage_options: storage_options = { option_name: option_value for option_name, option_value in download_config.storage_options.items() if option_name not in fsspec.available_protocols() } else: storage_options = {} if protocol in {"http", "https"}: client_kwargs = storage_options.pop("client_kwargs", {}) storage_options["client_kwargs"] = {"trust_env": True, **client_kwargs} # Enable reading proxy env variables if "drive.google.com" in urlpath: response = get_session().head(urlpath, timeout=10) for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options = {"cookies": cookies, **storage_options} # Fix Google Drive URL to avoid Virus scan warning if "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 headers = storage_options.pop("headers", {}) storage_options["headers"] = {"Accept-Encoding": "identity", **headers} elif protocol == "hf": storage_options = { "token": token, "endpoint": config.HF_ENDPOINT, **storage_options, } # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967) if config.HF_HUB_VERSION < version.parse("0.21.0"): storage_options["block_size"] = "default" if storage_options: storage_options = {protocol: storage_options} return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to 
support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): # ignore fsspec-specific kwargs kwargs.pop("block_size", None) return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise file_obj = _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> list[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") paths = fs.listdir(inner_path, detail=False) return [os.path.basename(path.rstrip("/")) for path in paths] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. 
Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {})) globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. 
Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pyarrow.parquet as pq if hasattr(filepath_or_buffer, "read"): return pq.read_table(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. """ if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class ArchiveIterable(TrackedIterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) # Set block_size=0 to get faster streaming # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(TrackedIterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, list[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config)
datasets/src/datasets/utils/file_utils.py/0
{ "file_path": "datasets/src/datasets/utils/file_utils.py", "repo_id": "datasets", "token_count": 22255 }
106
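For context, a minimal sketch of the "::"-chained URL convention that the streaming helpers above implement (the archive URL is hypothetical, and xjoin/xopen are internal helpers from this module, so treat the import path as an assumption rather than public API):

from datasets import DownloadConfig
from datasets.utils.file_utils import xjoin, xopen

# A file inside a remote zip archive, addressed by chaining protocols with "::".
archive = "zip://::https://example.com/archive.zip"  # hypothetical URL
inner = xjoin(archive, "folder1", "file.txt")
# -> "zip://folder1/file.txt::https://example.com/archive.zip"

# xopen streams the inner file through fsspec, adding authentication headers,
# Google Drive cookies and read retries as implemented above.
with xopen(inner, "r", download_config=DownloadConfig()) as f:
    print(f.read())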
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF-specific utils import.""" import os import warnings from functools import partial from math import ceil from uuid import uuid4 import numpy as np import pyarrow as pa from multiprocess import get_context try: from multiprocess.shared_memory import SharedMemory except ImportError: SharedMemory = None # Version checks should prevent this being called on older Python versions from .. import config def minimal_tf_collate_fn(features): if isinstance(features, dict): # case batch_size=None: nothing to collate return features elif config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") first = features[0] batch = {} for k, v in first.items(): if isinstance(v, np.ndarray): batch[k] = np.stack([f[k] for f in features]) elif isinstance(v, tf.Tensor): batch[k] = tf.stack([f[k] for f in features]) else: batch[k] = np.array([f[k] for f in features]) return batch def minimal_tf_collate_fn_with_renaming(features): batch = minimal_tf_collate_fn(features) if "label" in batch: batch["labels"] = batch["label"] del batch["label"] return batch def is_numeric_pa_type(pa_type): if pa.types.is_list(pa_type): return is_numeric_pa_type(pa_type.value_type) return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type) def np_get_batch( indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False ): if not isinstance(indices, np.ndarray): indices = indices.numpy() is_batched = True # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices if isinstance(indices, np.integer): batch = dataset[indices.item()] is_batched = False elif np.all(np.diff(indices) == 1): batch = dataset[indices[0] : indices[-1] + 1] elif isinstance(indices, np.ndarray): batch = dataset[indices] else: raise RuntimeError(f"Unexpected type for indices: {type(indices)}") if cols_to_retain is not None: batch = { key: value for key, value in batch.items() if key in cols_to_retain or key in ("label", "label_ids", "labels") } if is_batched: actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] batch = collate_fn(batch, **collate_fn_args) if return_dict: out_batch = {} for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch[col] = array else: out_batch = [] for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch.append(array) return out_batch def dataset_to_tf( dataset, cols_to_retain, 
collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, ): """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess equivalent is multiprocess_dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything # to the NumPy multiprocessing path. if hasattr(tf, "random_index_shuffle"): random_index_shuffle = tf.random_index_shuffle elif hasattr(tf.random.experimental, "index_shuffle"): random_index_shuffle = tf.random.experimental.index_shuffle else: if len(dataset) > 10_000_000: warnings.warn( "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. " "If you are iterating over a dataset with a very large number of samples, consider " "upgrading to TF >= 2.9." 
) random_index_shuffle = None getter_fn = partial( np_get_batch, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=False, ) # This works because dictionaries always output in the same order tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()] @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) def fetch_function(indices): output = tf.py_function( getter_fn, inp=[indices], Tout=tout, ) return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())} tf_dataset = tf.data.Dataset.range(len(dataset)) if shuffle and random_index_shuffle is not None: base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64)) def scan_random_index(state, index): if tf.reduce_all(state == -1): # This generates a new random seed once per epoch only, # to ensure that we iterate over each sample exactly once per epoch state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64) shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1) return state, shuffled_index tf_dataset = tf_dataset.scan(base_seed, scan_random_index) elif shuffle: tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality()) if batch_size is not None: tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder) tf_dataset = tf_dataset.map(fetch_function) if batch_size is not None: def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} else: # Ensure shape but remove batch dimension of output_signature[key].shape def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()} return tf_dataset.map(ensure_shapes) class SharedMemoryContext: # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted # The process that creates shared memory is always the one responsible for unlinking it in the end def __init__(self): self.created_shms = [] self.opened_shms = [] def get_shm(self, name, size, create): shm = SharedMemory(size=int(size), name=name, create=create) if create: # We only unlink the ones we created in this context self.created_shms.append(shm) else: # If we didn't create it, we only close it when done, we don't unlink it self.opened_shms.append(shm) return shm def get_array(self, name, shape, dtype, create): shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create) return np.ndarray(shape, dtype=dtype, buffer=shm.buf) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): for shm in self.created_shms: shm.close() shm.unlink() for shm in self.opened_shms: shm.close() class NumpyMultiprocessingGenerator: def __init__( self, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): self.dataset = dataset self.cols_to_retain = cols_to_retain self.collate_fn = collate_fn self.collate_fn_args = collate_fn_args self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype is np.str_] # Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize self.columns_to_np_types = { col: dtype if col not in self.string_columns else np.dtype("U1") for col, dtype in columns_to_np_types.items() } self.output_signature = output_signature self.shuffle = 
shuffle self.batch_size = batch_size self.drop_remainder = drop_remainder self.num_workers = num_workers # Because strings are converted to characters, we need to add one extra dimension to the shape self.columns_to_ranks = { col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1 for col, spec in output_signature.items() } def __iter__(self): # Make sure we only spawn workers if they have work to do num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size))) # Do the shuffling in iter so that it's done at the start of each epoch per_worker_batches, final_batch, final_batch_worker = self.distribute_batches( self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle ) ctx = get_context("spawn") names = [] shape_arrays = [] workers = [] array_ready_events = [ctx.Event() for _ in range(num_workers)] array_loaded_events = [ctx.Event() for _ in range(num_workers)] base_args = { "dataset": self.dataset, "cols_to_retain": self.cols_to_retain, "collate_fn": self.collate_fn, "collate_fn_args": self.collate_fn_args, "columns_to_np_types": self.columns_to_np_types, "columns_to_ranks": self.columns_to_ranks, "string_columns": self.string_columns, } with SharedMemoryContext() as shm_ctx: for i in range(num_workers): worker_random_id = str(uuid4()) worker_name = f"dw_{i}_{worker_random_id}"[:10] names.append(worker_name) worker_shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True) for col, rank in self.columns_to_ranks.items() } shape_arrays.append(worker_shape_arrays) worker_indices = per_worker_batches[i] if i == final_batch_worker and final_batch is not None: final_batch_arg = final_batch else: final_batch_arg = None worker_kwargs = { "worker_name": worker_name, "indices": worker_indices, "extra_batch": final_batch_arg, "array_ready_event": array_ready_events[i], "array_loaded_event": array_loaded_events[i], **base_args, } worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True) worker.start() workers.append(worker) end_signal_received = False while not end_signal_received: for i in range(num_workers): if not array_ready_events[i].wait(timeout=60): raise TimeoutError("Data loading worker timed out!") array_ready_events[i].clear() array_shapes = shape_arrays[i] if any(np.any(shape < 0) for shape in array_shapes.values()): # Child processes send negative array shapes to indicate # that no more data is going to be sent end_signal_received = True break # Matt: Because array shapes are variable we recreate the shared memory each iteration. # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process. # A future optimization, at the cost of some code complexity, could be to reuse shared memory # between iterations, but this would require knowing in advance the maximum size, or having # a system to only create a new memory block when a new maximum size is seen. # Another potential optimization would be to figure out which memory copies are necessary, # or whether we can yield objects straight out of shared memory. 
with SharedMemoryContext() as batch_shm_ctx: # This memory context only lasts long enough to copy everything out of the batch arrays = { col: batch_shm_ctx.get_array( f"{names[i]}_{col}", shape=shape, dtype=self.columns_to_np_types[col], create=False, ) for col, shape in array_shapes.items() } # Copy everything out of shm because the memory # will be unlinked by the child process at some point arrays = {col: np.copy(arr) for col, arr in arrays.items()} # Now we convert any unicode char arrays to strings for string_col in self.string_columns: arrays[string_col] = ( arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1) ) yield arrays array_loaded_events[i].set() # Now we just do some cleanup # Shared memory is cleaned up by the context manager, so we just make sure workers finish for worker in workers: worker.join() def __call__(self): return self @staticmethod def worker_loop( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, columns_to_ranks, string_columns, indices, extra_batch, worker_name, array_ready_event, array_loaded_event, ): os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory def send_batch_to_parent(indices): batch = np_get_batch( indices=indices, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=True, ) # Now begins the fun part where we start shovelling shared memory at the parent process out_arrays = {} with SharedMemoryContext() as batch_shm_ctx: # The batch shared memory context exists only as long as it takes for the parent process # to read everything, after which it cleans everything up again for col, cast_dtype in columns_to_np_types.items(): # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor array = batch[col] if col in string_columns: # We can't send unicode arrays over shared memory, so we convert to single chars ("U1") # which have a fixed width of 4 bytes. The parent process will convert these back to strings. array = array.view("U1").reshape(array.shape + (-1,)) shape_arrays[col][:] = array.shape out_arrays[col] = batch_shm_ctx.get_array( f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True ) out_arrays[col][:] = array array_ready_event.set() array_loaded_event.wait() array_loaded_event.clear() with SharedMemoryContext() as shm_ctx: shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False) for col, rank in columns_to_ranks.items() } for batch in indices: send_batch_to_parent(batch) if extra_batch is not None: send_batch_to_parent(extra_batch) # Now we send a batsignal to the parent process that we're done for col, array in shape_arrays.items(): array[:] = -1 array_ready_event.set() @staticmethod def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle): indices = np.arange(len(dataset)) if shuffle: np.random.shuffle(indices) num_samples = len(indices) # We distribute the batches so that reading from the workers in round-robin order yields the exact # order specified in indices. This is only important when shuffle is False, but we do it regardless. 
incomplete_batch_cutoff = num_samples - (num_samples % batch_size) indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff]) if drop_remainder or len(last_incomplete_batch) == 0: last_incomplete_batch = None indices = indices.reshape(-1, batch_size) num_batches = len(indices) final_batches_cutoff = num_batches - (num_batches % num_workers) indices, final_batches = np.split(indices, [final_batches_cutoff]) indices = indices.reshape(-1, num_workers, batch_size) per_worker_indices = np.split(indices, indices.shape[1], axis=1) per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices] # Distribute the final batches to the first workers for i in range(len(final_batches)): # len(final_batches) can be zero, and is always less than num_workers per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0) # Add the last incomplete batch to the next worker, which might be the first worker if last_incomplete_batch is not None: incomplete_batch_worker_idx = len(final_batches) else: incomplete_batch_worker_idx = None return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx def multiprocess_dataset_to_tf( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process equivalent is dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1. 
Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") data_generator = NumpyMultiprocessingGenerator( dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, num_workers=num_workers, ) tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature) if drop_remainder: dataset_length = int(len(dataset) // batch_size) else: dataset_length = int(ceil(len(dataset) / batch_size)) return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
datasets/src/datasets/utils/tf_utils.py/0
{ "file_path": "datasets/src/datasets/utils/tf_utils.py", "repo_id": "datasets", "token_count": 10951 }
107
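The batch-distribution logic above is what keeps multi-process loading deterministic: batches are laid out so that the parent, reading workers in round-robin order, reconstructs the original batch order. Below is a minimal, self-contained sketch (the helper name is invented for illustration and is not part of tf_utils.py) that mirrors the numpy splitting used by `distribute_batches`, so the invariant can be checked in isolation:

import numpy as np


# Hypothetical standalone helper (name invented) mirroring the splitting done in
# NumpyMultiprocessingGenerator.distribute_batches above.
def split_batches_round_robin(num_samples, batch_size, num_workers):
    indices = np.arange(num_samples)
    # Split off the trailing incomplete batch, if any
    incomplete_cutoff = num_samples - (num_samples % batch_size)
    full, incomplete = np.split(indices, [incomplete_cutoff])
    batches = full.reshape(-1, batch_size)
    # Split off the batches that cannot be spread evenly over all workers
    num_batches = len(batches)
    even_cutoff = num_batches - (num_batches % num_workers)
    even, leftover = np.split(batches, [even_cutoff])
    # Interleave the evenly divisible batches: axis 1 indexes the worker
    stacked = even.reshape(-1, num_workers, batch_size)
    per_worker = [np.squeeze(w, 1) for w in np.split(stacked, num_workers, axis=1)]
    return per_worker, leftover, incomplete


per_worker, leftover, incomplete = split_batches_round_robin(num_samples=10, batch_size=2, num_workers=3)
# Reading per_worker[0], per_worker[1], per_worker[2] in turn yields batches in the original
# order; distribute_batches then appends the `leftover` full batches to the first workers and
# hands the `incomplete` batch (empty here, since 10 % 2 == 0) to the next worker in line.

With num_samples=10, batch_size=2 and num_workers=3, worker 0 gets batch [0, 1], worker 1 gets [2, 3], worker 2 gets [4, 5], and the two leftover batches [6, 7] and [8, 9] are appended to workers 0 and 1 respectively, exactly as distribute_batches does.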
import posixpath
from pathlib import Path
from unittest.mock import patch

import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry


class MockFileSystem(AbstractFileSystem):
    protocol = "mock"

    def __init__(self, *args, local_root_dir, **kwargs):
        super().__init__()
        self._fs = LocalFileSystem(*args, **kwargs)
        self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"

    def mkdir(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.mkdir(path, *args, **kwargs)

    def makedirs(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.makedirs(path, *args, **kwargs)

    def rmdir(self, path):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.rmdir(path)

    def ls(self, path, detail=True, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        out = self._fs.ls(path, detail=detail, *args, **kwargs)
        if detail:
            return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
        else:
            return [name[len(self.local_root_dir) :] for name in out]

    def info(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        out = dict(self._fs.info(path, *args, **kwargs))
        out["name"] = out["name"][len(self.local_root_dir) :]
        return out

    def cp_file(self, path1, path2, *args, **kwargs):
        path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
        path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
        return self._fs.cp_file(path1, path2, *args, **kwargs)

    def rm_file(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.rm_file(path, *args, **kwargs)

    def rm(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.rm(path, *args, **kwargs)

    def _open(self, path, *args, **kwargs):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs._open(path, *args, **kwargs)

    def created(self, path):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.created(path)

    def modified(self, path):
        path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
        return self._fs.modified(path)

    @classmethod
    def _strip_protocol(cls, path):
        path = stringify_path(path)
        if path.startswith("mock://"):
            path = path[7:]
        return path


class TmpDirFileSystem(MockFileSystem):
    protocol = "tmp"
    tmp_dir = None

    def __init__(self, *args, **kwargs):
        assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
        super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)

    @classmethod
    def _strip_protocol(cls, path):
        path = stringify_path(path)
        if path.startswith("tmp://"):
            path = path[6:]
        return path


@pytest.fixture
def mock_fsspec():
    _fsspec_registry["mock"] = MockFileSystem
    _fsspec_registry["tmp"] = TmpDirFileSystem
    yield
    del _fsspec_registry["mock"]
    del _fsspec_registry["tmp"]


@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
    local_fs_dir = tmp_path_factory.mktemp("mockfs")
    return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)


@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
    tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
    with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
        yield TmpDirFileSystem()
    TmpDirFileSystem.clear_instance_cache()
datasets/tests/fixtures/fsspec.py/0
{ "file_path": "datasets/tests/fixtures/fsspec.py", "repo_id": "datasets", "token_count": 1757 }
108
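A hedged usage sketch of the fixtures above (the test body is illustrative, not part of the original file, and assumes the fixtures are exposed through a conftest.py): `mockfs` behaves like a remote fsspec filesystem while actually reading and writing under a temporary local root, and the `mock://` prefix is stripped transparently.

def test_mockfs_roundtrip(mockfs):
    # auto_mkdir=True on the wrapped LocalFileSystem creates parent dirs under the tmp root
    with mockfs.open("data/file.txt", "wb") as f:
        f.write(b"hello")
    # Listings and info() have the local temp root stripped from the returned names
    assert mockfs.ls("data", detail=False) == ["data/file.txt"]
    assert mockfs.info("data/file.txt")["name"] == "data/file.txt"
    # The "mock://" protocol prefix is removed by _strip_protocol before resolution
    with mockfs.open("mock://data/file.txt", "rb") as f:
        assert f.read() == b"hello"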
import importlib import shutil import textwrap import pytest from datasets import ClassLabel, DownloadManager from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesDict, DataFilesList, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) remote_files = [ "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt", ] class DummyFeature: pass class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = DummyFeature BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS = [".txt"] @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "autofolder_cache_dir") @pytest.fixture def auto_text_file(text_file): return str(text_file) @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_different_levels" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "subdir" / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_different_levels = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_different_levels @pytest.fixture def data_files_with_one_label_no_metadata(tmp_path, auto_text_file): # only one label found = all files in a single dir/in a root dir data_dir = tmp_path / "data_files_with_one_label" data_dir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) return data_files_with_one_label @pytest.fixture def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / 
"file_class0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file_class1.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"} {"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def file_with_metadata(tmp_path, text_file): filename = tmp_path / "file.txt" shutil.copyfile(text_file, filename) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file2.txt" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / "file3.txt" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "file2.txt", "additional_feature": "Second dummy file"} {"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Train dummy file"} {"file_name": "file2.txt", "additional_feature": "Second train dummy file"} """ ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( """\ {"file_name": "file3.txt", "additional_feature": "Test dummy file"} """ ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert 
len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = archive_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir / "file2.txt" # in subdir shutil.copyfile(auto_text_file, filename2) metadata_filename = archive_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) shutil.make_archive(archive_dir, "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = FolderBasedBuilderConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = FolderBasedBuilderConfig(name="name", data_files=data_files) def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"]) generator = autofolder._generate_examples(**gen_kwargs) assert all(example["label"] in {"class0", "class1"} for _, example in generator) def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir): # builder would try to access non-existing attributes of a default `BuilderConfig` class # as a custom one is not provided with pytest.raises(AttributeError): _ = FolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, ) # test that AutoFolder is extended for streaming when it's child class is instantiated: # see line 115 in src/datasets/streaming.py def test_streaming_patched(): _ = DummyFolderBasedBuilder(data_dir=".") module = importlib.import_module(FolderBasedBuilder.__module__) assert hasattr(module, "_patched_for_streaming") assert module._patched_for_streaming @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels( data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir ): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing labels explicitly 
requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False generator = autofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"base", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir): file, metadata_file = file_with_metadata autofolder = DummyFolderBasedBuilder( data_files=[file, metadata_file], drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is False generator = autofolder._generate_examples(**gen_kwargs) expected_columns = {"base"} if gen_kwargs["add_metadata"]: expected_columns.add("additional_feature") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @pytest.mark.parametrize("remote", [True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_different_levels_no_metadata( data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir ): data_files = remote_files if remote else data_files_with_different_levels_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if files are on different levels in dir structure assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("remote", [False, True]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir): data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if only one label is found (=if there is a single dir) assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert 
isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("n_splits", [1, 2]) def test_data_files_with_metadata_and_splits( streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata ): data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, ) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files.items(), generated_splits): assert split == generated_split.name expected_num_of_examples = len(files) - 1 generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits): assert split == generated_split.name num_of_archives = len(files) expected_num_of_examples = 2 * num_of_archives generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "bad_metadata.jsonl" # bad file metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) assert all("additional_feature" not in example for _, example in generator) def test_data_files_with_custom_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_custom_file_name_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir 
/ "file.txt") metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"text_file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) assert all("text" in example and "text_file_name" not in example for _, example in generator)
datasets/tests/packaged_modules/test_folder_based_builder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_folder_based_builder.py", "repo_id": "datasets", "token_count": 7784 }
109
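For reference, a condensed sketch of the pattern the tests above repeat (the wrapper function is invented for illustration; it reuses the DummyFolderBasedBuilder defined at the top of the file): resolve data files from a local folder, hand them to the builder, and materialize the streamed examples.

from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager


def generate_examples_from_folder(data_dir: str, cache_dir: str, drop_labels: bool = False):
    # Infer split patterns (e.g. a single "train" split) from the directory layout,
    # exactly as the fixtures above do
    data_files = DataFilesDict.from_patterns(get_data_patterns(data_dir), data_dir)
    builder = DummyFolderBasedBuilder(data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels)
    # _split_generators decides whether labels/metadata are added and returns per-split kwargs
    gen_kwargs = builder._split_generators(StreamingDownloadManager())[0].gen_kwargs
    return list(builder._generate_examples(**gen_kwargs))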
from unittest import TestCase

from datasets import List, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], List(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
datasets/tests/test_dataset_list.py/0
{ "file_path": "datasets/tests/test_dataset_list.py", "repo_id": "datasets", "token_count": 875 }
110
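A small usage sketch complementing the tests above (illustrative, not part of the test file): `Dataset.from_list` takes its schema from the first record, so missing keys in later records come back as None and extra keys are dropped.

from datasets import Dataset

records = [
    {"col_1": 3, "col_2": "a"},
    {"col_1": 2, "col_2": "b"},
    {"col_1": 1, "col_2": "c"},
]
ds = Dataset.from_list(records)
assert ds.column_names == ["col_1", "col_2"]  # schema is taken from the first record
assert ds[1] == {"col_1": 2, "col_2": "b"}    # rows come back as plain dicts
assert ds.to_dict() == {"col_1": [3, 2, 1], "col_2": ["a", "b", "c"]}  # columnar view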
import importlib import os import pickle import shutil import tempfile from multiprocessing import Pool from pathlib import Path from unittest import TestCase from unittest.mock import patch import dill import pyarrow as pa import pytest import requests import datasets from datasets import config, load_dataset from datasets.arrow_dataset import Dataset from datasets.arrow_writer import ArrowWriter from datasets.builder import DatasetBuilder from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import DataFilesDict, DataFilesPatternsDict from datasets.dataset_dict import DatasetDict from datasets.download.download_config import DownloadConfig from datasets.exceptions import DatasetNotFoundError from datasets.features import Features, Value from datasets.iterable_dataset import IterableDataset from datasets.load import ( CachedDatasetModuleFactory, HubDatasetModuleFactory, LocalDatasetModuleFactory, PackagedDatasetModuleFactory, infer_module_for_data_files_list, infer_module_for_data_files_list_in_archives, load_dataset_builder, ) from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig from datasets.utils.logging import INFO, get_logger from .utils import ( OfflineSimulationMode, assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, offline, require_pil, require_sndfile, set_current_working_directory_to_temp_dir, ) SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file inside the train/test directories SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files SAMPLE_DATASET_COMMIT_HASH = "0e1cee81e718feadf49560b287c4eb669c2efb1a" SAMPLE_DATASET_COMMIT_HASH2 = "c19550d35263090b1ec2bfefdbd737431fafec40" SAMPLE_DATASET_COMMIT_HASH3 = "aaa2d4bdd1d877d1c6178562cfc584bdfa90f6dc" SAMPLE_DATASET_COMMIT_HASH4 = "507fa72044169a5a1802b7ac2d6bd38d5f310739" SAMPLE_DATASET_COMMIT_HASH5 = "4971fa562942cab8263f56a448c3f831b18f1c27" SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata" SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = ( "hf-internal-testing/audiofolder_two_configs_in_metadata_with_default" ) SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME = "hf-internal-testing/DatasetWithCapitalLetters" SAMPLE_DATASET_NO_CONFIGS_IN_METADATA_COMMIT_HASH = "26cd5079bb0d3cd1521c6894765a0b8edb159d7f" SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA_COMMIT_HASH = "1668dfc91efae975e44457cdabef60fb9200820a" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_COMMIT_HASH = "e71bce498e6c2bd2c58b20b097fdd3389793263f" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT_COMMIT_HASH = "38937109bb4dc7067f575fe6e7b420158eb9cf32" SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME_COMMIT_HASH = "70aa36264a6954920a13dd0465156a60b9f8af4b" SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy" SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy" @pytest.fixture def data_dir(tmp_path): data_dir = 
tmp_path / "data_dir" data_dir.mkdir() with open(data_dir / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir) @pytest.fixture def data_dir_with_arrow(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() output_train = os.path.join(data_dir, "train.arrow") with ArrowWriter(path=output_train) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 output_test = os.path.join(data_dir, "test.arrow") with ArrowWriter(path=output_test) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 return str(data_dir) @pytest.fixture def data_dir_with_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_metadata" data_dir.mkdir() (data_dir / "train").mkdir() (data_dir / "test").mkdir() with open(data_dir / "train" / "cat.jpg", "wb") as f: f.write(b"train_image_bytes") with open(data_dir / "test" / "dog.jpg", "wb") as f: f.write(b"test_image_bytes") with open(data_dir / "train" / "metadata.jsonl", "w") as f: f.write( """\ {"file_name": "cat.jpg", "caption": "Cool train cat image"} """ ) with open(data_dir / "test" / "metadata.jsonl", "w") as f: f.write( """\ {"file_name": "dog.jpg", "caption": "Cool test dog image"} """ ) return str(data_dir) @pytest.fixture def data_dir_with_single_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom drop_labels: true --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_config_and_data_files(tmp_path): data_dir = tmp_path / "data_dir_with_config_and_data_files" cats_data_dir = data_dir / "data" / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "data" / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_files: "data/**/*.jpg" --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_two_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: "v1" drop_labels: true default: true - config_name: "v2" drop_labels: false --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_data_dir_configs_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" 
cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") @pytest.fixture def sub_data_dirs(tmp_path): data_dir2 = tmp_path / "data_dir2" relative_subdir1 = "subdir1" sub_data_dir1 = data_dir2 / relative_subdir1 sub_data_dir1.mkdir(parents=True) with open(sub_data_dir1 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir1 / "test.txt", "w") as f: f.write("bar\n" * 10) relative_subdir2 = "subdir2" sub_data_dir2 = tmp_path / data_dir2 / relative_subdir2 sub_data_dir2.mkdir(parents=True) with open(sub_data_dir2 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir2 / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir2), relative_subdir1 @pytest.fixture def complex_data_dir(tmp_path): data_dir = tmp_path / "complex_data_dir" data_dir.mkdir() (data_dir / "data").mkdir() with open(data_dir / "data" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "test.txt", "w") as f: f.write("bar\n" * 10) with open(data_dir / "README.md", "w") as f: f.write("This is a readme") with open(data_dir / ".dummy", "w") as f: f.write("this is a dummy file that is not a data file") return str(data_dir) @pytest.mark.parametrize( "data_files, expected_module, expected_builder_kwargs", [ (["train.csv"], "csv", {}), (["train.tsv"], "csv", {"sep": "\t"}), (["train.json"], "json", {}), (["train.jsonl"], "json", {}), (["train.parquet"], "parquet", {}), (["train.geoparquet"], "parquet", {}), (["train.gpq"], "parquet", {}), (["train.arrow"], "arrow", {}), (["train.txt"], "text", {}), (["uppercase.TXT"], "text", {}), (["unsupported.ext"], None, {}), ([""], None, {}), ], ) def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs): module, builder_kwargs = infer_module_for_data_files_list(data_files) assert module == expected_module assert builder_kwargs == expected_builder_kwargs @pytest.mark.parametrize( "data_file, expected_module", [ ("zip_csv_path", "csv"), ("zip_csv_with_dir_path", "csv"), ("zip_uppercase_csv_path", "csv"), ("zip_unsupported_ext_path", None), ], ) def test_infer_module_for_data_files_in_archives( data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, zip_uppercase_csv_path, zip_unsupported_ext_path ): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "zip_uppercase_csv_path": zip_uppercase_csv_path, "zip_unsupported_ext_path": zip_unsupported_ext_path, } data_files = [str(data_file_paths[data_file])] inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files) assert inferred_module == expected_module class ModuleFactoryTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures( self, jsonl_path, data_dir, data_dir_with_metadata, data_dir_with_single_config_in_metadata, data_dir_with_config_and_data_files, data_dir_with_two_config_in_metadata, sub_data_dirs, ): self._jsonl_path = jsonl_path self._data_dir = data_dir self._data_dir_with_metadata = data_dir_with_metadata self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata self._data_dir_with_config_and_data_files = data_dir_with_config_and_data_files self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata self._data_dir2 = sub_data_dirs[0] self._sub_data_dir = sub_data_dirs[1] def setUp(self): 
self.cache_dir = tempfile.mkdtemp() self.download_config = DownloadConfig(cache_dir=self.cache_dir) def test_LocalDatasetModuleFactory(self): factory = LocalDatasetModuleFactory(self._data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactory_with_data_dir(self): factory = LocalDatasetModuleFactory(self._data_dir2, data_dir=self._sub_data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) == 1 and len(builder_config.data_files["test"]) == 1 ) assert all( self._sub_data_dir in Path(data_file).parts for data_file in builder_config.data_files["train"] + builder_config.data_files["test"] ) def test_LocalDatasetModuleFactory_with_metadata(self): factory = LocalDatasetModuleFactory(self._data_dir_with_metadata) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) > 0 and len(builder_config.data_files["test"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"]) def test_LocalDatasetModuleFactory_with_single_config_in_metadata(self): factory = LocalDatasetModuleFactory( self._data_dir_with_single_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], ImageFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict) module_builder_configs[0]._resolve_data_files(self._data_dir_with_single_config_in_metadata, DownloadConfig()) assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert len(module_builder_configs[0].data_files) == 1 # one train split assert len(module_builder_configs[0].data_files["train"]) == 2 # two files assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name == "custom" # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def 
test_LocalDatasetModuleFactory_with_config_and_data_files(self): factory = LocalDatasetModuleFactory( self._data_dir_with_config_and_data_files, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs builder_kwargs = module_factory_result.builder_kwargs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "data_files" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["data_files"] == "data/**/*.jpg" assert "data_files" not in builder_kwargs def test_LocalDatasetModuleFactory_data_dir_with_config_and_data_files(self): factory = LocalDatasetModuleFactory(self._data_dir_with_config_and_data_files, data_dir="data") module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs builder_kwargs = module_factory_result.builder_kwargs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "data_files" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["data_files"] == "data/**/*.jpg" assert "data_files" in builder_kwargs assert "train" in builder_kwargs["data_files"] assert len(builder_kwargs["data_files"]["train"]) == 2 def test_LocalDatasetModuleFactory_with_two_configs_in_metadata(self): factory = LocalDatasetModuleFactory( self._data_dir_with_two_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, ImageFolderConfig) assert isinstance(module_builder_config_v2, ImageFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict) assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict) module_builder_config_v1._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig()) module_builder_config_v2._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig()) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["train"] assert len(module_builder_config_v1.data_files["train"]) == 2 assert sorted(module_builder_config_v2.data_files) == ["train"] assert 
len(module_builder_config_v2.data_files["train"]) == 2 assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata assert ( module_factory_result.builder_configs_parameters.default_config_name == "v1" ) # it's marked as a default one in yaml # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def test_PackagedDatasetModuleFactory(self): factory = PackagedDatasetModuleFactory( "json", data_files=self._jsonl_path, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_PackagedDatasetModuleFactory_with_data_dir(self): factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, download_config=self.download_config) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None data_files = module_factory_result.builder_kwargs.get("data_files") assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0 assert Path(data_files["train"][0]).parent.samefile(self._data_dir) assert Path(data_files["test"][0]).parent.samefile(self._data_dir) def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self): factory = PackagedDatasetModuleFactory( "imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None data_files = module_factory_result.builder_kwargs.get("data_files") assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0 assert Path(self._data_dir_with_metadata) in Path(data_files["train"][0]).parents assert Path(self._data_dir_with_metadata) in Path(data_files["test"][0]).parents assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["test"]) @pytest.mark.integration def test_HubDatasetModuleFactory(self): factory = HubDatasetModuleFactory( SAMPLE_DATASET_IDENTIFIER2, commit_hash=SAMPLE_DATASET_COMMIT_HASH2, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) @pytest.mark.integration def test_HubDatasetModuleFactory_with_data_dir(self): data_dir = "data2" factory = HubDatasetModuleFactory( SAMPLE_DATASET_IDENTIFIER3, commit_hash=SAMPLE_DATASET_COMMIT_HASH3, data_dir=data_dir, download_config=self.download_config, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) == 1 and len(builder_config.data_files["test"]) == 1 ) assert all( data_dir in Path(data_file).parts for data_file in builder_config.data_files["train"] + builder_config.data_files["test"] ) @pytest.mark.integration def 
test_HubDatasetModuleFactory_with_metadata(self): factory = HubDatasetModuleFactory( SAMPLE_DATASET_IDENTIFIER4, commit_hash=SAMPLE_DATASET_COMMIT_HASH4, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) > 0 and len(builder_config.data_files["test"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"]) factory = HubDatasetModuleFactory( SAMPLE_DATASET_IDENTIFIER5, commit_hash=SAMPLE_DATASET_COMMIT_HASH5, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files) == 1 and len(builder_config.data_files["train"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) @pytest.mark.integration def test_HubDatasetModuleFactory_with_one_default_config_in_metadata(self): factory = HubDatasetModuleFactory( SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, commit_hash=SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA_COMMIT_HASH, download_config=self.download_config, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], AudioFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict) module_builder_configs[0]._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert sorted(module_builder_configs[0].data_files) == ["test", "train"] assert len(module_builder_configs[0].data_files["train"]) == 3 assert len(module_builder_configs[0].data_files["test"]) == 3 assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name == "custom" # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs 
directly assert "drop_labels" not in module_factory_result.builder_kwargs @pytest.mark.integration def test_HubDatasetModuleFactory_with_two_configs_in_metadata(self): datasets_names = [ (SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_COMMIT_HASH), ( SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT_COMMIT_HASH, ), ] for dataset_name, commit_hash in datasets_names: factory = HubDatasetModuleFactory( dataset_name, commit_hash=commit_hash, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, AudioFolderConfig) assert isinstance(module_builder_config_v2, AudioFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict) assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict) module_builder_config_v1._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) module_builder_config_v2._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["test", "train"] assert len(module_builder_config_v1.data_files["train"]) == 3 assert len(module_builder_config_v1.data_files["test"]) == 3 assert sorted(module_builder_config_v2.data_files) == ["test", "train"] assert len(module_builder_config_v2.data_files["train"]) == 2 assert len(module_builder_config_v2.data_files["test"]) == 1 assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT: assert module_factory_result.builder_configs_parameters.default_config_name == "v1" else: assert module_factory_result.builder_configs_parameters.default_config_name is None @pytest.mark.integration def test_CachedDatasetModuleFactory(self): name = SAMPLE_DATASET_IDENTIFIER2 load_dataset_builder(name, cache_dir=self.cache_dir).download_and_prepare() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedDatasetModuleFactory( name, cache_dir=self.cache_dir, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is 
not None @pytest.mark.parametrize( "factory_class,requires_commit_hash", [ (CachedDatasetModuleFactory, False), (HubDatasetModuleFactory, True), (LocalDatasetModuleFactory, False), (PackagedDatasetModuleFactory, False), ], ) def test_module_factories(factory_class, requires_commit_hash): name = "dummy_name" if requires_commit_hash: factory = factory_class(name, commit_hash="foo") else: factory = factory_class(name) assert factory.name == name @pytest.mark.integration class LoadTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): self._caplog = caplog def setUp(self): self.cache_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.cache_dir) @pytest.mark.integration def test_offline_dataset_module_factory(self): repo_id = SAMPLE_DATASET_IDENTIFIER2 builder = load_dataset_builder(repo_id, cache_dir=self.cache_dir) builder.download_and_prepare() for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): self._caplog.clear() # allow provide the repo id without an explicit path to remote or local actual file dataset_module = datasets.load.dataset_module_factory(repo_id, cache_dir=self.cache_dir) self.assertEqual(dataset_module.module_path, "datasets.packaged_modules.cache.cache") self.assertIn("Using the latest cached version of the dataset", self._caplog.text) @pytest.mark.integration def test_offline_dataset_module_factory_with_capital_letters_in_name(self): repo_id = SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME builder = load_dataset_builder(repo_id, cache_dir=self.cache_dir) builder.download_and_prepare() for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): self._caplog.clear() # allow provide the repo id without an explicit path to remote or local actual file dataset_module = datasets.load.dataset_module_factory(repo_id, cache_dir=self.cache_dir) self.assertEqual(dataset_module.module_path, "datasets.packaged_modules.cache.cache") self.assertIn("Using the latest cached version of the dataset", self._caplog.text) def test_load_dataset_from_hub(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("_dummy") self.assertIn( "Dataset '_dummy' doesn't exist on the Hub", str(context.exception), ) with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("HuggingFaceFW/fineweb-edu", revision="0.0.0") self.assertIn( "Revision '0.0.0' doesn't exist for dataset 'HuggingFaceFW/fineweb-edu' on the Hub.", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("_dummy") if offline_simulation_mode != OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1: self.assertIn( "Couldn't reach '_dummy' on the Hub", str(context.exception), ) def test_load_dataset_namespace(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn( "hf-internal-testing/_dummy", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode) @pytest.mark.integration def test_load_dataset_builder_with_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) assert 
isinstance(builder, ImageFolder) assert builder.config.name == "default" assert builder.config.data_files is not None assert builder.config.drop_metadata is None with pytest.raises(ValueError): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config") @pytest.mark.integration def test_load_dataset_builder_config_kwargs_passed_as_arguments(): builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata assert builder_custom.config.drop_metadata is True @pytest.mark.integration def test_load_dataset_builder_with_two_configs_in_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert isinstance(builder, AudioFolder) assert builder.config.name == "v1" assert builder.config.data_files is not None with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config") @pytest.mark.parametrize("serializer", [pickle, dill]) def test_load_dataset_builder_with_metadata_configs_pickable(serializer): builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) builder_unpickled = serializer.loads(serializer.dumps(builder)) assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS assert list(builder_unpickled.builder_configs) == ["custom"] assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig) builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") builder2_unpickled = serializer.loads(serializer.dumps(builder2)) assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS assert list(builder2_unpickled.builder_configs) == ["v1", "v2"] assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig) assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig) def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir): builder = datasets.load_dataset_builder(complex_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == Path(complex_data_dir).name assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_for_relative_data_dir(complex_data_dir): with set_current_working_directory_to_temp_dir(): relative_data_dir = "relative_data_dir" shutil.copytree(complex_data_dir, relative_data_dir) builder = datasets.load_dataset_builder(relative_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == relative_data_dir assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 @pytest.mark.integration def test_load_dataset_builder_for_community_dataset(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1] assert builder.config.name == 
"default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_fail(): with pytest.raises(DatasetNotFoundError): datasets.load_dataset_builder("blabla") @pytest.mark.integration @pytest.mark.parametrize( "kwargs, expected_train_num_rows, expected_test_num_rows", [ ({}, 2, 2), ({"data_dir": "data1"}, 1, 1), # GH-6918: NonMatchingSplitsSizesError ({"data_files": "data1/train.txt"}, 1, None), # GH-6939: ExpectedMoreSplits ], ) def test_load_dataset_from_hub(kwargs, expected_train_num_rows, expected_test_num_rows): dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3, **kwargs) assert dataset["train"].num_rows == expected_train_num_rows assert (dataset["test"].num_rows == expected_test_num_rows) if expected_test_num_rows else ("test" not in dataset) @pytest.mark.integration @pytest.mark.parametrize("stream_from_cache, ", [False, True]) def test_load_dataset_cached_from_hub(stream_from_cache, caplog): dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3) assert isinstance(dataset, DatasetDict) assert all(isinstance(d, Dataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): caplog.clear() # Load dataset from cache dataset = datasets.load_dataset(SAMPLE_DATASET_IDENTIFIER3, streaming=stream_from_cache) assert len(dataset) == 2 assert "Using the latest cached version of the dataset" in caplog.text assert isinstance(next(iter(dataset["train"])), dict) with pytest.raises(DatasetNotFoundError) as exc_info: datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value) def test_load_dataset_streaming_gz_json(jsonl_gz_path): data_files = jsonl_gz_path ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.integration @pytest.mark.parametrize( "path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"] ) def test_load_dataset_streaming_compressed_files(path): repo_id = "hf-internal-testing/compressed_files" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}" if data_files[-3:] in ("zip", "tar"): # we need to glob "*" inside archives data_files = data_files[-3:] + "://*::" + data_files return # TODO(QL, albert): support re-add support for ZIP and TAR archives streaming ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == { "tokens": ["Ministeri", "de", "Justícia", "d'Espanya"], "ner_tags": [1, 2, 2, 2], "langs": ["ca", "ca", "ca", "ca"], "spans": ["PER: Ministeri de Justícia d'Espanya"], } @pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"]) @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path): paths = {"csv": csv_path, "csv.bz2": bz2_csv_path} data_files = str(paths[path_extension]) features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, 
streaming=streaming) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"]) def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "csv_path": csv_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"]) def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path): data_file_paths = { "zip_jsonl_path": zip_jsonl_path, "zip_jsonl_with_dir_path": zip_jsonl_with_dir_path, "jsonl_path": jsonl_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"]) def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path): data_file_paths = { "zip_text_path": zip_text_path, "zip_text_with_dir_path": zip_text_with_dir_path, "text_path": text_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"text": "0"} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"text": "0"} @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_arrow(streaming, data_dir_with_arrow): ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming) expected_size = 10 if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "foo"} ds_item_counter += 1 assert ds_item_counter == 10 else: assert ds.num_rows == 10 assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "foo"} def 
test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines): data_files = str(text_path_with_unicode_new_lines) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 3 def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension): data_files = str(text_dir_with_unsupported_extension) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 4 @pytest.mark.integration def test_loading_from_the_datasets_hub_with_token(): true_request = requests.Session().request def assert_auth(method, url, *args, headers, **kwargs): assert headers["authorization"] == "Bearer foo" return true_request(method, url, *args, headers=headers, **kwargs) with patch("requests.Session.request") as mock_request: mock_request.side_effect = assert_auth with tempfile.TemporaryDirectory() as tmp_dir: with offline(): with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)): load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo") mock_request.assert_called() @pytest.mark.integration def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data): ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data): builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token) assert isinstance(builder, DatasetBuilder) @pytest.mark.integration def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data): ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_config_kwargs_passed_as_arguments(): ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4) ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert list(ds_default["train"].features) == ["image", "caption"] assert list(ds_custom["train"].features) == ["image"] @require_sndfile @pytest.mark.integration def test_load_hub_dataset_with_single_config_in_metadata(): # load the same dataset but with no configurations (=with default parameters) ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA) assert list(ds["train"].features) == ["audio", "label"] # assert label feature is here as expected by default assert len(ds["train"]) == 5 and len(ds["test"]) == 4 ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) # single config -> no need to specify it assert list(ds2["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds2["train"]) == 3 and len(ds2["test"]) == 3 ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom") assert list(ds3["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3 with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default") @require_sndfile @pytest.mark.integration def test_load_hub_dataset_with_two_config_in_metadata(): ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert list(ds["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, 
"v2") assert list(ds2["train"].features) == [ "audio", "label", ] # assert param `drop_labels=False` from metadata is passed assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1 with pytest.raises(ValueError): # config is required but not specified _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default") ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT) # it's a dataset with the same data but "v1" config is marked as a default one assert list(ds_with_default["train"].features) == list(ds["train"].features) assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"]) @require_sndfile @pytest.mark.integration def test_load_hub_dataset_with_metadata_config_in_parallel(): # assert it doesn't fail (pickling of dynamically created class works) ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2) assert "label" in ds["train"].features assert len(ds["train"]) == 2 and len(ds["test"]) == 1 @require_pil @pytest.mark.integration @pytest.mark.parametrize("streaming", [True]) def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming): ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_items = list(ds) assert len(ds_items) == 2 def test_load_dataset_then_move_then_reload(data_dir, tmp_path, caplog): cache_dir1 = tmp_path / "cache1" cache_dir2 = tmp_path / "cache2" dataset = load_dataset(data_dir, split="train", cache_dir=cache_dir1, trust_remote_code=True) fingerprint1 = dataset._fingerprint del dataset os.rename(cache_dir1, cache_dir2) caplog.clear() with caplog.at_level(INFO, logger=get_logger().name): dataset = load_dataset(data_dir, split="train", cache_dir=cache_dir2) assert "Found cached dataset" in caplog.text assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same" dataset = load_dataset(data_dir, split="test", cache_dir=cache_dir2) assert dataset._fingerprint != fingerprint1 def test_load_dataset_builder_then_edit_then_load_again(tmp_path: Path): dataset_dir = tmp_path / "test_load_dataset_then_edit_then_load_again" dataset_dir.mkdir() with open(dataset_dir / "train.txt", "w") as f: f.write("Hello there") dataset_builder = load_dataset_builder(str(dataset_dir)) with open(dataset_dir / "train.txt", "w") as f: f.write("General Kenobi !") edited_dataset_builder = load_dataset_builder(str(dataset_dir)) assert dataset_builder.cache_dir != edited_dataset_builder.cache_dir @pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500]) def test_load_dataset_local_with_default_in_memory(max_in_memory_dataset_size, data_dir, monkeypatch): current_dataset_size = 148 if max_in_memory_dataset_size == "default": max_in_memory_dataset_size = 0 # default else: 
monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) if max_in_memory_dataset_size: expected_in_memory = current_dataset_size < max_in_memory_dataset_size else: expected_in_memory = False with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): dataset = load_dataset(data_dir) assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory @pytest.mark.integration def test_remote_data_files(): repo_id = "hf-internal-testing/raw_jsonl" filename = "wikiann-bn-validation.jsonl" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}" ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"} def distributed_load_dataset(args): data_name, tmp_dir, datafiles = args dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles) return dataset def test_load_dataset_distributed(tmp_path, csv_path): num_workers = 5 args = "csv", str(tmp_path), csv_path with Pool(processes=num_workers) as pool: # start num_workers processes datasets = pool.map(distributed_load_dataset, [args] * num_workers) assert len(datasets) == num_workers assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets) assert len(datasets[0].cache_files) > 0 assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets) def test_load_dataset_with_storage_options(mockfs): with mockfs.open("data.txt", "w") as f: f.write("Hello there\n") f.write("General Kenobi !") data_files = {"train": ["mock://data.txt"]} ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options) assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}] @require_pil def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file): import PIL.Image filename = os.path.basename(image_file) with mockfs.open(filename, "wb") as fout: with open(image_file, "rb") as fin: fout.write(fin.read()) data_files = {"train": ["mock://" + filename]} ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options) assert len(ds["train"]) == 1 assert isinstance(ds["train"][0]["image"], PIL.Image.Image) def test_load_dataset_with_zip(zip_csv_path): path = str(zip_csv_path.parent) ds = load_dataset(path) assert list(ds.keys()) == ["train"] assert ds["train"].column_names == ["col_1", "col_2", "col_3"] assert ds["train"].num_rows == 8 assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0} @pytest.mark.integration def test_reload_old_cache_from_2_15(tmp_path: Path): cache_dir = tmp_path / "test_reload_old_cache_from_2_15" builder_cache_dir = ( cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata/v2-374bfde4f55442bc/0.0.0/7896925d64deea5d" ) builder_cache_dir.mkdir(parents=True) arrow_path = builder_cache_dir / "audiofolder_two_configs_in_metadata-train.arrow" dataset_info_path = builder_cache_dir / "dataset_info.json" with dataset_info_path.open("w") as f: f.write("{}") arrow_path.touch() builder = load_dataset_builder( "polinaeterna/audiofolder_two_configs_in_metadata", "v2", data_files="v2/train/*", cache_dir=cache_dir.as_posix(), ) assert builder.cache_dir == builder_cache_dir.as_posix() # old cache from 2.15 builder = load_dataset_builder( "polinaeterna/audiofolder_two_configs_in_metadata", "v2", cache_dir=cache_dir.as_posix() ) assert ( 
builder.cache_dir == ( cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata" / "v2" / "0.0.0" / str(builder.hash) ).as_posix() ) # new cache @pytest.mark.integration def test_update_dataset_card_data_with_standalone_yaml(): # Labels defined in .huggingface.yml because they are too long to be in README.md from datasets.utils.metadata import MetadataConfigs with patch( "datasets.utils.metadata.MetadataConfigs.from_dataset_card_data", side_effect=MetadataConfigs.from_dataset_card_data, ) as card_data_read_mock: builder = load_dataset_builder("datasets-maintainers/dataset-with-standalone-yaml") assert card_data_read_mock.call_args.args[0]["license"] is not None # from README.md assert card_data_read_mock.call_args.args[0]["dataset_info"] is not None # from standalone yaml assert card_data_read_mock.call_args.args[0]["tags"] == ["test"] # standalone yaml has precedence assert isinstance( builder.info.features["label"], datasets.ClassLabel ) # correctly loaded from long labels list in standalone yaml
datasets/tests/test_load.py/0
{ "file_path": "datasets/tests/test_load.py", "repo_id": "datasets", "token_count": 25595 }
111
import gc import inspect import logging import os import queue import threading from contextlib import nullcontext from dataclasses import dataclass from typing import Any, Callable, Dict, Optional, Union import pandas as pd import torch import torch.utils.benchmark as benchmark from diffusers.models.modeling_utils import ModelMixin from diffusers.utils.testing_utils import require_torch_gpu, torch_device logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s: %(message)s") logger = logging.getLogger(__name__) NUM_WARMUP_ROUNDS = 5 def benchmark_fn(f, *args, **kwargs): t0 = benchmark.Timer( stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}, num_threads=1, ) return float(f"{(t0.blocked_autorange().mean):.3f}") def flush(): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() # Adapted from https://github.com/lucasb-eyer/cnn_vit_benchmarks/blob/15b665ff758e8062131353076153905cae00a71f/main.py def calculate_flops(model, input_dict): try: from torchprofile import profile_macs except ModuleNotFoundError: raise # This is a hacky way to convert the kwargs to args as `profile_macs` cries about kwargs. sig = inspect.signature(model.forward) param_names = [ p.name for p in sig.parameters.values() if p.kind in ( inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, ) and p.name != "self" ] bound = sig.bind_partial(**input_dict) bound.apply_defaults() args = tuple(bound.arguments[name] for name in param_names) model.eval() with torch.no_grad(): macs = profile_macs(model, args) flops = 2 * macs # 1 MAC operation = 2 FLOPs (1 multiplication + 1 addition) return flops def calculate_params(model): return sum(p.numel() for p in model.parameters()) # Users can define their own in case this doesn't suffice. For most cases, # it should be sufficient. 
def model_init_fn(model_cls, group_offload_kwargs=None, layerwise_upcasting=False, **init_kwargs): model = model_cls.from_pretrained(**init_kwargs).eval() if group_offload_kwargs and isinstance(group_offload_kwargs, dict): model.enable_group_offload(**group_offload_kwargs) else: model.to(torch_device) if layerwise_upcasting: model.enable_layerwise_casting( storage_dtype=torch.float8_e4m3fn, compute_dtype=init_kwargs.get("torch_dtype", torch.bfloat16) ) return model @dataclass class BenchmarkScenario: name: str model_cls: ModelMixin model_init_kwargs: Dict[str, Any] model_init_fn: Callable get_model_input_dict: Callable compile_kwargs: Optional[Dict[str, Any]] = None @require_torch_gpu class BenchmarkMixin: def pre_benchmark(self): flush() torch.compiler.reset() def post_benchmark(self, model): model.cpu() flush() torch.compiler.reset() @torch.no_grad() def run_benchmark(self, scenario: BenchmarkScenario): # 0) Basic stats logger.info(f"Running scenario: {scenario.name}.") try: model = model_init_fn(scenario.model_cls, **scenario.model_init_kwargs) num_params = round(calculate_params(model) / 1e9, 2) try: flops = round(calculate_flops(model, input_dict=scenario.get_model_input_dict()) / 1e9, 2) except Exception as e: logger.info(f"Problem in calculating FLOPs:\n{e}") flops = None model.cpu() del model except Exception as e: logger.info(f"Error while initializing the model and calculating FLOPs:\n{e}") return {} self.pre_benchmark() # 1) plain stats results = {} plain = None try: plain = self._run_phase( model_cls=scenario.model_cls, init_fn=scenario.model_init_fn, init_kwargs=scenario.model_init_kwargs, get_input_fn=scenario.get_model_input_dict, compile_kwargs=None, ) except Exception as e: logger.info(f"Benchmark could not be run with the following error:\n{e}") return results # 2) compiled stats (if any) compiled = {"time": None, "memory": None} if scenario.compile_kwargs: try: compiled = self._run_phase( model_cls=scenario.model_cls, init_fn=scenario.model_init_fn, init_kwargs=scenario.model_init_kwargs, get_input_fn=scenario.get_model_input_dict, compile_kwargs=scenario.compile_kwargs, ) except Exception as e: logger.info(f"Compilation benchmark could not be run with the following error\n: {e}") if plain is None: return results # 3) merge result = { "scenario": scenario.name, "model_cls": scenario.model_cls.__name__, "num_params_B": num_params, "flops_G": flops, "time_plain_s": plain["time"], "mem_plain_GB": plain["memory"], "time_compile_s": compiled["time"], "mem_compile_GB": compiled["memory"], } if scenario.compile_kwargs: result["fullgraph"] = scenario.compile_kwargs.get("fullgraph", False) result["mode"] = scenario.compile_kwargs.get("mode", "default") else: result["fullgraph"], result["mode"] = None, None return result def run_bencmarks_and_collate(self, scenarios: Union[BenchmarkScenario, list[BenchmarkScenario]], filename: str): if not isinstance(scenarios, list): scenarios = [scenarios] record_queue = queue.Queue() stop_signal = object() def _writer_thread(): while True: item = record_queue.get() if item is stop_signal: break df_row = pd.DataFrame([item]) write_header = not os.path.exists(filename) df_row.to_csv(filename, mode="a", header=write_header, index=False) record_queue.task_done() record_queue.task_done() writer = threading.Thread(target=_writer_thread, daemon=True) writer.start() for s in scenarios: try: record = self.run_benchmark(s) if record: record_queue.put(record) else: logger.info(f"Record empty from scenario: {s.name}.") except Exception as e: 
logger.info(f"Running scenario ({s.name}) led to error:\n{e}") record_queue.put(stop_signal) logger.info(f"Results serialized to {filename=}.") def _run_phase( self, *, model_cls: ModelMixin, init_fn: Callable, init_kwargs: Dict[str, Any], get_input_fn: Callable, compile_kwargs: Optional[Dict[str, Any]], ) -> Dict[str, float]: # setup self.pre_benchmark() # init & (optional) compile model = init_fn(model_cls, **init_kwargs) if compile_kwargs: model.compile(**compile_kwargs) # build inputs inp = get_input_fn() # measure run_ctx = torch._inductor.utils.fresh_inductor_cache() if compile_kwargs else nullcontext() with run_ctx: for _ in range(NUM_WARMUP_ROUNDS): _ = model(**inp) time_s = benchmark_fn(lambda m, d: m(**d), model, inp) mem_gb = torch.cuda.max_memory_allocated() / (1024**3) mem_gb = round(mem_gb, 2) # teardown self.post_benchmark(model) del model return {"time": time_s, "memory": mem_gb}
diffusers/benchmarks/benchmarking_utils.py/0
{ "file_path": "diffusers/benchmarks/benchmarking_utils.py", "repo_id": "diffusers", "token_count": 3836 }
112
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ### Translating the Diffusers documentation into your language As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏. **🗞️ Open an issue** To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "🌐 Translating a New Language?" from the "New issue" button. Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list. **🍴 Fork the repository** First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page. Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows: ```bash git clone https://github.com/<YOUR-USERNAME>/diffusers.git ``` **📋 Copy-paste the English version with a new language code** The documentation files are in one leading directory: - [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language. You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following: ```bash cd ~/path/to/diffusers/docs cp -r source/en source/<LANG-ID> ``` Here, `<LANG-ID>` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table. **✍️ Start translating** The fun part comes - translating the text! The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website. > 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/<LANG-ID>/` directory! The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml): ```yaml - sections: - local: pipeline_tutorial # Do not change this! Use the same name for your .md file title: Pipelines for inference # Translate this! ... title: Tutorials # Translate this! 
``` Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter. > 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
diffusers/docs/TRANSLATING.md/0
{ "file_path": "diffusers/docs/TRANSLATING.md", "repo_id": "diffusers", "token_count": 1100 }
113
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# UNet

Some training methods - like LoRA and Custom Diffusion - typically target the UNet's attention layers, but these training methods can also target other non-attention layers. Instead of training all of a model's parameters, only a subset of the parameters are trained, which is faster and more efficient. This class is useful if you're *only* loading weights into a UNet. If you need to load weights into the text encoder or a text encoder and UNet, try using the [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] function instead.

The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters.

<Tip>

To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.

</Tip>

## UNet2DConditionLoadersMixin

[[autodoc]] loaders.unet.UNet2DConditionLoadersMixin
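For illustration, a minimal sketch of loading a LoRA checkpoint into only the UNet could look like the following; the repository id and weight filename are placeholders rather than a specific published checkpoint:

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# load_attn_procs is provided by UNet2DConditionLoadersMixin and only touches the UNet
pipeline.unet.load_attn_procs(
    "your-username/your-sdxl-lora",  # placeholder repository id
    weight_name="pytorch_lora_weights.safetensors",
)

image = pipeline("a cinematic photo of a lighthouse at dusk").images[0]
```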
diffusers/docs/source/en/api/loaders/unet.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/unet.md", "repo_id": "diffusers", "token_count": 406 }
114
<!--Copyright 2025 The HuggingFace Team and The InstantX Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ControlNetUnionModel

ControlNetUnionModel is an implementation of ControlNet for Stable Diffusion XL.

The ControlNet model was introduced in [ControlNetPlus](https://github.com/xinsir6/ControlNetPlus) by xinsir6. It supports multiple conditioning inputs without increasing computation.

*We design a new architecture that can support 10+ control types in condition text-to-image generation and can generate high resolution images visually comparable with midjourney. The network is based on the original ControlNet architecture, we propose two new modules to: 1 Extend the original ControlNet to support different image conditions using the same network parameter. 2 Support multiple conditions input without increasing computation offload, which is especially important for designers who want to edit image in detail, different conditions use the same condition encoder, without adding extra computations or parameters.*

## Loading

By default the [`ControlNetUnionModel`] should be loaded with [`~ModelMixin.from_pretrained`].

```py
from diffusers import StableDiffusionXLControlNetUnionPipeline, ControlNetUnionModel

controlnet = ControlNetUnionModel.from_pretrained("xinsir/controlnet-union-sdxl-1.0")
pipe = StableDiffusionXLControlNetUnionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet)
```

## ControlNetUnionModel

[[autodoc]] ControlNetUnionModel
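As a rough sketch of passing a single conditioning input at inference time - the `control_image`/`control_mode` argument names and the mode index below are assumptions based on the checkpoint's model card rather than details confirmed on this page:

```py
from diffusers.utils import load_image

pipe.to("cuda")

# Placeholder URL for a pre-computed depth map used as the control signal
depth_map = load_image("https://example.com/depth_map.png")

image = pipe(
    prompt="a futuristic living room, highly detailed",
    control_image=[depth_map],
    control_mode=[1],  # assumed index for the "depth" control type
    num_inference_steps=30,
).images[0]
image.save("controlnet_union_output.png")
```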
diffusers/docs/source/en/api/models/controlnet_union.md/0
{ "file_path": "diffusers/docs/source/en/api/models/controlnet_union.md", "repo_id": "diffusers", "token_count": 486 }
115
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

> [!WARNING]
> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model.

# ControlNet-XS

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

ControlNet-XS was introduced in [ControlNet-XS](https://vislearn.github.io/ControlNet-XS/) by Denis Zavadski and Carsten Rother. It is based on the observation that the control model in the [original ControlNet](https://huggingface.co/papers/2302.05543) can be made much smaller and still produce good results.

Like the original ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.

ControlNet-XS generates images with comparable quality to a regular ControlNet, but it is 20-25% faster ([see benchmark](https://github.com/UmerHA/controlnet-xs-benchmark/blob/main/Speed%20Benchmark.ipynb) with StableDiffusion-XL) and uses ~45% less memory.

Here's the overview from the [project page](https://vislearn.github.io/ControlNet-XS/):

*With increasing computing capabilities, current model architectures appear to follow the trend of simply upscaling all components without validating the necessity for doing so. In this project we investigate the size and architectural design of ControlNet [Zhang et al., 2023] for controlling the image generation process with stable diffusion-based models. We show that a new architecture with as little as 1% of the parameters of the base model achieves state-of-the art results, considerably better than ControlNet in terms of FID score. Hence we call it ControlNet-XS. We provide the code for controlling StableDiffusion-XL [Podell et al., 2023] (Model B, 48M Parameters) and StableDiffusion 2.1 [Rombach et al. 2022] (Model B, 14M Parameters), all under openrail license.*

This model was contributed by [UmerHA](https://twitter.com/UmerHAdil). ❤️

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## StableDiffusionControlNetXSPipeline

[[autodoc]] StableDiffusionControlNetXSPipeline
  - all
  - __call__

## StableDiffusionPipelineOutput

[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
diffusers/docs/source/en/api/pipelines/controlnetxs.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/controlnetxs.md", "repo_id": "diffusers", "token_count": 896 }
116
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Kandinsky 2.1

Kandinsky 2.1 is created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), and [Denis Dimitrov](https://github.com/denndimitrov).

The description from its GitHub page is:

*Kandinsky 2.1 inherits best practicies from Dall-E 2 and Latent diffusion, while introducing some new ideas. As text and image encoder it uses CLIP model and diffusion image prior (mapping) between latent spaces of CLIP modalities. This approach increases the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.*

The original codebase can be found at [ai-forever/Kandinsky-2](https://github.com/ai-forever/Kandinsky-2).

<Tip>

Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting.

</Tip>

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## KandinskyPriorPipeline

[[autodoc]] KandinskyPriorPipeline
  - all
  - __call__
  - interpolate

## KandinskyPipeline

[[autodoc]] KandinskyPipeline
  - all
  - __call__

## KandinskyCombinedPipeline

[[autodoc]] KandinskyCombinedPipeline
  - all
  - __call__

## KandinskyImg2ImgPipeline

[[autodoc]] KandinskyImg2ImgPipeline
  - all
  - __call__

## KandinskyImg2ImgCombinedPipeline

[[autodoc]] KandinskyImg2ImgCombinedPipeline
  - all
  - __call__

## KandinskyInpaintPipeline

[[autodoc]] KandinskyInpaintPipeline
  - all
  - __call__

## KandinskyInpaintCombinedPipeline

[[autodoc]] KandinskyInpaintCombinedPipeline
  - all
  - __call__
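For orientation, a short sketch of chaining the prior and decoder pipelines; the checkpoint names follow the kandinsky-community organization mentioned above, and the prompt and generation settings are illustrative:

```py
import torch
from diffusers import KandinskyPriorPipeline, KandinskyPipeline

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "A portrait of an astronaut, oil painting"
# The prior maps the text prompt to CLIP image embeddings
image_embeds, negative_image_embeds = prior(prompt).to_tuple()

# The decoder turns the embeddings (plus the prompt) into an image
image = decoder(
    prompt,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
).images[0]
image.save("kandinsky_portrait.png")
```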
diffusers/docs/source/en/api/pipelines/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/kandinsky.md", "repo_id": "diffusers", "token_count": 846 }
117
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# LMSDiscreteScheduler

`LMSDiscreteScheduler` is a linear multistep scheduler for discrete beta schedules. The scheduler was created by [Katherine Crowson](https://github.com/crowsonkb/) and is ported from the original implementation at [crowsonkb/k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181).

## LMSDiscreteScheduler

[[autodoc]] LMSDiscreteScheduler

## LMSDiscreteSchedulerOutput

[[autodoc]] schedulers.scheduling_lms_discrete.LMSDiscreteSchedulerOutput
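A typical way to try this scheduler is to swap it into an existing pipeline while reusing the checkpoint's scheduler configuration; the model id and prompt below are just examples:

```py
import torch
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Reuse the checkpoint's schedule configuration (beta schedule, number of train timesteps, ...)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe("an astronaut riding a horse on mars", num_inference_steps=30).images[0]
```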
diffusers/docs/source/en/api/schedulers/lms_discrete.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/lms_discrete.md", "repo_id": "diffusers", "token_count": 335 }
118
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Community Projects Welcome to Community Projects. This space is dedicated to showcasing the incredible work and innovative applications created by our vibrant community using the `diffusers` library. This section aims to: - Highlight diverse and inspiring projects built with `diffusers` - Foster knowledge sharing within our community - Provide real-world examples of how `diffusers` can be leveraged Happy exploring, and thank you for being part of the Diffusers community! <table> <tr> <th>Project Name</th> <th>Description</th> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/carson-katri/dream-textures"> dream-textures </a></td> <td>Stable Diffusion built-in to Blender</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/megvii-research/HiDiffusion"> HiDiffusion </a></td> <td>Increases the resolution and speed of your diffusion model by only adding a single line of code</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/lllyasviel/IC-Light"> IC-Light </a></td> <td>IC-Light is a project to manipulate the illumination of images</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/InstantID/InstantID"> InstantID </a></td> <td>InstantID : Zero-shot Identity-Preserving Generation in Seconds</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/Sanster/IOPaint"> IOPaint </a></td> <td>Image inpainting tool powered by SOTA AI Model. 
Remove any unwanted object, defect, people from your pictures or erase and replace(powered by stable diffusion) any thing on your pictures.</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/bmaltais/kohya_ss"> Kohya </a></td> <td>Gradio GUI for Kohya's Stable Diffusion trainers</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/magic-research/magic-animate"> MagicAnimate </a></td> <td>MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/levihsu/OOTDiffusion"> OOTDiffusion </a></td> <td>Outfitting Fusion based Latent Diffusion for Controllable Virtual Try-on</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/vladmandic/automatic"> SD.Next </a></td> <td>SD.Next: Advanced Implementation of Stable Diffusion and other Diffusion-based generative image models</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/ashawkey/stable-dreamfusion"> stable-dreamfusion </a></td> <td>Text-to-3D & Image-to-3D & Mesh Exportation with NeRF + Diffusion</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/HVision-NKU/StoryDiffusion"> StoryDiffusion </a></td> <td>StoryDiffusion can create a magic story by generating consistent images and videos.</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/cumulo-autumn/StreamDiffusion"> StreamDiffusion </a></td> <td>A Pipeline-Level Solution for Real-Time Interactive Generation</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/Netwrck/stable-diffusion-server"> Stable Diffusion Server </a></td> <td>A server configured for Inpainting/Generation/img2img with one stable diffusion model</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/suzukimain/auto_diffusers"> Model Search </a></td> <td>Search models on Civitai and Hugging Face</td> </tr> <tr style="border-top: 2px solid black"> <td><a href="https://github.com/beinsezii/skrample"> Skrample </a></td> <td>Fully modular scheduler functions with 1st class diffusers integration.</td> </tr> </table>
diffusers/docs/source/en/community_projects.md/0
{ "file_path": "diffusers/docs/source/en/community_projects.md", "repo_id": "diffusers", "token_count": 1493 }
119
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# LoopSequentialPipelineBlocks

[`~modular_pipelines.LoopSequentialPipelineBlocks`] is a multi-block type that composes other [`~modular_pipelines.ModularPipelineBlocks`] together in a loop. Data flows circularly, using `intermediate_inputs` and `intermediate_outputs`, and each block is run iteratively. This is typically used to create a denoising loop, which is iterative by default.

This guide shows you how to create [`~modular_pipelines.LoopSequentialPipelineBlocks`].

## Loop wrapper

[`~modular_pipelines.LoopSequentialPipelineBlocks`] is also known as the *loop wrapper* because it defines the loop structure, iteration variables, and configuration. Within the loop wrapper, you need the following variables.

- `loop_inputs` are user provided values and equivalent to [`~modular_pipelines.ModularPipelineBlocks.inputs`].
- `loop_intermediate_inputs` are intermediate variables from the [`~modular_pipelines.PipelineState`] and equivalent to [`~modular_pipelines.ModularPipelineBlocks.intermediate_inputs`].
- `loop_intermediate_outputs` are new intermediate variables created by the block and added to the [`~modular_pipelines.PipelineState`]. It is equivalent to [`~modular_pipelines.ModularPipelineBlocks.intermediate_outputs`].
- `__call__` method defines the loop structure and iteration logic.

```py
import torch
from diffusers.modular_pipelines import LoopSequentialPipelineBlocks, ModularPipelineBlocks, InputParam, OutputParam

class LoopWrapper(LoopSequentialPipelineBlocks):
    model_name = "test"

    @property
    def description(self):
        return "I'm a loop!!"

    @property
    def loop_inputs(self):
        return [InputParam(name="num_steps")]

    @torch.no_grad()
    def __call__(self, components, state):
        block_state = self.get_block_state(state)
        # Loop structure - can be customized to your needs
        for i in range(block_state.num_steps):
            # loop_step executes all registered blocks in sequence
            components, block_state = self.loop_step(components, block_state, i=i)
        self.set_block_state(state, block_state)
        return components, state
```

The loop wrapper can pass additional arguments, like the current iteration index, to the loop blocks.

## Loop blocks

A loop block is a [`~modular_pipelines.ModularPipelineBlocks`], but the `__call__` method behaves differently.

- It receives the iteration variable from the loop wrapper.
- It works directly with the [`~modular_pipelines.BlockState`] instead of the [`~modular_pipelines.PipelineState`].
- It doesn't require retrieving or updating the [`~modular_pipelines.BlockState`].

Loop blocks share the same [`~modular_pipelines.BlockState`] to allow values to accumulate and change for each iteration in the loop.

```py
class LoopBlock(ModularPipelineBlocks):
    model_name = "test"

    @property
    def inputs(self):
        return [InputParam(name="x")]

    @property
    def intermediate_outputs(self):
        # outputs produced by this block
        return [OutputParam(name="x")]

    @property
    def description(self):
        return "I'm a block used inside the `LoopWrapper` class"

    def __call__(self, components, block_state, i: int):
        block_state.x += 1
        return components, block_state
```

## LoopSequentialPipelineBlocks

Use the [`~modular_pipelines.LoopSequentialPipelineBlocks.from_blocks_dict`] method to add the loop block to the loop wrapper to create [`~modular_pipelines.LoopSequentialPipelineBlocks`].

```py
loop = LoopWrapper.from_blocks_dict({"block1": LoopBlock})
```

Add more loop blocks to run within each iteration with [`~modular_pipelines.LoopSequentialPipelineBlocks.from_blocks_dict`]. This allows you to modify the blocks without changing the loop logic itself.

```py
loop = LoopWrapper.from_blocks_dict({"block1": LoopBlock(), "block2": LoopBlock})
```
diffusers/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md/0
{ "file_path": "diffusers/docs/source/en/modular_diffusers/loop_sequential_pipeline_blocks.md", "repo_id": "diffusers", "token_count": 1394 }
120
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# OpenVINO

🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides Stable Diffusion pipelines compatible with OpenVINO to perform inference on a variety of Intel processors (see the [full list](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) of supported devices).

You'll need to install 🤗 Optimum Intel with the `--upgrade-strategy eager` option to ensure [`optimum-intel`](https://github.com/huggingface/optimum-intel) is using the latest version:

```bash
pip install --upgrade-strategy eager optimum["openvino"]
```

This guide will show you how to use the Stable Diffusion and Stable Diffusion XL (SDXL) pipelines with OpenVINO.

## Stable Diffusion

To load and run inference, use the [`~optimum.intel.OVStableDiffusionPipeline`]. If you want to load a PyTorch model and convert it to the OpenVINO format on-the-fly, set `export=True`:

```python
from optimum.intel import OVStableDiffusionPipeline

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)
prompt = "sailing ship in storm by Rembrandt"
image = pipeline(prompt).images[0]

# Don't forget to save the exported model
pipeline.save_pretrained("openvino-sd-v1-5")
```

To further speed-up inference, statically reshape the model. If you change any parameters such as the output height or width, you'll need to statically reshape your model again.

```python
# Define the shapes related to the inputs and desired outputs
batch_size, num_images, height, width = 1, 1, 512, 512

# Statically reshape the model
pipeline.reshape(batch_size, height, width, num_images)
# Compile the model before inference
pipeline.compile()

image = pipeline(
    prompt,
    height=height,
    width=width,
    num_images_per_prompt=num_images,
).images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/intel/openvino/stable_diffusion_v1_5_sail_boat_rembrandt.png">
</div>

You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion), and Stable Diffusion is supported for text-to-image, image-to-image, and inpainting.

## Stable Diffusion XL

To load and run inference with SDXL, use the [`~optimum.intel.OVStableDiffusionXLPipeline`]:

```python
from optimum.intel import OVStableDiffusionXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id)
prompt = "sailing ship in storm by Rembrandt"
image = pipeline(prompt).images[0]
```

To further speed-up inference, [statically reshape](#stable-diffusion) the model as shown in the Stable Diffusion section.

You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion-xl), and running SDXL in OpenVINO is supported for text-to-image and image-to-image.
diffusers/docs/source/en/optimization/open_vino.md/0
{ "file_path": "diffusers/docs/source/en/optimization/open_vino.md", "repo_id": "diffusers", "token_count": 1115 }
121
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # CogVideoX CogVideoX is a text-to-video generation model focused on creating more coherent videos aligned with a prompt. It achieves this using several methods: - a 3D variational autoencoder that compresses videos spatially and temporally, improving compression rate and video accuracy. - an expert transformer block to help align text and video, and a 3D full attention module for capturing and creating spatially and temporally accurate videos. Evaluation along the video-instruction dimensions found that CogVideoX performs well on consistent theme, dynamic information, consistent background, object information, smooth motion, color, scene, appearance style, and temporal style, but cannot achieve good results with human action, spatial relationship, and multiple objects. Finetuning with Diffusers can help make up for these poor results. ## Data Preparation The training script accepts data in two formats. The first format is suited for small-scale training, and the second format uses a CSV format, which is more appropriate for streaming data for large-scale training. In the future, Diffusers will support the `<Video>` tag. ### Small format This format uses two files: one file contains line-separated prompts and the other contains line-separated paths to video data (the paths to the video files must be relative to the path you pass when specifying `--instance_data_root`). Let's take a look at an example to understand this better! Assume you've specified `--instance_data_root` as `/dataset`, and that this directory contains the files: `prompts.txt` and `videos.txt`. The `prompts.txt` file should contain line-separated prompts: ``` A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction. A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures. ... ``` The `videos.txt` file should contain line-separated paths to video files. Note that the path should be _relative_ to the `--instance_data_root` directory. ``` videos/00000.mp4 videos/00001.mp4 ... ``` Overall, this is how your dataset would look if you ran the `tree` command on the dataset root directory: ``` /dataset ├── prompts.txt ├── videos.txt ├── videos ├── videos/00000.mp4 ├── videos/00001.mp4 ├── ... ``` When using this format, the `--caption_column` must be `prompts.txt` and `--video_column` must be `videos.txt`. A quick sanity check of this layout is sketched below.
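Before launching training, it can help to verify that the two files line up. This is a hypothetical helper, not part of the training script, assuming the `/dataset` layout described above:

```python
from pathlib import Path

root = Path("/dataset")  # the same value you pass to --instance_data_root
prompts = (root / "prompts.txt").read_text().strip().splitlines()
videos = (root / "videos.txt").read_text().strip().splitlines()

# Each prompt must correspond to exactly one video path.
assert len(prompts) == len(videos), f"{len(prompts)} prompts vs {len(videos)} videos"

# Every path in videos.txt is relative to --instance_data_root and must exist.
missing = [v for v in videos if not (root / v).exists()]
assert not missing, f"Missing video files: {missing[:5]}"
```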
### Stream format You could use a single CSV file. For the sake of this example, assume you have a `metadata.csv` file. The expected format is: ``` <CAPTION_COLUMN>,<PATH_TO_VIDEO_COLUMN> """A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.""","""00000.mp4""" """A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.""","""00001.mp4""" ... ``` In this case, the `--instance_data_root` should be the location where the videos are stored and `--dataset_name` should be either a path to a local folder or a [`~datasets.load_dataset`] compatible dataset hosted on the Hub. Assuming you have videos of Minecraft gameplay at `https://huggingface.co/datasets/my-awesome-username/minecraft-videos`, you would have to specify `my-awesome-username/minecraft-videos`. When using this format, the `--caption_column` must be `<CAPTION_COLUMN>` and `--video_column` must be `<PATH_TO_VIDEO_COLUMN>`. You are not strictly restricted to the CSV format. Any format works as long as the `load_dataset` method supports loading the file format with a basic `<PATH_TO_VIDEO_COLUMN>` and `<CAPTION_COLUMN>`. The reason for going through these dataset organization gymnastics for loading video data is that `load_dataset` does not fully support all kinds of video formats. > [!NOTE] > CogVideoX works best with long and descriptive LLM-augmented prompts for video generation. We recommend pre-processing your videos by first generating a summary using a VLM and then augmenting the prompts with an LLM. To generate the above captions, we use [MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) and [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct). A very barebones and no-frills example for this is available [here](https://gist.github.com/a-r-r-o-w/4dee20250e82f4e44690a02351324a4a). The official recommendation for augmenting prompts is [ChatGLM](https://huggingface.co/THUDM?search_models=chatglm) and a length of 50-100 words is considered good. > [!NOTE] > It is expected that your dataset is already pre-processed. If not, some basic pre-processing can be done by playing with the following parameters: > `--height`, `--width`, `--fps`, `--max_num_frames`, `--skip_frames_start` and `--skip_frames_end`. > Presently, all videos in your dataset should contain the same number of video frames when using a training batch size > 1. <!-- TODO: Implement frame packing in future to address above issue. --> ## Training You need to set up your development environment by installing the necessary requirements.
The following packages are required: - Torch 2.0 or above based on the training features you are utilizing (might require latest or nightly versions for quantized/deepspeed training) - `pip install diffusers transformers accelerate peft huggingface_hub` for all things modeling and training related - `pip install datasets decord` for loading video training data - `pip install bitsandbytes` for using 8-bit Adam or AdamW optimizers for memory-optimized training - `pip install wandb` optionally for monitoring training logs - `pip install deepspeed` optionally for [DeepSpeed](https://github.com/microsoft/DeepSpeed) training - `pip install prodigyopt` optionally if you would like to use the Prodigy optimizer for training To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: - PyTorch ```bash cd examples/cogvideo pip install -r requirements.txt ``` And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell (e.g., a notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if you use torch.compile, there can be dramatic speedups. The PEFT library is used as a backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment. If you would like to push your model to the Hub after training is completed with a neat model card, make sure you're logged in: ```bash hf auth login # Alternatively, you could upload your model manually using: # hf upload my-cool-account-name/my-cool-lora-name /path/to/awesome/lora ``` Make sure your data is prepared as described in [Data Preparation](#data-preparation). When ready, you can begin training! Assuming you are training on 50 videos of a similar concept, we have found 1500-2000 steps to work well. The official recommendation, however, is 100 videos with a total of 4000 steps. Assuming you are training on a single GPU with a `--train_batch_size` of `1`: - 1500 steps on 50 videos would correspond to `30` training epochs - 4000 steps on 100 videos would correspond to `40` training epochs ```bash #!/bin/bash GPU_IDS="0" accelerate launch --gpu_ids $GPU_IDS examples/cogvideo/train_cogvideox_lora.py \ --pretrained_model_name_or_path THUDM/CogVideoX-2b \ --cache_dir <CACHE_DIR> \ --instance_data_root <PATH_TO_WHERE_VIDEO_FILES_ARE_STORED> \ --dataset_name my-awesome-name/my-awesome-dataset \ --caption_column <CAPTION_COLUMN> \ --video_column <PATH_TO_VIDEO_COLUMN> \ --id_token <ID_TOKEN> \ --validation_prompt "<ID_TOKEN> Spiderman swinging over buildings:::A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. 
Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance" \ --validation_prompt_separator ::: \ --num_validation_videos 1 \ --validation_epochs 10 \ --seed 42 \ --rank 64 \ --lora_alpha 64 \ --mixed_precision fp16 \ --output_dir /raid/aryan/cogvideox-lora \ --height 480 --width 720 --fps 8 --max_num_frames 49 --skip_frames_start 0 --skip_frames_end 0 \ --train_batch_size 1 \ --num_train_epochs 30 \ --checkpointing_steps 1000 \ --gradient_accumulation_steps 1 \ --learning_rate 1e-3 \ --lr_scheduler cosine_with_restarts \ --lr_warmup_steps 200 \ --lr_num_cycles 1 \ --enable_slicing \ --enable_tiling \ --optimizer Adam \ --adam_beta1 0.9 \ --adam_beta2 0.95 \ --max_grad_norm 1.0 \ --report_to wandb ``` To better track our training experiments, we're using the following flags in the command above: * `--report_to wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`. * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected. Setting the `<ID_TOKEN>` is not necessary. From some limited experimentation, we found it works better (as it resembles [Dreambooth](https://huggingface.co/docs/diffusers/en/training/dreambooth) training) than without. When provided, the `<ID_TOKEN>` is appended to the beginning of each prompt. So, if your `<ID_TOKEN>` was `"DISNEY"` and your prompt was `"Spiderman swinging over buildings"`, the effective prompt used in training would be `"DISNEY Spiderman swinging over buildings"`. When not provided, you would either be training without any additional token or could augment your dataset to apply the token where you wish before starting the training. > [!NOTE] > You can pass `--use_8bit_adam` to reduce the memory requirements of training. > [!IMPORTANT] > The following settings have been tested at the time of adding CogVideoX LoRA training support: > - Our testing was primarily done on CogVideoX-2b. We will work on CogVideoX-5b and CogVideoX-5b-I2V soon > - One dataset comprised of 70 training videos of resolutions `200 x 480 x 720` (F x H x W). From this, by using frame skipping in data preprocessing, we created two smaller 49-frame and 16-frame datasets for faster experimentation and because the maximum limit recommended by the CogVideoX team is 49 frames. Out of the 70 videos, we created three groups of 10, 25 and 50 videos. All videos were similar in nature of the concept being trained. > - 25+ videos worked best for training new concepts and styles. > - We found that it is better to train with an identifier token that can be specified as `--id_token`. This is similar to Dreambooth-like training but normal finetuning without such a token works too. > - Trained concept seemed to work decently well when combined with completely unrelated prompts. We expect even better results if CogVideoX-5B is finetuned. > - The original repository uses a `lora_alpha` of `1`. We found this not suitable in many runs, possibly due to difference in modeling backends and training settings. 
Our recommendation is to set the `lora_alpha` to either `rank` or `rank // 2`. > - If you're training on data whose captions generate bad results with the original model, a `rank` of 64 and above is good and also the recommendation by the team behind CogVideoX. If the generations are already moderately good on your training captions, a `rank` of 16/32 should work. We found that setting the rank too low, say `4`, is not ideal and doesn't produce promising results. > - The authors of CogVideoX recommend 4000 training steps and 100 training videos overall to achieve the best result. While that might yield the best results, we found from our limited experimentation that 2000 steps and 25 videos could also be sufficient. > - When using the Prodigy optimizer for training, one can follow the recommendations from [this](https://huggingface.co/blog/sdxl_lora_advanced_script) blog. Prodigy tends to overfit quickly. From our very limited testing, we found a learning rate of `0.5` to be suitable in addition to `--prodigy_use_bias_correction`, `--prodigy_safeguard_warmup` and `--prodigy_decouple`. > - The recommended learning rate by the CogVideoX authors and from our experimentation with Adam/AdamW is between `1e-3` and `1e-4` for a dataset of 25+ videos. > > Note that our testing is not exhaustive due to limited time for exploration. Our recommendation would be to play around with the different knobs and dials to find the best settings for your data. <!-- TODO: Test finetuning with CogVideoX-5b and CogVideoX-5b-I2V and update scripts accordingly --> ## Inference Once you have trained a LoRA model, inference can be done by simply loading the LoRA weights into the `CogVideoXPipeline`. ```python import torch from diffusers import CogVideoXPipeline from diffusers.utils import export_to_video pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16) # pipe.load_lora_weights("/path/to/lora/weights", adapter_name="cogvideox-lora") # Or, pipe.load_lora_weights("my-awesome-hf-username/my-awesome-lora-name", adapter_name="cogvideox-lora") # If loading from the HF Hub pipe.to("cuda") # Assuming lora_alpha=32 and rank=64 for training. If different, set accordingly pipe.set_adapters(["cogvideox-lora"], [32 / 64]) prompt = "A vast, shimmering ocean flows gracefully under a twilight sky, its waves undulating in a mesmerizing dance of blues and greens. The surface glints with the last rays of the setting sun, casting golden highlights that ripple across the water. Seagulls soar above, their cries blending with the gentle roar of the waves. The horizon stretches infinitely, where the ocean meets the sky in a seamless blend of hues. Close-ups reveal the intricate patterns of the waves, capturing the fluidity and dynamic beauty of the sea in motion." frames = pipe(prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0] export_to_video(frames, "output.mp4", fps=8) ``` ## Reduce memory usage While testing with the diffusers library, all optimizations offered by the library were enabled. This scheme has not been tested for actual memory usage on devices outside of **NVIDIA A100 / H100** architectures. Generally, this scheme can be adapted to all **NVIDIA Ampere architecture** and above devices. If optimizations are disabled, memory consumption will multiply, with peak memory usage being about 3 times the value in the table. However, speed will increase by about 3-4 times.
You can selectively disable some optimizations, including: ``` pipe.enable_sequential_cpu_offload() pipe.vae.enable_slicing() pipe.vae.enable_tiling() ``` + For multi-GPU inference, the `enable_sequential_cpu_offload()` optimization needs to be disabled. + Using INT8 models will slow down inference, which is done to accommodate lower-memory GPUs while maintaining minimal video quality loss, though inference speed will significantly decrease. + The CogVideoX-2B model was trained in `FP16` precision, and all CogVideoX-5B models were trained in `BF16` precision. We recommend using the precision in which the model was trained for inference. + [PytorchAO](https://github.com/pytorch/ao) and [Optimum-quanto](https://github.com/huggingface/optimum-quanto/) can be used to quantize the text encoder, transformer, and VAE modules to reduce the memory requirements of CogVideoX. This allows the model to run on free T4 Colabs or GPUs with smaller memory! Also, note that TorchAO quantization is fully compatible with `torch.compile`, which can significantly improve inference speed. FP8 precision must be used on devices with NVIDIA H100 and above, requiring source installation of `torch`, `torchao`, `diffusers`, and `accelerate` Python packages. CUDA 12.4 is recommended. + The inference speed tests also used the above memory optimization scheme. Without memory optimization, inference speed increases by about 10%. Only the `diffusers` version of the model supports quantization. + The model only supports English input; other languages can be translated into English for use via large model refinement. + The memory usage of model fine-tuning is tested in an `8 * H100` environment, and the program automatically uses `Zero 2` optimization. If a specific number of GPUs is marked in the table, that number or more GPUs must be used for fine-tuning. | **Attribute** | **CogVideoX-2B** | **CogVideoX-5B** | | ------------------------------------ | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | | **Model Name** | CogVideoX-2B | CogVideoX-5B | | **Inference Precision** | FP16* (Recommended), BF16, FP32, FP8*, INT8, Not supported INT4 | BF16 (Recommended), FP16, FP32, FP8*, INT8, Not supported INT4 | | **Single GPU Inference VRAM** | FP16: Using diffusers 12.5GB* INT8: Using diffusers with torchao 7.8GB* | BF16: Using diffusers 20.7GB* INT8: Using diffusers with torchao 11.4GB* | | **Multi GPU Inference VRAM** | FP16: Using diffusers 10GB* | BF16: Using diffusers 15GB* | | **Inference Speed** | Single A100: ~90 seconds, Single H100: ~45 seconds | Single A100: ~180 seconds, Single H100: ~90 seconds | | **Fine-tuning Precision** | FP16 | BF16 | | **Fine-tuning VRAM Consumption** | 47 GB (bs=1, LORA) 61 GB (bs=2, LORA) 62GB (bs=1, SFT) | 63 GB (bs=1, LORA) 80 GB (bs=2, LORA) 75GB (bs=1, SFT) |
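As an illustration of the TorchAO quantization route mentioned above, the following is a rough sketch rather than the officially benchmarked setup; the exact APIs and memory savings depend on your `torchao` and `diffusers` versions:

```python
import torch
from diffusers import CogVideoXPipeline
from diffusers.utils import export_to_video
from torchao.quantization import quantize_, int8_weight_only

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)

# Quantize the transformer weights to int8 to reduce VRAM usage.
quantize_(pipe.transformer, int8_weight_only())

# Combine with the memory optimizations discussed above.
pipe.enable_model_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

frames = pipe("A panda playing a guitar in a bamboo forest", guidance_scale=6).frames[0]
export_to_video(frames, "output.mp4", fps=8)
```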
diffusers/docs/source/en/training/cogvideox.md/0
{ "file_path": "diffusers/docs/source/en/training/cogvideox.md", "repo_id": "diffusers", "token_count": 6071 }
122
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Unconditional image generation Unconditional image generation models are not conditioned on text or images during training. They only generate images that resemble their training data distribution. This guide will explore the [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) training script to help you become familiar with it, and how you can adapt it for your own use case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies: ```bash cd examples/unconditional_image_generation pip install -r requirements.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To set up a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. ## Script parameters <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) and let us know if you have any questions or concerns. </Tip> The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L55) function. It provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.
For example, to speedup training with mixed precision using the bf16 format, add the `--mixed_precision` parameter to the training command: ```bash accelerate launch train_unconditional.py \ --mixed_precision="bf16" ``` Some basic and important parameters to specify include: - `--dataset_name`: the name of the dataset on the Hub or a local path to the dataset to train on - `--output_dir`: where to save the trained model - `--push_to_hub`: whether to push the trained model to the Hub - `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful if training is interrupted, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command Bring your dataset, and let the training script handle everything else! ## Training script The code for preprocessing the dataset and the training loop is found in the [`main()`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L275) function. If you need to adapt the training script, this is where you'll need to make your changes. The `train_unconditional` script [initializes a `UNet2DModel`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L356) if you don't provide a model configuration. You can configure the UNet here if you'd like: ```py model = UNet2DModel( sample_size=args.resolution, in_channels=3, out_channels=3, layers_per_block=2, block_out_channels=(128, 128, 256, 256, 512, 512), down_block_types=( "DownBlock2D", "DownBlock2D", "DownBlock2D", "DownBlock2D", "AttnDownBlock2D", "DownBlock2D", ), up_block_types=( "UpBlock2D", "AttnUpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", ), ) ``` Next, the script initializes a [scheduler](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L418) and [optimizer](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L429): ```py # Initialize the scheduler accepts_prediction_type = "prediction_type" in set(inspect.signature(DDPMScheduler.__init__).parameters.keys()) if accepts_prediction_type: noise_scheduler = DDPMScheduler( num_train_timesteps=args.ddpm_num_steps, beta_schedule=args.ddpm_beta_schedule, prediction_type=args.prediction_type, ) else: noise_scheduler = DDPMScheduler(num_train_timesteps=args.ddpm_num_steps, beta_schedule=args.ddpm_beta_schedule) # Initialize the optimizer optimizer = torch.optim.AdamW( model.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Then it [loads a dataset](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L451) and you can specify how to [preprocess](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L455) it: ```py dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train") augmentations = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if 
args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) ``` Finally, the [training loop](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L540) handles everything else such as adding noise to the images, predicting the noise residual, calculating the loss, saving checkpoints at specified steps, and saving and pushing the model to the Hub. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 <Tip warning={true}> A full training run takes 2 hours on 4xV100 GPUs. </Tip> <hfoptions id="launchtraining"> <hfoption id="single GPU"> ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --output_dir="ddpm-ema-flowers-64" \ --mixed_precision="fp16" \ --push_to_hub ``` </hfoption> <hfoption id="multi-GPU"> If you're training with more than one GPU, add the `--multi_gpu` parameter to the training command: ```bash accelerate launch --multi_gpu train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --output_dir="ddpm-ema-flowers-64" \ --mixed_precision="fp16" \ --push_to_hub ``` </hfoption> </hfoptions> The training script creates and saves a checkpoint file in your repository. Now you can load and use your trained model for inference: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128").to("cuda") image = pipeline().images[0] ```
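To make the training loop described earlier more concrete, here is a heavily simplified, hypothetical sketch of a single denoising training step; the real script also handles EMA weights, gradient accumulation, mixed precision, and checkpointing:

```py
import torch
import torch.nn.functional as F

def training_step(model, noise_scheduler, optimizer, clean_images):
    # Sample random noise and a random timestep for each image in the batch.
    noise = torch.randn_like(clean_images)
    timesteps = torch.randint(
        0, noise_scheduler.config.num_train_timesteps, (clean_images.shape[0],), device=clean_images.device
    )

    # Add noise to the clean images according to the forward diffusion process.
    noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)

    # Predict the noise residual and regress it against the true noise.
    noise_pred = model(noisy_images, timesteps).sample
    loss = F.mse_loss(noise_pred, noise)

    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
```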
diffusers/docs/source/en/training/unconditional_training.md/0
{ "file_path": "diffusers/docs/source/en/training/unconditional_training.md", "repo_id": "diffusers", "token_count": 2949 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Controlling image quality The components of a diffusion model, like the UNet and scheduler, can be optimized to improve the quality of generated images leading to better details. These techniques are especially useful if you don't have the resources to simply use a larger model for inference. You can enable these techniques during inference without any additional training. This guide will show you how to turn these techniques on in your pipeline and how to configure them to improve the quality of your generated images. ## Details [FreeU](https://hf.co/papers/2309.11497) improves image details by rebalancing the UNet's backbone and skip connection weights. The skip connections can cause the model to overlook some of the backbone semantics which may lead to unnatural image details in the generated image. This technique does not require any additional training and can be applied on the fly during inference for tasks like image-to-image and text-to-video. Use the [`~pipelines.StableDiffusionMixin.enable_freeu`] method on your pipeline and configure the scaling factors for the backbone (`b1` and `b2`) and skip connections (`s1` and `s2`). The number after each scaling factor corresponds to the stage in the UNet where the factor is applied. Take a look at the [FreeU](https://github.com/ChenyangSi/FreeU#parameters) repository for reference hyperparameters for different models. 
<hfoptions id="freeu"> <hfoption id="Stable Diffusion v1-5"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6) generator = torch.Generator(device="cpu").manual_seed(33) prompt = "" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Stable Diffusion v2-1"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.4, b2=1.6) generator = torch.Generator(device="cpu").manual_seed(80) prompt = "A squirrel eating a burger" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Stable Diffusion XL"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4) generator = torch.Generator(device="cpu").manual_seed(13) prompt = "A squirrel eating a burger" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Zeroscope"> ```py import torch from diffusers import DiffusionPipeline from diffusers.utils import export_to_video pipeline = DiffusionPipeline.from_pretrained( "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16 ).to("cuda") # values come from https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines pipeline.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2) prompt = "Confident teddy bear surfer rides the wave in the tropics" generator = torch.Generator(device="cpu").manual_seed(47) video_frames = pipeline(prompt, generator=generator).frames[0] export_to_video(video_frames, "teddy_bear.mp4", fps=10) ``` <div 
class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-no-freeu.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-freeu.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> </hfoptions> Call the [`pipelines.StableDiffusionMixin.disable_freeu`] method to disable FreeU. ```py pipeline.disable_freeu() ```
diffusers/docs/source/en/using-diffusers/image_quality.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/image_quality.md", "repo_id": "diffusers", "token_count": 2199 }
124
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Stable Diffusion XL [[open-in-colab]] [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (SDXL) is a powerful text-to-image generation model that iterates on the previous Stable Diffusion models in three key ways: 1. the UNet is 3x larger and SDXL combines a second text encoder (OpenCLIP ViT-bigG/14) with the original text encoder to significantly increase the number of parameters 2. introduces size and crop-conditioning to preserve training data from being discarded and gain more control over how a generated image should be cropped 3. introduces a two-stage model process; the *base* model (can also be run as a standalone model) generates an image as an input to the *refiner* model which adds additional high-quality details This guide will show you how to use SDXL for text-to-image, image-to-image, and inpainting. Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate invisible-watermark>=0.2.0 ``` <Tip warning={true}> We recommend installing the [invisible-watermark](https://pypi.org/project/invisible-watermark/) library to help identify images that are generated. If the invisible-watermark library is installed, it is used by default. To disable the watermarker: ```py pipeline = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False) ``` </Tip> ## Load model checkpoints Model weights may be stored in separate subfolders on the Hub or locally, in which case, you should use the [`~StableDiffusionXLPipeline.from_pretrained`] method: ```py from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline import torch pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16" ).to("cuda") ``` You can also use the [`~StableDiffusionXLPipeline.from_single_file`] method to load a model checkpoint stored in a single file format (`.ckpt` or `.safetensors`) from the Hub or locally: ```py from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline import torch pipeline = StableDiffusionXLPipeline.from_single_file( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors", torch_dtype=torch.float16 ).to("cuda") refiner = StableDiffusionXLImg2ImgPipeline.from_single_file( "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16 ).to("cuda") ``` ## Text-to-image For text-to-image, pass a text prompt. By default, SDXL generates a 1024x1024 image for the best results. 
You can try setting the `height` and `width` parameters to 768x768 or 512x512, but anything below 512x512 is not likely to work. ```py from diffusers import AutoPipelineForText2Image import torch pipeline_text2image = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipeline_text2image(prompt=prompt).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" alt="generated image of an astronaut in a jungle"/> </div> ## Image-to-image For image-to-image, SDXL works especially well with image sizes between 768x768 and 1024x1024. Pass an initial image, and a text prompt to condition the image with: ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import load_image, make_image_grid # use from_pipe to avoid consuming additional memory when loading a checkpoint pipeline = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" init_image = load_image(url) prompt = "a dog catching a frisbee in the jungle" image = pipeline(prompt, image=init_image, strength=0.8, guidance_scale=10.5).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-img2img.png" alt="generated image of a dog catching a frisbee in a jungle"/> </div> ## Inpainting For inpainting, you'll need the original image and a mask of what you want to replace in the original image. Create a prompt to describe what you want to replace the masked area with. ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid # use from_pipe to avoid consuming additional memory when loading a checkpoint pipeline = AutoPipelineForInpainting.from_pipe(pipeline_text2image).to("cuda") img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" mask_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-inpaint-mask.png" init_image = load_image(img_url) mask_image = load_image(mask_url) prompt = "A deep sea diver floating" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.85, guidance_scale=12.5).images[0] make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-inpaint.png" alt="generated image of a deep sea diver in a jungle"/> </div> ## Refine image quality SDXL includes a [refiner model](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) specialized in denoising low-noise stage images to generate higher-quality images from the base model. There are two ways to use the refiner: 1. use the base and refiner models together to produce a refined image 2. 
use the base model to produce an image, and subsequently use the refiner model to add more details to the image (this is how SDXL was originally trained) ### Base + refiner model When you use the base and refiner model together to generate an image, this is known as an [*ensemble of expert denoisers*](https://research.nvidia.com/labs/dir/eDiff-I/). The ensemble of expert denoisers approach requires fewer overall denoising steps versus passing the base model's output to the refiner model, so it should be significantly faster to run. However, you won't be able to inspect the base model's output because it still contains a large amount of noise. As an ensemble of expert denoisers, the base model serves as the expert during the high-noise diffusion stage and the refiner model serves as the expert during the low-noise diffusion stage. Load the base and refiner model: ```py from diffusers import DiffusionPipeline import torch base = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") refiner = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=base.text_encoder_2, vae=base.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ).to("cuda") ``` To use this approach, you need to define the number of timesteps for each model to run through their respective stages. For the base model, this is controlled by the [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) parameter and for the refiner model, it is controlled by the [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start) parameter. <Tip> The `denoising_end` and `denoising_start` parameters should be a float between 0 and 1. These parameters are represented as a proportion of discrete timesteps as defined by the scheduler. If you're also using the `strength` parameter, it'll be ignored because the number of denoising steps is determined by the discrete timesteps the model is trained on and the declared fractional cutoff. </Tip> Let's set `denoising_end=0.8` so the base model performs the first 80% of denoising the **high-noise** timesteps and set `denoising_start=0.8` so the refiner model performs the last 20% of denoising the **low-noise** timesteps. The base model output should be in **latent** space instead of a PIL image. 
```py prompt = "A majestic lion jumping from a big stone at night" image = base( prompt=prompt, num_inference_steps=40, denoising_end=0.8, output_type="latent", ).images image = refiner( prompt=prompt, num_inference_steps=40, denoising_start=0.8, image=image, ).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_base.png" alt="generated image of a lion on a rock at night" /> <figcaption class="mt-2 text-center text-sm text-gray-500">default base model</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_refined.png" alt="generated image of a lion on a rock at night in higher quality" /> <figcaption class="mt-2 text-center text-sm text-gray-500">ensemble of expert denoisers</figcaption> </div> </div> The refiner model can also be used for inpainting in the [`StableDiffusionXLInpaintPipeline`]: ```py from diffusers import StableDiffusionXLInpaintPipeline from diffusers.utils import load_image, make_image_grid import torch base = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") refiner = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=base.text_encoder_2, vae=base.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ).to("cuda") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url) mask_image = load_image(mask_url) prompt = "A majestic tiger sitting on a bench" num_inference_steps = 75 high_noise_frac = 0.7 image = base( prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=num_inference_steps, denoising_end=high_noise_frac, output_type="latent", ).images image = refiner( prompt=prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, denoising_start=high_noise_frac, ).images[0] make_image_grid([init_image, mask_image, image.resize((512, 512))], rows=1, cols=3) ``` This ensemble of expert denoisers method works well for all available schedulers! ### Base to refiner model SDXL gets a boost in image quality by using the refiner model to add additional high-quality details to the fully-denoised image from the base model, in an image-to-image setting. Load the base and refiner models: ```py from diffusers import DiffusionPipeline import torch base = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") refiner = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=base.text_encoder_2, vae=base.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ).to("cuda") ``` <Tip warning={true}> You can use SDXL refiner with a different base model. For example, you can use the [Hunyuan-DiT](../../api/pipelines/hunyuandit) or [PixArt-Sigma](../../api/pipelines/pixart_sigma) pipelines to generate images with better prompt adherence. 
Once you have generated an image, you can pass it to the SDXL refiner model to enhance final generation quality. </Tip> Generate an image from the base model, and set the model output to **latent** space: ```py prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = base(prompt=prompt, output_type="latent").images[0] ``` Pass the generated image to the refiner model: ```py image = refiner(prompt=prompt, image=image[None, :]).images[0] ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/init_image.png" alt="generated image of an astronaut riding a green horse on Mars" /> <figcaption class="mt-2 text-center text-sm text-gray-500">base model</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_image.png" alt="higher quality generated image of an astronaut riding a green horse on Mars" /> <figcaption class="mt-2 text-center text-sm text-gray-500">base model + refiner model</figcaption> </div> </div> For inpainting, load the base and the refiner model in the [`StableDiffusionXLInpaintPipeline`], remove the `denoising_end` and `denoising_start` parameters, and choose a smaller number of inference steps for the refiner. ## Micro-conditioning SDXL training involves several additional conditioning techniques, which are referred to as *micro-conditioning*. These include original image size, target image size, and cropping parameters. The micro-conditionings can be used at inference time to create high-quality, centered images. <Tip> You can use both micro-conditioning and negative micro-conditioning parameters thanks to classifier-free guidance. They are available in the [`StableDiffusionXLPipeline`], [`StableDiffusionXLImg2ImgPipeline`], [`StableDiffusionXLInpaintPipeline`], and [`StableDiffusionXLControlNetPipeline`]. </Tip> ### Size conditioning There are two types of size conditioning: - [`original_size`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.original_size) conditioning comes from upscaled images in the training batch (because it would be wasteful to discard the smaller images which make up almost 40% of the total training data). This way, SDXL learns that upscaling artifacts are not supposed to be present in high-resolution images. During inference, you can use `original_size` to indicate the original image resolution. Using the default value of `(1024, 1024)` produces higher-quality images that resemble the 1024x1024 images in the dataset. If you choose to use a lower resolution, such as `(256, 256)`, the model still generates 1024x1024 images, but they'll look like the low resolution images (simpler patterns, blurring) in the dataset. - [`target_size`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.target_size) conditioning comes from finetuning SDXL to support different image aspect ratios. During inference, if you use the default value of `(1024, 1024)`, you'll get an image that resembles the composition of square images in the dataset. We recommend using the same value for `target_size` and `original_size`, but feel free to experiment with other options! 
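For example, here is a sketch of passing `original_size` and `target_size` explicitly; the values are illustrative, and conditioning on a low `original_size` makes the 1024x1024 output resemble the low-resolution images in the training data:

```py
from diffusers import StableDiffusionXLPipeline
import torch

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# original_size=(256, 256) conditions the model on low-resolution training images;
# target_size=(1024, 1024) keeps the square composition of the dataset.
image = pipe(
    prompt=prompt,
    original_size=(256, 256),
    target_size=(1024, 1024),
).images[0]
image
```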
🤗 Diffusers also lets you specify negative conditions about an image's size to steer generation away from certain image resolutions: ```py from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe( prompt=prompt, negative_original_size=(512, 512), negative_target_size=(1024, 1024), ).images[0] ``` <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/negative_conditions.png"/> <figcaption class="text-center">Images negatively conditioned on image resolutions of (128, 128), (256, 256), and (512, 512).</figcaption> </div> ### Crop conditioning Images generated by previous Stable Diffusion models may sometimes appear to be cropped. This is because images are actually cropped during training so that all the images in a batch have the same size. By conditioning on crop coordinates, SDXL *learns* that no cropping - coordinates `(0, 0)` - usually correlates with centered subjects and complete faces (this is the default value in 🤗 Diffusers). You can experiment with different coordinates if you want to generate off-centered compositions! ```py from diffusers import StableDiffusionXLPipeline import torch pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipeline(prompt=prompt, crops_coords_top_left=(256, 0)).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-cropped.png" alt="generated image of an astronaut in a jungle, slightly cropped"/> </div> You can also specify negative cropping coordinates to steer generation away from certain cropping parameters: ```py from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe( prompt=prompt, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images[0] image ``` ## Use a different prompt for each text-encoder SDXL uses two text-encoders, so it is possible to pass a different prompt to each text-encoder, which can [improve quality](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201). 
Pass your original prompt to `prompt` and the second prompt to `prompt_2` (use `negative_prompt` and `negative_prompt_2` if you're using negative prompts): ```py from diffusers import StableDiffusionXLPipeline import torch pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") # prompt is passed to OAI CLIP-ViT/L-14 prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # prompt_2 is passed to OpenCLIP-ViT/bigG-14 prompt_2 = "Van Gogh painting" image = pipeline(prompt=prompt, prompt_2=prompt_2).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-double-prompt.png" alt="generated image of an astronaut in a jungle in the style of a van gogh painting"/> </div> The dual text-encoders also support textual inversion embeddings that need to be loaded separately as explained in the [SDXL textual inversion](textual_inversion_inference#stable-diffusion-xl) section. ## Optimizations SDXL is a large model, and you may need to optimize memory to get it to run on your hardware. Here are some tips to save memory and speed up inference. 1. Offload the model to the CPU with [`~StableDiffusionXLPipeline.enable_model_cpu_offload`] for out-of-memory errors: ```diff - base.to("cuda") - refiner.to("cuda") + base.enable_model_cpu_offload() + refiner.enable_model_cpu_offload() ``` 2. Use `torch.compile` for ~20% speed-up (you need `torch>=2.0`): ```diff + base.unet = torch.compile(base.unet, mode="reduce-overhead", fullgraph=True) + refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True) ``` 3. Enable [xFormers](../optimization/xformers) to run SDXL if `torch<2.0`: ```diff + base.enable_xformers_memory_efficient_attention() + refiner.enable_xformers_memory_efficient_attention() ``` ## Other resources If you're interested in experimenting with a minimal version of the [`UNet2DConditionModel`] used in SDXL, take a look at the [minSDXL](https://github.com/cloneofsimo/minSDXL) implementation which is written in PyTorch and directly compatible with 🤗 Diffusers.
diffusers/docs/source/en/using-diffusers/sdxl.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/sdxl.md", "repo_id": "diffusers", "token_count": 7092 }
125
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoPipeline Diffusersは様々なタスクをこなすことができ、テキストから画像、画像から画像、画像の修復など、複数のタスクに対して同じように事前学習された重みを再利用することができます。しかし、ライブラリや拡散モデルに慣れていない場合、どのタスクにどのパイプラインを使えばいいのかがわかりにくいかもしれません。例えば、 [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) チェックポイントをテキストから画像に変換するために使用している場合、それぞれ[`StableDiffusionImg2ImgPipeline`]クラスと[`StableDiffusionInpaintPipeline`]クラスでチェックポイントをロードすることで、画像から画像や画像の修復にも使えることを知らない可能性もあります。 `AutoPipeline` クラスは、🤗 Diffusers の様々なパイプラインをよりシンプルするために設計されています。この汎用的でタスク重視のパイプラインによってタスクそのものに集中することができます。`AutoPipeline` は、使用するべき正しいパイプラインクラスを自動的に検出するため、特定のパイプラインクラス名を知らなくても、タスクのチェックポイントを簡単にロードできます。 <Tip> どのタスクがサポートされているかは、[AutoPipeline](../api/pipelines/auto_pipeline) のリファレンスをご覧ください。現在、text-to-image、image-to-image、inpaintingをサポートしています。 </Tip> このチュートリアルでは、`AutoPipeline` を使用して、事前に学習された重みが与えられたときに、特定のタスクを読み込むためのパイプラインクラスを自動的に推測する方法を示します。 ## タスクに合わせてAutoPipeline を選択する まずはチェックポイントを選ぶことから始めましょう。例えば、 [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) チェックポイントでテキストから画像への変換したいなら、[`AutoPipelineForText2Image`]を使います: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") prompt = "peasant and dragon combat, wood cutting style, viking era, bevel with rune" image = pipeline(prompt, num_inference_steps=25).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png" alt="generated image of peasant fighting dragon in wood cutting style"/> </div> [`AutoPipelineForText2Image`] を具体的に見ていきましょう: 1. [`model_index.json`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json) ファイルから `"stable-diffusion"` クラスを自動的に検出します。 2. 
`"stable-diffusion"` のクラス名に基づいて、テキストから画像へ変換する [`StableDiffusionPipeline`] を読み込みます。 同様に、画像から画像へ変換する場合、[`AutoPipelineForImage2Image`] は `model_index.json` ファイルから `"stable-diffusion"` チェックポイントを検出し、対応する [`StableDiffusionImg2ImgPipeline`] を読み込みます。また、入力画像にノイズの量やバリエーションの追加を決めるための強さなど、パイプラインクラスに固有の追加引数を渡すこともできます: ```py from diffusers import AutoPipelineForImage2Image import torch import requests from PIL import Image from io import BytesIO pipeline = AutoPipelineForImage2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") prompt = "a portrait of a dog wearing a pearl earring" url = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0f/1665_Girl_with_a_Pearl_Earring.jpg/800px-1665_Girl_with_a_Pearl_Earring.jpg" response = requests.get(url) image = Image.open(BytesIO(response.content)).convert("RGB") image.thumbnail((768, 768)) image = pipeline(prompt, image, num_inference_steps=200, strength=0.75, guidance_scale=10.5).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png" alt="generated image of a vermeer portrait of a dog wearing a pearl earring"/> </div> また、画像の修復を行いたい場合は、 [`AutoPipelineForInpainting`] が、同様にベースとなる[`StableDiffusionInpaintPipeline`]クラスを読み込みます: ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).convert("RGB") mask_image = load_image(mask_url).convert("RGB") prompt = "A majestic tiger sitting on a bench" image = pipeline(prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png" alt="generated image of a tiger sitting on a bench"/> </div> サポートされていないチェックポイントを読み込もうとすると、エラーになります: ```py from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( "openai/shap-e-img2img", torch_dtype=torch.float16, use_safetensors=True ) "ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None" ``` ## 複数のパイプラインを使用する いくつかのワークフローや多くのパイプラインを読み込む場合、不要なメモリを使ってしまう再読み込みをするよりも、チェックポイントから同じコンポーネントを再利用する方がメモリ効率が良いです。たとえば、テキストから画像への変換にチェックポイントを使い、画像から画像への変換にまたチェックポイントを使いたい場合、[from_pipe()](https://huggingface.co/docs/diffusers/v0.25.1/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image.from_pipe) メソッドを使用します。このメソッドは、以前読み込まれたパイプラインのコンポーネントを使うことで追加のメモリを消費することなく、新しいパイプラインを作成します。 [from_pipe()](https://huggingface.co/docs/diffusers/v0.25.1/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image.from_pipe) メソッドは、元のパイプラインクラスを検出し、実行したいタスクに対応する新しいパイプラインクラスにマッピングします。例えば、テキストから画像への`"stable-diffusion"` クラスのパイプラインを読み込む場合: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch pipeline_text2img = AutoPipelineForText2Image.from_pretrained( 
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) print(type(pipeline_text2img)) "<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'>" ``` そして、[from_pipe()] (https://huggingface.co/docs/diffusers/v0.25.1/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image.from_pipe)は、もとの`"stable-diffusion"` パイプラインのクラスである [`StableDiffusionImg2ImgPipeline`] にマップします: ```py pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img) print(type(pipeline_img2img)) "<class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline'>" ``` 元のパイプラインにオプションとして引数(セーフティチェッカーの無効化など)を渡した場合、この引数も新しいパイプラインに渡されます: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch pipeline_text2img = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, requires_safety_checker=False, ).to("cuda") pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img) print(pipeline_img2img.config.requires_safety_checker) "False" ``` 新しいパイプラインの動作を変更したい場合は、元のパイプラインの引数や設定を上書きすることができます。例えば、セーフティチェッカーをオンに戻し、`strength` 引数を追加します: ```py pipeline_img2img = AutoPipelineForImage2Image.from_pipe(pipeline_text2img, requires_safety_checker=True, strength=0.3) print(pipeline_img2img.config.requires_safety_checker) "True" ```
diffusers/docs/source/ja/tutorials/autopipeline.md/0
{ "file_path": "diffusers/docs/source/ja/tutorials/autopipeline.md", "repo_id": "diffusers", "token_count": 4154 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# How to use OpenVINO for inference

🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides Stable Diffusion pipelines compatible with OpenVINO, so you can easily run inference with the OpenVINO Runtime on a variety of Intel processors (see [here](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) for the full list of supported devices).

## Installation

Install 🤗 Optimum with the following command:

```sh
pip install optimum["openvino"]
```

## Stable Diffusion inference

To load an OpenVINO model and run inference with the OpenVINO Runtime, replace `StableDiffusionPipeline` with `OVStableDiffusionPipeline`. If you want to load a PyTorch model and convert it to the OpenVINO format on the fly, set `export=True`.

```python
from optimum.intel.openvino import OVStableDiffusionPipeline

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)
prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```

You can find more examples (such as static reshaping and model compilation) in the [Optimum documentation](https://huggingface.co/docs/optimum/intel/inference#export-and-inference-of-stable-diffusion-models).
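As a pointer toward those examples, the sketch below shows the general shape of static reshaping and compilation with `optimum-intel`. Fixing the input shapes ahead of time usually speeds up OpenVINO inference; the exact keyword arguments may differ between `optimum-intel` versions, so treat this as a sketch rather than a drop-in recipe.

```python
from optimum.intel.openvino import OVStableDiffusionPipeline

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# export to OpenVINO but delay compilation until the shapes are fixed
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True, compile=False)

# statically reshape for a fixed batch size and resolution, then compile once
pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
pipe.compile()

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```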
diffusers/docs/source/ko/optimization/open_vino.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/open_vino.md", "repo_id": "diffusers", "token_count": 927 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Textual-Inversion [[open-in-colab]] [textual-inversion](https://huggingface.co/papers/2208.01618)은 소수의 예시 이미지에서 새로운 콘셉트를 포착하는 기법입니다. 이 기술은 원래 [Latent Diffusion](https://github.com/CompVis/latent-diffusion)에서 시연되었지만, 이후 [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion)과 같은 유사한 다른 모델에도 적용되었습니다. 학습된 콘셉트는 text-to-image 파이프라인에서 생성된 이미지를 더 잘 제어하는 데 사용할 수 있습니다. 이 모델은 텍스트 인코더의 임베딩 공간에서 새로운 '단어'를 학습하여 개인화된 이미지 생성을 위한 텍스트 프롬프트 내에서 사용됩니다. ![Textual Inversion example](https://textual-inversion.github.io/static/images/editing/colorful_teapot.JPG) <small>By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation <a href="https://github.com/rinongal/textual_inversion">(image source)</a>.</small> 이 가이드에서는 textual-inversion으로 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 모델을 학습하는 방법을 설명합니다. 이 가이드에서 사용된 모든 textual-inversion 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)에서 확인할 수 있습니다. 내부적으로 어떻게 작동하는지 자세히 살펴보고 싶으시다면 해당 링크를 참조해주시기 바랍니다. <Tip> [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library)에는 커뮤니티에서 제작한 학습된 textual-inversion 모델들이 있습니다. 시간이 지남에 따라 더 많은 콘셉트들이 추가되어 유용한 리소스로 성장할 것입니다! </Tip> 시작하기 전에 학습을 위한 의존성 라이브러리들을 설치해야 합니다: ```bash pip install diffusers accelerate transformers ``` 의존성 라이브러리들의 설치가 완료되면, [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화시킵니다. ```bash accelerate config ``` 별도의 설정없이, 기본 🤗Accelerate 환경을 설정하려면 다음과 같이 하세요: ```bash accelerate config default ``` 또는 사용 중인 환경이 노트북과 같은 대화형 셸을 지원하지 않는다면, 다음과 같이 사용할 수 있습니다: ```py from accelerate.utils import write_basic_config write_basic_config() ``` 마지막으로, Memory-Efficient Attention을 통해 메모리 사용량을 줄이기 위해 [xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers)를 설치합니다. xFormers를 설치한 후, 학습 스크립트에 `--enable_xformers_memory_efficient_attention` 인자를 추가합니다. xFormers는 Flax에서 지원되지 않습니다. ## 허브에 모델 업로드하기 모델을 허브에 저장하려면, 학습 스크립트에 다음 인자를 추가해야 합니다. ```bash --push_to_hub ``` ## 체크포인트 저장 및 불러오기 학습중에 모델의 체크포인트를 정기적으로 저장하는 것이 좋습니다. 이렇게 하면 어떤 이유로든 학습이 중단된 경우 저장된 체크포인트에서 학습을 다시 시작할 수 있습니다. 학습 스크립트에 다음 인자를 전달하면 500단계마다 전체 학습 상태가 `output_dir`의 하위 폴더에 체크포인트로서 저장됩니다. ```bash --checkpointing_steps=500 ``` 저장된 체크포인트에서 학습을 재개하려면, 학습 스크립트와 재개할 특정 체크포인트에 다음 인자를 전달하세요. ```bash --resume_from_checkpoint="checkpoint-1500" ``` ## 파인 튜닝 학습용 데이터셋으로 [고양이 장난감 데이터셋](https://huggingface.co/datasets/diffusers/cat_toy_example)을 다운로드하여 디렉토리에 저장하세요. 여러분만의 고유한 데이터셋을 사용하고자 한다면, [학습용 데이터셋 만들기](https://huggingface.co/docs/diffusers/training/create_dataset) 가이드를 살펴보시기 바랍니다. 
```py from huggingface_hub import snapshot_download local_dir = "./cat" snapshot_download( "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes" ) ``` 모델의 리포지토리 ID(또는 모델 가중치가 포함된 디렉터리 경로)를 `MODEL_NAME` 환경 변수에 할당하고, 해당 값을 [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) 인자에 전달합니다. 그리고 이미지가 포함된 디렉터리 경로를 `DATA_DIR` 환경 변수에 할당합니다. 이제 [학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py)를 실행할 수 있습니다. 스크립트는 다음 파일을 생성하고 리포지토리에 저장합니다. - `learned_embeds.bin` - `token_identifier.txt` - `type_of_concept.txt`. <Tip> 💡V100 GPU 1개를 기준으로 전체 학습에는 최대 1시간이 걸립니다. 학습이 완료되기를 기다리는 동안 궁금한 점이 있으면 아래 섹션에서 [textual-inversion이 어떻게 작동하는지](https://huggingface.co/docs/diffusers/training/text_inversion#how-it-works) 자유롭게 확인하세요 ! </Tip> <frameworkcontent> <pt> ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export DATA_DIR="./cat" accelerate launch textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 --scale_lr \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` <Tip> 💡학습 성능을 올리기 위해, 플레이스홀더 토큰(`<cat-toy>`)을 (단일한 임베딩 벡터가 아닌) 복수의 임베딩 벡터로 표현하는 것 역시 고려할 있습니다. 이러한 트릭이 모델이 보다 복잡한 이미지의 스타일(앞서 말한 콘셉트)을 더 잘 캡처하는 데 도움이 될 수 있습니다. 복수의 임베딩 벡터 학습을 활성화하려면 다음 옵션을 전달하십시오. ```bash --num_vectors=5 ``` </Tip> </pt> <jax> TPU에 액세스할 수 있는 경우, [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py)를 사용하여 더 빠르게 모델을 학습시켜보세요. (물론 GPU에서도 작동합니다.) 동일한 설정에서 Flax 학습 스크립트는 PyTorch 학습 스크립트보다 최소 70% 더 빨라야 합니다! ⚡️ 시작하기 앞서 Flax에 대한 의존성 라이브러리들을 설치해야 합니다. ```bash pip install -U -r requirements_flax.txt ``` 모델의 리포지토리 ID(또는 모델 가중치가 포함된 디렉터리 경로)를 `MODEL_NAME` 환경 변수에 할당하고, 해당 값을 [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) 인자에 전달합니다. 그런 다음 [학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py)를 시작할 수 있습니다. ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export DATA_DIR="./cat" python textual_inversion_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 --scale_lr \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` </jax> </frameworkcontent> ### 중간 로깅 모델의 학습 진행 상황을 추적하는 데 관심이 있는 경우, 학습 과정에서 생성된 이미지를 저장할 수 있습니다. 학습 스크립트에 다음 인수를 추가하여 중간 로깅을 활성화합니다. - `validation_prompt` : 샘플을 생성하는 데 사용되는 프롬프트(기본값은 `None`으로 설정되며, 이 때 중간 로깅은 비활성화됨) - `num_validation_images` : 생성할 샘플 이미지 수 - `validation_steps` : `validation_prompt`로부터 샘플 이미지를 생성하기 전 스텝의 수 ```bash --validation_prompt="A <cat-toy> backpack" --num_validation_images=4 --validation_steps=100 ``` ## 추론 모델을 학습한 후에는, 해당 모델을 [`StableDiffusionPipeline`]을 사용하여 추론에 사용할 수 있습니다. textual-inversion 스크립트는 기본적으로 textual-inversion을 통해 얻어진 임베딩 벡터만을 저장합니다. 해당 임베딩 벡터들은 텍스트 인코더의 임베딩 행렬에 추가되어 있습습니다. 
<frameworkcontent> <pt> <Tip> 💡 커뮤니티는 [sd-concepts-library](https://huggingface.co/sd-concepts-library) 라는 대규모의 textual-inversion 임베딩 벡터 라이브러리를 만들었습니다. textual-inversion 임베딩을 밑바닥부터 학습하는 대신, 해당 라이브러리에 본인이 찾는 textual-inversion 임베딩이 이미 추가되어 있지 않은지를 확인하는 것도 좋은 방법이 될 것 같습니다. </Tip> textual-inversion 임베딩 벡터을 불러오기 위해서는, 먼저 해당 임베딩 벡터를 학습할 때 사용한 모델을 불러와야 합니다. 여기서는 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/docs/diffusers/training/stable-diffusion-v1-5/stable-diffusion-v1-5) 모델이 사용되었다고 가정하고 불러오겠습니다. ```python from diffusers import StableDiffusionPipeline import torch model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") ``` 다음으로 `TextualInversionLoaderMixin.load_textual_inversion` 함수를 통해, textual-inversion 임베딩 벡터를 불러와야 합니다. 여기서 우리는 이전의 `<cat-toy>` 예제의 임베딩을 불러올 것입니다. ```python pipe.load_textual_inversion("sd-concepts-library/cat-toy") ``` 이제 플레이스홀더 토큰(`<cat-toy>`)이 잘 동작하는지를 확인하는 파이프라인을 실행할 수 있습니다. ```python prompt = "A <cat-toy> backpack" image = pipe(prompt, num_inference_steps=50).images[0] image.save("cat-backpack.png") ``` `TextualInversionLoaderMixin.load_textual_inversion`은 Diffusers 형식으로 저장된 텍스트 임베딩 벡터를 로드할 수 있을 뿐만 아니라, [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 형식으로 저장된 임베딩 벡터도 로드할 수 있습니다. 이렇게 하려면, 먼저 [civitAI](https://civitai.com/models/3036?modelVersionId=8387)에서 임베딩 벡터를 다운로드한 다음 로컬에서 불러와야 합니다. ```python pipe.load_textual_inversion("./charturnerv2.pt") ``` </pt> <jax> 현재 Flax에 대한 `load_textual_inversion` 함수는 없습니다. 따라서 학습 후 textual-inversion 임베딩 벡터가 모델의 일부로서 저장되었는지를 확인해야 합니다. 그런 다음은 다른 Flax 모델과 마찬가지로 실행할 수 있습니다. ```python import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline model_path = "path-to-your-trained-model" pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16) prompt = "A <cat-toy> backpack" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) image.save("cat-backpack.png") ``` </jax> </frameworkcontent> ## 작동 방식 ![Diagram from the paper showing overview](https://textual-inversion.github.io/static/images/training/training.JPG) <small>Architecture overview from the Textual Inversion <a href="https://textual-inversion.github.io/">blog post.</a></small> 일반적으로 텍스트 프롬프트는 모델에 전달되기 전에 임베딩으로 토큰화됩니다. textual-inversion은 비슷한 작업을 수행하지만, 위 다이어그램의 특수 토큰 `S*`로부터 새로운 토큰 임베딩 `v*`를 학습합니다. 모델의 아웃풋은 디퓨전 모델을 조정하는 데 사용되며, 디퓨전 모델이 단 몇 개의 예제 이미지에서 신속하고 새로운 콘셉트를 이해하는 데 도움을 줍니다. 이를 위해 textual-inversion은 제너레이터 모델과 학습용 이미지의 노이즈 버전을 사용합니다. 제너레이터는 노이즈가 적은 버전의 이미지를 예측하려고 시도하며 토큰 임베딩 `v*`은 제너레이터의 성능에 따라 최적화됩니다. 토큰 임베딩이 새로운 콘셉트를 성공적으로 포착하면 디퓨전 모델에 더 유용한 정보를 제공하고 노이즈가 적은 더 선명한 이미지를 생성하는 데 도움이 됩니다. 이러한 최적화 프로세스는 일반적으로 다양한 프롬프트와 이미지에 수천 번에 노출됨으로써 이루어집니다.
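For readers who want to see what "learning a new token embedding" boils down to in code, here is a small conceptual sketch of the setup step the training script performs before optimization starts. It only adds the placeholder token and initializes its embedding from the initializer token; the actual training loop (noise-prediction loss, freezing every other weight) lives in the script linked above.

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")

# register the placeholder token and grow the embedding matrix by one row
tokenizer.add_tokens("<cat-toy>")
text_encoder.resize_token_embeddings(len(tokenizer))

# start the new embedding from the initializer token ("toy")
placeholder_id = tokenizer.convert_tokens_to_ids("<cat-toy>")
initializer_id = tokenizer.convert_tokens_to_ids("toy")

with torch.no_grad():
    embeddings = text_encoder.get_input_embeddings().weight
    embeddings[placeholder_id] = embeddings[initializer_id].clone()
```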
diffusers/docs/source/ko/training/text_inversion.md/0
{ "file_path": "diffusers/docs/source/ko/training/text_inversion.md", "repo_id": "diffusers", "token_count": 9111 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 스케줄러 diffusion 파이프라인은 diffusion 모델, 스케줄러 등의 컴포넌트들로 구성됩니다. 그리고 파이프라인 안의 일부 컴포넌트를 다른 컴포넌트로 교체하는 식의 커스터마이징 역시 가능합니다. 이와 같은 컴포넌트 커스터마이징의 가장 대표적인 예시가 바로 [스케줄러](../api/schedulers/overview.md)를 교체하는 것입니다. 스케쥴러는 다음과 같이 diffusion 시스템의 전반적인 디노이징 프로세스를 정의합니다. - 디노이징 스텝을 얼마나 가져가야 할까? - 확률적으로(stochastic) 혹은 확정적으로(deterministic)? - 디노이징 된 샘플을 찾아내기 위해 어떤 알고리즘을 사용해야 할까? 이러한 프로세스는 다소 난해하고, 디노이징 속도와 디노이징 퀄리티 사이의 트레이드 오프를 정의해야 하는 문제가 될 수 있습니다. 주어진 파이프라인에 어떤 스케줄러가 가장 적합한지를 정량적으로 판단하는 것은 매우 어려운 일입니다. 이로 인해 일단 해당 스케줄러를 직접 사용하여, 생성되는 이미지를 직접 눈으로 보며, 정성적으로 성능을 판단해보는 것이 추천되곤 합니다. ## 파이프라인 불러오기 먼저 스테이블 diffusion 파이프라인을 불러오도록 해보겠습니다. 물론 스테이블 diffusion을 사용하기 위해서는, 허깅페이스 허브에 등록된 사용자여야 하며, 관련 [라이센스](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)에 동의해야 한다는 점을 잊지 말아주세요. *역자 주: 다만, 현재 신규로 생성한 허깅페이스 계정에 대해서는 라이센스 동의를 요구하지 않는 것으로 보입니다!* ```python from huggingface_hub import login from diffusers import DiffusionPipeline import torch # first we need to login with our access token login() # Now we can download the pipeline pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16) ``` 다음으로, GPU로 이동합니다. ```python pipeline.to("cuda") ``` ## 스케줄러 액세스 스케줄러는 언제나 파이프라인의 컴포넌트로서 존재하며, 일반적으로 파이프라인 인스턴스 내에 `scheduler`라는 이름의 속성(property)으로 정의되어 있습니다. ```python pipeline.scheduler ``` **Output**: ``` PNDMScheduler { "_class_name": "PNDMScheduler", "_diffusers_version": "0.8.0.dev0", "beta_end": 0.012, "beta_schedule": "scaled_linear", "beta_start": 0.00085, "clip_sample": false, "num_train_timesteps": 1000, "set_alpha_to_one": false, "skip_prk_steps": true, "steps_offset": 1, "trained_betas": null } ``` 출력 결과를 통해, 우리는 해당 스케줄러가 [`PNDMScheduler`]의 인스턴스라는 것을 알 수 있습니다. 이제 [`PNDMScheduler`]와 다른 스케줄러들의 성능을 비교해보도록 하겠습니다. 먼저 테스트에 사용할 프롬프트를 다음과 같이 정의해보도록 하겠습니다. ```python prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition." ``` 다음으로 유사한 이미지 생성을 보장하기 위해서, 다음과 같이 랜덤시드를 고정해주도록 하겠습니다. ```python generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_pndm.png" width="400"/> <br> </p> ## 스케줄러 교체하기 다음으로 파이프라인의 스케줄러를 다른 스케줄러로 교체하는 방법에 대해 알아보겠습니다. 모든 스케줄러는 [`SchedulerMixin.compatibles`]라는 속성(property)을 갖고 있습니다. 해당 속성은 **호환 가능한** 스케줄러들에 대한 정보를 담고 있습니다. 
```python pipeline.scheduler.compatibles ``` **Output**: ``` [diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, diffusers.schedulers.scheduling_ddim.DDIMScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, diffusers.schedulers.scheduling_pndm.PNDMScheduler, diffusers.schedulers.scheduling_ddpm.DDPMScheduler, diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler] ``` 호환되는 스케줄러들을 살펴보면 아래와 같습니다. - [`LMSDiscreteScheduler`], - [`DDIMScheduler`], - [`DPMSolverMultistepScheduler`], - [`EulerDiscreteScheduler`], - [`PNDMScheduler`], - [`DDPMScheduler`], - [`EulerAncestralDiscreteScheduler`]. 앞서 정의했던 프롬프트를 사용해서 각각의 스케줄러들을 비교해보도록 하겠습니다. 먼저 파이프라인 안의 스케줄러를 바꾸기 위해 [`ConfigMixin.config`] 속성과 [`ConfigMixin.from_config`] 메서드를 활용해보려고 합니다. ```python pipeline.scheduler.config ``` **Output**: ``` FrozenDict([('num_train_timesteps', 1000), ('beta_start', 0.00085), ('beta_end', 0.012), ('beta_schedule', 'scaled_linear'), ('trained_betas', None), ('skip_prk_steps', True), ('set_alpha_to_one', False), ('steps_offset', 1), ('_class_name', 'PNDMScheduler'), ('_diffusers_version', '0.8.0.dev0'), ('clip_sample', False)]) ``` 기존 스케줄러의 config를 호환 가능한 다른 스케줄러에 이식하는 것 역시 가능합니다. 다음 예시는 기존 스케줄러(`pipeline.scheduler`)를 다른 종류의 스케줄러(`DDIMScheduler`)로 바꾸는 코드입니다. 기존 스케줄러가 갖고 있던 config를 `.from_config` 메서드의 인자로 전달하는 것을 확인할 수 있습니다. ```python from diffusers import DDIMScheduler pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) ``` 이제 파이프라인을 실행해서 두 스케줄러 사이의 생성된 이미지의 퀄리티를 비교해봅시다. ```python generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_ddim.png" width="400"/> <br> </p> ## 스케줄러들 비교해보기 지금까지는 [`PNDMScheduler`]와 [`DDIMScheduler`] 스케줄러를 실행해보았습니다. 아직 비교해볼 스케줄러들이 더 많이 남아있으니 계속 비교해보도록 하겠습니다. [`LMSDiscreteScheduler`]을 일반적으로 더 좋은 결과를 보여줍니다. ```python from diffusers import LMSDiscreteScheduler pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_lms.png" width="400"/> <br> </p> [`EulerDiscreteScheduler`]와 [`EulerAncestralDiscreteScheduler`] 고작 30번의 inference step만으로도 높은 퀄리티의 이미지를 생성하는 것을 알 수 있습니다. 
```python from diffusers import EulerDiscreteScheduler pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_discrete.png" width="400"/> <br> </p> ```python from diffusers import EulerAncestralDiscreteScheduler pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_ancestral.png" width="400"/> <br> </p> 지금 이 문서를 작성하는 현시점 기준에선, [`DPMSolverMultistepScheduler`]가 시간 대비 가장 좋은 품질의 이미지를 생성하는 것 같습니다. 20번 정도의 스텝만으로도 실행될 수 있습니다. ```python from diffusers import DPMSolverMultistepScheduler pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_dpm.png" width="400"/> <br> </p> 보시다시피 생성된 이미지들은 매우 비슷하고, 비슷한 퀄리티를 보이는 것 같습니다. 실제로 어떤 스케줄러를 선택할 것인가는 종종 특정 이용 사례에 기반해서 결정되곤 합니다. 결국 여러 종류의 스케줄러를 직접 실행시켜보고 눈으로 직접 비교해서 판단하는 게 좋은 선택일 것 같습니다. ## Flax에서 스케줄러 교체하기 JAX/Flax 사용자인 경우 기본 파이프라인 스케줄러를 변경할 수도 있습니다. 다음은 Flax Stable Diffusion 파이프라인과 초고속 [DDPM-Solver++ 스케줄러를](../api/schedulers/multistep_dpm_solver) 사용하여 추론을 실행하는 방법에 대한 예시입니다 . ```Python import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained( model_id, subfolder="scheduler" ) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, variant="bf16", dtype=jax.numpy.bfloat16, ) params["scheduler"] = scheduler_state # Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8) prompt = "a photo of an astronaut riding a horse on mars" num_samples = jax.device_count() prompt_ids = pipeline.prepare_inputs([prompt] * num_samples) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 25 # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) ``` <Tip warning={true}> 다음 Flax 스케줄러는 *아직* Flax Stable Diffusion 파이프라인과 호환되지 않습니다. - `FlaxLMSDiscreteScheduler` - `FlaxDDPMScheduler` </Tip>
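Going back to the PyTorch pipeline used earlier in this guide, a simple way to run the comparison yourself is to loop over a few scheduler classes with the same seed and save one image per scheduler. The sketch below assumes the `pipeline` and `prompt` defined above are still in scope.

```python
import torch
from diffusers import DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler

schedulers = {
    "ddim": DDIMScheduler,
    "dpmpp": DPMSolverMultistepScheduler,
    "euler": EulerDiscreteScheduler,
    "lms": LMSDiscreteScheduler,
}

for name, scheduler_cls in schedulers.items():
    # reuse the existing config so that only the sampling algorithm changes
    pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
    generator = torch.Generator(device="cuda").manual_seed(8)
    image = pipeline(prompt, generator=generator, num_inference_steps=25).images[0]
    image.save(f"astronaut_{name}.png")
```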
diffusers/docs/source/ko/using-diffusers/schedulers.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/schedulers.md", "repo_id": "diffusers", "token_count": 6923 }
129
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# 🧨 Diffusers' Ethical Guidelines

## Preamble

[Diffusers](https://huggingface.co/docs/diffusers/index) not only provides pretrained diffusion models but also serves as a modular toolbox for inference and training.

Given the technology's real-world applications and its potential negative impact on society, we think it is important to provide ethical guidelines that steer the development of the Diffusers library, contributions from users, and how the library is used.

The risks associated with this technology are still being assessed. They include, but are not limited to: copyright concerns for artists, misuse of deepfakes, generation of sexual content in inappropriate contexts, non-consensual impersonation, and harmful social biases that reinforce the oppression of marginalized groups. We will keep tracking these risks and adapt these guidelines based on community feedback.

## Scope

The Diffusers community will apply the following ethical guidelines to the project's development and to coordinating how community contributions are integrated, especially for technical decisions that touch on ethically sensitive topics.

## Ethical guidelines

The following guidelines apply generally, but we primarily enforce them when making technical decisions on ethically sensitive issues. We also commit to adapting these principles over time as new risks emerge with the technology:

- **Transparency**: we are committed to managing PRs transparently, explaining our decisions to users, and making our technical choices public.
- **Consistency**: we are committed to offering users a consistent level of project management, keeping the work technically stable and coherent.
- **Simplicity**: to make the Diffusers library easy to use and develop, we are committed to keeping the project's goals lean and logically coherent.
- **Accessibility**: this project aims to lower the barrier to contribution so that even people without a technical background can help run it, which lets research resources reach the community more broadly.
- **Reproducibility**: for upstream code, models, and datasets released through the Diffusers library, we aim to be explicit about their reproducibility.
- **Responsibility**: as a community and a team, we share responsibility toward our users by anticipating risks and mitigating the potential harms of this technology.

## Examples of implementation: safety features and mechanisms

The team keeps developing technical and non-technical tools to address the ethical and social risks associated with diffusion technology. Community feedback is invaluable for implementing these features and raising awareness of the risks:

- [**Community discussion tabs**](https://huggingface.co/docs/hub/repositories-pull-requests-discussions): enable community members to discuss and collaborate on projects.
- **Bias exploration and evaluation**: the Hugging Face team provides an [interactive Space](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer) to demonstrate biases in Stable Diffusion. We support and encourage this kind of bias exploration and evaluation.
- **Encouraging safety in deployment**:
  - [**Safe Stable Diffusion**](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_safe): addresses the fact that models like Stable Diffusion, trained on unfiltered web-crawled data, are prone to generating inappropriate content. Related paper: [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://huggingface.co/papers/2211.05105).
  - [**Safety checker**](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py): checks generated images by comparing class probabilities against a set of hard-coded harmful concepts in the embedding space. The list of harmful concepts is intentionally obfuscated to prevent reverse engineering.
- **Staged model releases**: for highly sensitive repositories, access is gated. Staged releases give authors more control over how their work is used.
- **Licensing**: the new [OpenRAILs](https://huggingface.co/blog/open_rail) licenses preserve open access while adding usage restrictions to ensure more responsible use.
diffusers/docs/source/zh/conceptual/ethical_guidelines.md/0
{ "file_path": "diffusers/docs/source/zh/conceptual/ethical_guidelines.md", "repo_id": "diffusers", "token_count": 2687 }
130
<!--版权所有 2025 The HuggingFace Team。保留所有权利。 根据Apache许可证2.0版("许可证")授权;除非符合许可证,否则不得使用此文件。您可以在 http://www.apache.org/licenses/LICENSE-2.0 获取许可证的副本。 除非适用法律要求或书面同意,根据许可证分发的软件按"原样"分发,无任何明示或暗示的担保或条件。有关许可证下特定语言的管理权限和限制,请参阅许可证。 --> # 快速入门 模块化Diffusers是一个快速构建灵活和可定制管道的框架。模块化Diffusers的核心是[`ModularPipelineBlocks`],可以与其他块组合以适应新的工作流程。这些块被转换为[`ModularPipeline`],一个开发者可以使用的友好用户界面。 本文档将向您展示如何使用模块化框架实现[Differential Diffusion](https://differential-diffusion.github.io/)管道。 ## ModularPipelineBlocks [`ModularPipelineBlocks`]是*定义*,指定管道中单个步骤的组件、输入、输出和计算逻辑。有四种类型的块。 - [`ModularPipelineBlocks`]是最基本的单一步骤块。 - [`SequentialPipelineBlocks`]是一个多块,线性组合其他块。一个块的输出是下一个块的输入。 - [`LoopSequentialPipelineBlocks`]是一个多块,迭代运行,专为迭代工作流程设计。 - [`AutoPipelineBlocks`]是一个针对不同工作流程的块集合,它根据输入选择运行哪个块。它旨在方便地将多个工作流程打包到单个管道中。 [Differential Diffusion](https://differential-diffusion.github.io/)是一个图像到图像的工作流程。从`IMAGE2IMAGE_BLOCKS`预设开始,这是一个用于图像到图像生成的`ModularPipelineBlocks`集合。 ```py from diffusers.modular_pipelines.stable_diffusion_xl import IMAGE2IMAGE_BLOCKS IMAGE2IMAGE_BLOCKS = InsertableDict([ ("text_encoder", StableDiffusionXLTextEncoderStep), ("image_encoder", StableDiffusionXLVaeEncoderStep), ("input", StableDiffusionXLInputStep), ("set_timesteps", StableDiffusionXLImg2ImgSetTimestepsStep), ("prepare_latents", StableDiffusionXLImg2ImgPrepareLatentsStep), ("prepare_add_cond", StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep), ("denoise", StableDiffusionXLDenoiseStep), ("decode", StableDiffusionXLDecodeStep) ]) ``` ## 管道和块状态 模块化Diffusers使用*状态*在块之间通信数据。有两种类型的状态。 - [`PipelineState`]是一个全局状态,可用于跟踪所有块的所有输入和输出。 - [`BlockState`]是[`PipelineState`]中相关变量的局部视图,用于单个块。 ## 自定义块 [Differential Diffusion](https://differential-diffusion.github.io/) 与标准的图像到图像转换在其 `prepare_latents` 和 `denoise` 块上有所不同。所有其他块都可以重用,但你需要修改这两个。 通过复制和修改现有的块,为 `prepare_latents` 和 `denoise` 创建占位符 `ModularPipelineBlocks`。 打印 `denoise` 块,可以看到它由 [`LoopSequentialPipelineBlocks`] 组成,包含三个子块,`before_denoiser`、`denoiser` 和 `after_denoiser`。只需要修改 `before_denoiser` 子块,根据变化图为去噪器准备潜在输入。 ```py denoise_blocks = IMAGE2IMAGE_BLOCKS["denoise"]() print(denoise_blocks) ``` 用新的 `SDXLDiffDiffLoopBeforeDenoiser` 块替换 `StableDiffusionXLLoopBeforeDenoiser` 子块。 ```py # 复制现有块作为占位符 class SDXLDiffDiffPrepareLatentsStep(ModularPipelineBlocks): """Copied from StableDiffusionXLImg2ImgPrepareLatentsStep - will modify later""" # ... 
与 StableDiffusionXLImg2ImgPrepareLatentsStep 相同的实现 class SDXLDiffDiffDenoiseStep(StableDiffusionXLDenoiseLoopWrapper): block_classes = [SDXLDiffDiffLoopBeforeDenoiser, StableDiffusionXLLoopDenoiser, StableDiffusionXLLoopAfterDenoiser] block_names = ["before_denoiser", "denoiser", "after_denoiser"] ``` ### prepare_latents `prepare_latents` 块需要进行以下更改。 - 一个处理器来处理变化图 - 一个新的 `inputs` 来接受用户提供的变化图,`timestep` 用于预计算所有潜在变量和 `num_inference_steps` 来创建更新图像区域的掩码 - 更新 `__call__` 方法中的计算,用于处理变化图和创建掩码,并将其存储在 [`BlockState`] 中 ```diff class SDXLDiffDiffPrepareLatentsStep(ModularPipelineBlocks): @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec("scheduler", EulerDiscreteScheduler), + ComponentSpec("mask_processor", VaeImageProcessor, config=FrozenDict({"do_normalize": False, "do_convert_grayscale": True})) ] @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("generator"), + InputParam("diffdiff_map", required=True), - InputParam("latent_timestep", required=True, type_hint=torch.Tensor), + InputParam("timesteps", type_hint=torch.Tensor), + InputParam("num_inference_steps", type_hint=int), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ + OutputParam("original_latents", type_hint=torch.Tensor), + OutputParam("diffdiff_masks", type_hint=torch.Tensor), ] def __call__(self, components, state: PipelineState): # ... existing logic ... + # Process change map and create masks + diffdiff_map = components.mask_processor.preprocess(block_state.diffdiff_map, height=latent_height, width=latent_width) + thresholds = torch.arange(block_state.num_inference_steps, dtype=diffdiff_map.dtype) / block_state.num_inference_steps + block_state.diffdiff_masks = diffdiff_map > (thresholds + (block_state.denoising_start or 0)) + block_state.original_latents = block_state.latents ``` ### 去噪 `before_denoiser` 子块需要进行以下更改。 - 新的 `inputs` 以接受 `denoising_start` 参数,`original_latents` 和 `diffdiff_masks` 来自 `prepare_latents` 块 - 更新 `__call__` 方法中的计算以应用 Differential Diffusion ```diff class SDXLDiffDiffLoopBeforeDenoiser(ModularPipelineBlocks): @property def description(self) -> str: return ( "Step within the denoising loop for differential diffusion that prepare the latent input for the denoiser" ) @property def inputs(self) -> List[str]: return [ InputParam("latents", required=True, type_hint=torch.Tensor), + InputParam("denoising_start"), + InputParam("original_latents", type_hint=torch.Tensor), + InputParam("diffdiff_masks", type_hint=torch.Tensor), ] def __call__(self, components, block_state, i, t): + # Apply differential diffusion logic + if i == 0 and block_state.denoising_start is None: + block_state.latents = block_state.original_latents[:1] + else: + block_state.mask = block_state.diffdiff_masks[i].unsqueeze(0).unsqueeze(1) + block_state.latents = block_state.original_latents[i] * block_state.mask + block_state.latents * (1 - block_state.mask) # ... rest of existing logic ... 
```

## Assembling the blocks

At this point, you should have all the blocks needed to create a [`ModularPipeline`].

Copy the existing `IMAGE2IMAGE_BLOCKS` preset and, for the `set_timesteps` block, use the `set_timesteps` from `TEXT2IMAGE_BLOCKS` because Differential Diffusion doesn't need the `strength` parameter.

Set the `prepare_latents` and `denoise` blocks to the `SDXLDiffDiffPrepareLatentsStep` and `SDXLDiffDiffDenoiseStep` blocks you just modified.

Call [`SequentialPipelineBlocks.from_blocks_dict`] on the blocks to create a `SequentialPipelineBlocks`.

```py
DIFFDIFF_BLOCKS = IMAGE2IMAGE_BLOCKS.copy()
DIFFDIFF_BLOCKS["set_timesteps"] = TEXT2IMAGE_BLOCKS["set_timesteps"]
DIFFDIFF_BLOCKS["prepare_latents"] = SDXLDiffDiffPrepareLatentsStep
DIFFDIFF_BLOCKS["denoise"] = SDXLDiffDiffDenoiseStep

dd_blocks = SequentialPipelineBlocks.from_blocks_dict(DIFFDIFF_BLOCKS)
print(dd_blocks)
```

## ModularPipeline

Convert the [`SequentialPipelineBlocks`] into a [`ModularPipeline`] with the [`ModularPipeline.init_pipeline`] method. This initializes the expected components from a `modular_model_index.json` file; the components themselves are loaded by calling [`~ModularPipeline.load_default_components`].

It's a good idea to initialize the pipeline with a [`ComponentsManager`] to help manage the different components. Once [`~ModularPipeline.load_default_components`] is called, the components are registered in the [`ComponentsManager`] and can be shared between workflows. The example below uses the `collection` argument to assign a `"diffdiff"` label to the components for better organization.

```py
from diffusers.modular_pipelines import ComponentsManager

components = ComponentsManager()

dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", components_manager=components, collection="diffdiff")
dd_pipeline.load_default_components(torch_dtype=torch.float16)
dd_pipeline.to("cuda")
```

## Adding workflows

Additional workflows can be added to a [`ModularPipeline`] to support more features without rewriting the whole pipeline from scratch.

This section demonstrates how to add IP-Adapter or ControlNet.

### IP-Adapter

Stable Diffusion XL already has a preset IP-Adapter block you can use, and it doesn't require any changes to the existing Differential Diffusion pipeline.

```py
from diffusers.modular_pipelines.stable_diffusion_xl.encoders import StableDiffusionXLAutoIPAdapterStep

ip_adapter_block = StableDiffusionXLAutoIPAdapterStep()
```

Insert it into the [`ModularPipeline`] with the [`sub_blocks.insert`] method. The example below inserts the `ip_adapter_block` at position `0`. Print the pipeline to see that the `ip_adapter_block` was added and that it requires an `ip_adapter_image`. This also adds two components to the pipeline, `image_encoder` and `feature_extractor`.

```py
dd_blocks.sub_blocks.insert("ip_adapter", ip_adapter_block, 0)
```

Call [`~ModularPipeline.init_pipeline`] to initialize a [`ModularPipeline`] and load the model components with [`~ModularPipeline.load_default_components`]. Load and set the IP-Adapter to run the pipeline.

```py
dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff")
dd_pipeline.load_default_components(torch_dtype=torch.float16)
dd_pipeline.loader.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
dd_pipeline.loader.set_ip_adapter_scale(0.6)
dd_pipeline = dd_pipeline.to(device)

ip_adapter_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/diffdiff_orange.jpeg")
image = load_image("https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/20240329211129_4024911930.png?download=true")
mask = load_image("https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/gradient_mask.png?download=true")

prompt = "a green pear"
negative_prompt = "blurry"

generator = torch.Generator(device=device).manual_seed(42)
image = dd_pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=25,
    generator=generator,
    ip_adapter_image=ip_adapter_image,
    diffdiff_map=mask,
    image=image,
    output="images"
)[0]
```

### ControlNet

Stable Diffusion XL already has a preset ControlNet block that is ready to use.

```py
from diffusers.modular_pipelines.stable_diffusion_xl.modular_blocks import StableDiffusionXLAutoControlNetInputStep

control_input_block = StableDiffusionXLAutoControlNetInputStep()
```
However, it does require modifying the `denoise` block, because that is where ControlNet injects the control information into the UNet.

Modify the `denoise` block by replacing the `StableDiffusionXLLoopDenoiser` sub-block with `StableDiffusionXLControlNetLoopDenoiser`.

```py
class SDXLDiffDiffControlNetDenoiseStep(StableDiffusionXLDenoiseLoopWrapper):
    block_classes = [SDXLDiffDiffLoopBeforeDenoiser, StableDiffusionXLControlNetLoopDenoiser, StableDiffusionXLDenoiseLoopAfterDenoiser]
    block_names = ["before_denoiser", "denoiser", "after_denoiser"]

controlnet_denoise_block = SDXLDiffDiffControlNetDenoiseStep()
```

Insert the `controlnet_input` block and replace the `denoise` block with the new `controlnet_denoise_block`. Initialize a [`ModularPipeline`] and load the model components into it with [`~ModularPipeline.load_default_components`].

```py
dd_blocks.sub_blocks.insert("controlnet_input", control_input_block, 7)
dd_blocks.sub_blocks["denoise"] = controlnet_denoise_block

dd_pipeline = dd_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff")
dd_pipeline.load_default_components(torch_dtype=torch.float16)
dd_pipeline = dd_pipeline.to(device)

control_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/diffdiff_tomato_canny.jpeg")
image = load_image("https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/20240329211129_4024911930.png?download=true")
mask = load_image("https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/differential/gradient_mask.png?download=true")

prompt = "a green pear"
negative_prompt = "blurry"

generator = torch.Generator(device=device).manual_seed(42)
image = dd_pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=25,
    generator=generator,
    control_image=control_image,
    controlnet_conditioning_scale=0.5,
    diffdiff_map=mask,
    image=image,
    output="images"
)[0]
```

### AutoPipelineBlocks

The Differential Diffusion, IP-Adapter, and ControlNet workflows can be bundled into a single [`ModularPipeline`] with [`AutoPipelineBlocks`]. This allows the sub-block to run to be selected automatically based on inputs such as `control_image` or `ip_adapter_image`. If none of these inputs are passed, it defaults to Differential Diffusion.

Use `block_trigger_inputs` to only run the `SDXLDiffDiffControlNetDenoiseStep` block when a `control_image` input is provided. Otherwise, `SDXLDiffDiffDenoiseStep` is used.

```py
class SDXLDiffDiffAutoDenoiseStep(AutoPipelineBlocks):
    block_classes = [SDXLDiffDiffControlNetDenoiseStep, SDXLDiffDiffDenoiseStep]
    block_names = ["controlnet_denoise", "denoise"]
    block_trigger_inputs = ["controlnet_cond", None]
```

Add the `ip_adapter` and `controlnet_input` blocks.

```py
DIFFDIFF_AUTO_BLOCKS = IMAGE2IMAGE_BLOCKS.copy()
DIFFDIFF_AUTO_BLOCKS["prepare_latents"] = SDXLDiffDiffPrepareLatentsStep
DIFFDIFF_AUTO_BLOCKS["set_timesteps"] = TEXT2IMAGE_BLOCKS["set_timesteps"]
DIFFDIFF_AUTO_BLOCKS["denoise"] = SDXLDiffDiffAutoDenoiseStep
DIFFDIFF_AUTO_BLOCKS.insert("ip_adapter", StableDiffusionXLAutoIPAdapterStep, 0)
DIFFDIFF_AUTO_BLOCKS.insert("controlnet_input", StableDiffusionXLControlNetAutoInput, 7)
```

Call [`SequentialPipelineBlocks.from_blocks_dict`] to create a [`SequentialPipelineBlocks`], then create a [`ModularPipeline`] and load the model components to run it.

```py
dd_auto_blocks = SequentialPipelineBlocks.from_blocks_dict(DIFFDIFF_AUTO_BLOCKS)

dd_pipeline = dd_auto_blocks.init_pipeline("YiYiXu/modular-demo-auto", collection="diffdiff")
dd_pipeline.load_default_components(torch_dtype=torch.float16)
```

## Sharing

Add your [`ModularPipeline`] to the Hub with [`~ModularPipeline.save_pretrained`] and set the `push_to_hub` argument to `True`.

```py
dd_pipeline.save_pretrained("YiYiXu/test_modular_doc", push_to_hub=True)
```

Other users can load the [`ModularPipeline`] with [`~ModularPipeline.from_pretrained`].

```py
import torch
from diffusers.modular_pipelines import ModularPipeline, ComponentsManager

components = ComponentsManager()
diffdiff_pipeline = ModularPipeline.from_pretrained(
    "YiYiXu/modular-diffdiff-0704", trust_remote_code=True, components_manager=components, collection="diffdiff"
)
diffdiff_pipeline.load_default_components(torch_dtype=torch.float16)
```
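If the per-step masking logic in `SDXLDiffDiffPrepareLatentsStep` feels abstract, the standalone sketch below reproduces just the mask schedule on a toy change map with plain PyTorch. It only illustrates the thresholding idea used above (values higher in the change map stay above the threshold for more denoising steps); it is not part of the pipeline.

```py
import torch

# toy 1x4 "change map": 0 keeps the input pixel, 1 lets it change freely
diffdiff_map = torch.tensor([[0.0, 0.2, 0.6, 1.0]])

num_inference_steps = 5
thresholds = torch.arange(num_inference_steps, dtype=diffdiff_map.dtype) / num_inference_steps

# one boolean mask per denoising step, shaped (steps, H, W)
masks = diffdiff_map.unsqueeze(0) > thresholds.view(-1, 1, 1)
print(masks.int())
```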
diffusers/docs/source/zh/modular_diffusers/quickstart.md/0
{ "file_path": "diffusers/docs/source/zh/modular_diffusers/quickstart.md", "repo_id": "diffusers", "token_count": 7558 }
131
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Token merging

[Token merging](https://huggingface.co/papers/2303.17604) (ToMe) progressively merges redundant tokens/patches in the forward pass of a Transformer-based network, which can speed up the inference latency of [`StableDiffusionPipeline`].

Install ToMe from `pip`:

```bash
pip install tomesd
```

You can use the [`apply_patch`](https://github.com/dbolya/tomesd?tab=readme-ov-file#usage) function from the [`tomesd`](https://github.com/dbolya/tomesd) library:

```diff
  from diffusers import StableDiffusionPipeline
  import torch
  import tomesd

  pipeline = StableDiffusionPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
  ).to("cuda")
+ tomesd.apply_patch(pipeline, ratio=0.5)

  image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```

The `apply_patch` function exposes a number of [arguments](https://github.com/dbolya/tomesd#usage) to help balance pipeline inference speed against the quality of the generated tokens. The most important argument is `ratio`, which controls the number of tokens merged during the forward pass.

As reported in the [paper](https://huggingface.co/papers/2303.17604), ToMe can largely preserve the quality of the generated images while significantly boosting inference speed. By increasing the `ratio`, you can speed up inference even further, at the cost of some image quality degradation.

To test the quality of the generated images, we sampled a few prompts from [Parti Prompts](https://parti.research.google/) and ran inference with the [`StableDiffusionPipeline`] with the following settings:

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png">
</div>

We didn't notice any significant decrease in the quality of the generated samples, and you can check out the generated samples in this [WandB report](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=). If you're interested in reproducing this experiment, use this [script](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd).

## Benchmarks

We also benchmarked the impact of `tomesd` on the [`StableDiffusionPipeline`] with [xFormers](https://huggingface.co/docs/diffusers/optimization/xformers) enabled across several image resolutions. The results were obtained from A100 and V100 GPUs in the following development environment:

```bash
- `diffusers` version: 0.15.1
- Python version: 3.8.16
- PyTorch version (GPU?): 1.13.1+cu116 (True)
- Huggingface_hub version: 0.13.2
- Transformers version: 4.27.2
- Accelerate version: 0.18.0
- xFormers version: 0.0.16
- tomesd version: 0.1.2
```

To reproduce this benchmark, feel free to use this [script](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335). The results are reported in seconds and, where applicable, we report the speed-up percentage over the vanilla pipeline when using ToMe and ToMe + xFormers.

| **GPU**  | **Resolution** | **Batch size** | **Vanilla** | **ToMe**       | **ToMe + xFormers** |
|----------|----------------|----------------|-------------|----------------|---------------------|
| **A100** | 512            | 10             | 6.88        | 5.26 (+23.55%) | 4.69 (+31.83%)      |
|          | 768            | 10             | OOM         | 14.71          | 11                  |
|          |                | 8              | OOM         | 11.56          | 8.84                |
|          |                | 4              | OOM         | 5.98           | 4.66                |
|          |                | 2              | 4.99        | 3.24 (+35.07%) | 2.1 (+37.88%)       |
|          |                | 1              | 3.29        | 2.24 (+31.91%) | 2.03 (+38.3%)       |
|          | 1024           | 10             | OOM         | OOM            | OOM                 |
|          |                | 8              | OOM         | OOM            | OOM                 |
|          |                | 4              | OOM         | 12.51          | 9.09                |
|          |                | 2              | OOM         | 6.52           | 4.96                |
|          |                | 1              | 6.4         | 3.61 (+43.59%) | 2.81 (+56.09%)      |
| **V100** | 512            | 10             | OOM         | 10.03          | 9.29                |
|          |                | 8              | OOM         | 8.05           | 7.47                |
|          |                | 4              | 5.7         | 4.3 (+24.56%)  | 3.98 (+30.18%)      |
|          |                | 2              | 3.14        | 2.43 (+22.61%) | 2.27 (+27.71%)      |
|          |                | 1              | 1.88        | 1.57 (+16.49%) | 1.57 (+16.49%)      |
|          | 768            | 10             | OOM         | OOM            | 23.67               |
|          |                | 8              | OOM         | OOM            | 18.81               |
|          |                | 4              | OOM         | 11.81          | 9.7                 |
|          |                | 2              | OOM         | 6.27           | 5.2                 |
|          |                | 1              | 5.43        | 3.38 (+37.75%) | 2.82 (+48.07%)      |
|          | 1024           | 10             | OOM         |                |                     |

As shown in the tables above, the speed-up from `tomesd` becomes more pronounced at larger image resolutions. Interestingly, `tomesd` also makes it possible to run the pipeline at higher resolutions such as 1024x1024. You may be able to speed up inference even further with [`torch.compile`](fp16#torchcompile).
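To get a feel for the speed/quality trade-off on your own hardware, a small sweep over `ratio` values is usually enough. The sketch below uses the same `tomesd` API shown above; `remove_patch` restores the original attention blocks between runs.

```python
import time

import torch
import tomesd
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"

for ratio in (0.3, 0.5, 0.75):
    tomesd.apply_patch(pipeline, ratio=ratio)
    start = time.perf_counter()
    image = pipeline(prompt).images[0]
    print(f"ratio={ratio}: {time.perf_counter() - start:.2f}s")
    image.save(f"tome_ratio_{int(ratio * 100)}.png")
    # undo the patch before trying the next ratio
    tomesd.remove_patch(pipeline)
```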
diffusers/docs/source/zh/optimization/tome.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/tome.md", "repo_id": "diffusers", "token_count": 3963 }
132
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# ConsisID

[ConsisID](https://github.com/PKU-YuanGroup/ConsisID) is an identity-preserving text-to-video generation model that keeps the face consistent in the generated video through frequency decomposition. It has the following features:

- Frequency decomposition: identity features are decoupled into high-frequency and low-frequency parts, the characteristics of the DiT architecture are analyzed from a frequency-domain perspective, and a suitable control-signal injection scheme is designed based on those characteristics.
- Consistency training strategy: a coarse-to-fine training strategy, a dynamic mask loss, and a dynamic cross-face loss further improve the model's generalization ability and identity preservation.
- Tuning-free inference: previous methods require case-by-case fine-tuning on the input identity before inference, which is costly in time and compute, whereas this method is tuning-free.

This guide will walk you through using ConsisID to generate identity-preserving videos.

## Load Model Checkpoints

Model weights may be stored in separate subfolders on the Hub or locally, in which case you should use the [`~DiffusionPipeline.from_pretrained`] method.

```python
# !pip install consisid_eva_clip insightface facexlib
import torch
from diffusers import ConsisIDPipeline
from diffusers.pipelines.consisid.consisid_utils import prepare_face_models, process_face_embeddings_infer
from huggingface_hub import snapshot_download

# Download ckpts
snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")

# Load face helper model to preprocess input face image
face_helper_1, face_helper_2, face_clip_model, face_main_model, eva_transform_mean, eva_transform_std = prepare_face_models("BestWishYsh/ConsisID-preview", device="cuda", dtype=torch.bfloat16)

# Load consisid base model
pipe = ConsisIDPipeline.from_pretrained("BestWishYsh/ConsisID-preview", torch_dtype=torch.bfloat16)
pipe.to("cuda")
```

## Identity-Preserving Text-to-Video

Identity-preserving text-to-video generation takes a text prompt and an image containing a clear face (for example, preferably half-body or full-body). By default, ConsisID generates 720x480 videos for the best results.

```python
from diffusers.utils import export_to_video

prompt = "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel."
image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_input.png?download=true" id_cond, id_vit_hidden, image, face_kps = process_face_embeddings_infer(face_helper_1, face_clip_model, face_helper_2, eva_transform_mean, eva_transform_std, face_main_model, "cuda", torch.bfloat16, image, is_align_face=True) video = pipe(image=image, prompt=prompt, num_inference_steps=50, guidance_scale=6.0, use_dynamic_cfg=False, id_vit_hidden=id_vit_hidden, id_cond=id_cond, kps_cond=face_kps, generator=torch.Generator("cuda").manual_seed(42)) export_to_video(video.frames[0], "output.mp4", fps=8) ``` <table> <tr> <th style="text-align: center;">Face Image</th> <th style="text-align: center;">Video</th> <th style="text-align: center;">Description</th </tr> <tr> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_0.png?download=true" style="height: auto; width: 600px;"></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_0.gif?download=true" style="height: auto; width: 2000px;"></td> <td>The video, in a beautifully crafted animated style, features a confident woman riding a horse through a lush forest clearing. Her expression is focused yet serene as she adjusts her wide-brimmed hat with a practiced hand. She wears a flowy bohemian dress, which moves gracefully with the rhythm of the horse, the fabric flowing fluidly in the animated motion. The dappled sunlight filters through the trees, casting soft, painterly patterns on the forest floor. Her posture is poised, showing both control and elegance as she guides the horse with ease. The animation's gentle, fluid style adds a dreamlike quality to the scene, with the woman’s calm demeanor and the peaceful surroundings evoking a sense of freedom and harmony.</td> </tr> <tr> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_1.png?download=true" style="height: auto; width: 600px;"></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_1.gif?download=true" style="height: auto; width: 2000px;"></td> <td>The video, in a captivating animated style, shows a woman standing in the center of a snowy forest, her eyes narrowed in concentration as she extends her hand forward. She is dressed in a deep blue cloak, her breath visible in the cold air, which is rendered with soft, ethereal strokes. A faint smile plays on her lips as she summons a wisp of ice magic, watching with focus as the surrounding trees and ground begin to shimmer and freeze, covered in delicate ice crystals. The animation’s fluid motion brings the magic to life, with the frost spreading outward in intricate, sparkling patterns. The environment is painted with soft, watercolor-like hues, enhancing the magical, dreamlike atmosphere. 
The overall mood is serene yet powerful, with the quiet winter air amplifying the delicate beauty of the frozen scene.</td> </tr> <tr> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_2.png?download=true" style="height: auto; width: 600px;"></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_2.gif?download=true" style="height: auto; width: 2000px;"></td> <td>The animation features a whimsical portrait of a balloon seller standing in a gentle breeze, captured with soft, hazy brushstrokes that evoke the feel of a serene spring day. His face is framed by a gentle smile, his eyes squinting slightly against the sun, while a few wisps of hair flutter in the wind. He is dressed in a light, pastel-colored shirt, and the balloons around him sway with the wind, adding a sense of playfulness to the scene. The background blurs softly, with hints of a vibrant market or park, enhancing the light-hearted, yet tender mood of the moment.</td> </tr> <tr> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_3.png?download=true" style="height: auto; width: 600px;"></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_3.gif?download=true" style="height: auto; width: 2000px;"></td> <td>The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel.</td> </tr> <tr> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_4.png?download=true" style="height: auto; width: 600px;"></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_4.gif?download=true" style="height: auto; width: 2000px;"></td> <td>The video features a baby wearing a bright superhero cape, standing confidently with arms raised in a powerful pose. The baby has a determined look on their face, with eyes wide and lips pursed in concentration, as if ready to take on a challenge. The setting appears playful, with colorful toys scattered around and a soft rug underfoot, while sunlight streams through a nearby window, highlighting the fluttering cape and adding to the impression of heroism. 
The overall atmosphere is lighthearted and fun, with the baby's expressions capturing a mix of innocence and an adorable attempt at bravery, as if truly ready to save the day.</td>
  </tr>
</table>

## Resources

Learn more about ConsisID with the following resources:
- a [video](https://www.youtube.com/watch?v=PhlgC-bI5SQ) demonstrating ConsisID's main features;
- the research paper, [Identity-Preserving Text-to-Video Generation by Frequency Decomposition](https://hf.co/papers/2411.17440), for more details.
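As a practical note, if the full bfloat16 pipeline does not fit on your GPU, the usual Diffusers memory savers are worth trying first. The sketch below assumes ConsisID exposes the same offloading and VAE tiling helpers as other DiT-based video pipelines in Diffusers; if a call is unavailable in your version, simply drop it.

```python
# trade speed for memory: offload submodules to the CPU between uses
pipe.enable_model_cpu_offload()

# decode the video latents in slices/tiles to reduce peak VRAM
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()
```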
diffusers/docs/source/zh/using-diffusers/consisid.md/0
{ "file_path": "diffusers/docs/source/zh/using-diffusers/consisid.md", "repo_id": "diffusers", "token_count": 3518 }
133
from typing import Optional import torch from PIL import Image from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers.image_processor import VaeImageProcessor from diffusers.utils import ( deprecate, ) class EDICTPipeline(DiffusionPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, mixing_coeff: float = 0.93, leapfrog_steps: bool = True, ): self.mixing_coeff = mixing_coeff self.leapfrog_steps = leapfrog_steps super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt( self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False ): text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device) if do_classifier_free_guidance: uncond_tokens = "" if negative_prompt is None else negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor): x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x return [x, y] def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor): y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff return [x, y] def _get_alpha_and_beta(self, t: torch.Tensor): # as self.alphas_cumprod is always in cpu t = int(t) alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod return alpha_prod, 1 - alpha_prod def noise_step( self, base: torch.Tensor, model_input: torch.Tensor, model_output: torch.Tensor, timestep: torch.Tensor, ): prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep) alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep) a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5 b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5 next_model_input = (base - b_t * model_output) / a_t return model_input, next_model_input.to(base.dtype) def denoise_step( self, base: torch.Tensor, model_input: torch.Tensor, model_output: torch.Tensor, timestep: torch.Tensor, ): prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep) alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep) a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5 b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5 next_model_input = 
a_t * base + b_t * model_output return model_input, next_model_input.to(base.dtype) @torch.no_grad() def decode_latents(self, latents: torch.Tensor): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) return image @torch.no_grad() def prepare_latents( self, image: Image.Image, text_embeds: torch.Tensor, timesteps: torch.Tensor, guidance_scale: float, generator: Optional[torch.Generator] = None, ): do_classifier_free_guidance = guidance_scale > 1.0 image = image.to(device=self.device, dtype=text_embeds.dtype) latent = self.vae.encode(image).latent_dist.sample(generator) latent = self.vae.config.scaling_factor * latent coupled_latents = [latent.clone(), latent.clone()] for i, t in tqdm(enumerate(timesteps), total=len(timesteps)): coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1]) # j - model_input index, k - base index for j in range(2): k = j ^ 1 if self.leapfrog_steps: if i % 2 == 0: k, j = j, k model_input = coupled_latents[j] base = coupled_latents[k] latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) base, model_input = self.noise_step( base=base, model_input=model_input, model_output=noise_pred, timestep=t, ) coupled_latents[k] = model_input return coupled_latents @torch.no_grad() def __call__( self, base_prompt: str, target_prompt: str, image: Image.Image, guidance_scale: float = 3.0, num_inference_steps: int = 50, strength: float = 0.8, negative_prompt: Optional[str] = None, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", ): do_classifier_free_guidance = guidance_scale > 1.0 image = self.image_processor.preprocess(image) base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance) target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, self.device) t_limit = num_inference_steps - int(num_inference_steps * strength) fwd_timesteps = self.scheduler.timesteps[t_limit:] bwd_timesteps = fwd_timesteps.flip(0) coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator) for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)): # j - model_input index, k - base index for k in range(2): j = k ^ 1 if self.leapfrog_steps: if i % 2 == 1: k, j = j, k model_input = coupled_latents[j] base = coupled_latents[k] latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) base, model_input = self.denoise_step( base=base, model_input=model_input, model_output=noise_pred, timestep=t, ) coupled_latents[k] = model_input coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1]) # either one is fine final_latent = coupled_latents[0] if output_type not in ["latent", "pt", "np", "pil"]: deprecation_message = ( f"the output_type {output_type} is outdated. 
Please make sure to set it to one of these instead: " "`pil`, `np`, `pt`, `latent`" ) deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) output_type = "np" if output_type == "latent": image = final_latent else: image = self.decode_latents(final_latent) image = self.image_processor.postprocess(image, output_type=output_type) return image
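# ---------------------------------------------------------------------------------
# Usage illustration (added, not part of the original pipeline file): a minimal,
# hedged sketch of how the EDICTPipeline defined above could be assembled and called.
# The base checkpoint name and the image paths are placeholders/assumptions; the class
# and argument names come directly from the definitions above.
if __name__ == "__main__":
    # All of the model classes and `Image` are already imported at the top of this module.
    model_id = "CompVis/stable-diffusion-v1-4"  # assumed Stable Diffusion 1.x checkpoint

    pipe = EDICTPipeline(
        vae=AutoencoderKL.from_pretrained(model_id, subfolder="vae"),
        text_encoder=CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder"),
        tokenizer=CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer"),
        unet=UNet2DConditionModel.from_pretrained(model_id, subfolder="unet"),
        # set_alpha_to_one=False keeps a usable final_alpha_cumprod for _get_alpha_and_beta
        scheduler=DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", set_alpha_to_one=False),
    ).to("cuda")

    # EDICT edits an existing image: `base_prompt` describes the input image,
    # `target_prompt` describes the desired edit.
    input_image = Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder path
    edited = pipe(
        base_prompt="a photo of a dog sitting on the grass",
        target_prompt="a photo of a cat sitting on the grass",
        image=input_image,
        guidance_scale=3.0,
        num_inference_steps=50,
        strength=0.8,
    )
    # __call__ post-processes to a list of PIL images when output_type="pil" (the default).
    edited[0].save("edited.png")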
diffusers/examples/community/edict_pipeline.py/0
{ "file_path": "diffusers/examples/community/edict_pipeline.py", "repo_id": "diffusers", "token_count": 4682 }
134
import inspect import re from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTokenizer import diffusers from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import logging try: from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE except ImportError: ORT_TO_NP_TYPE = { "tensor(bool)": np.bool_, "tensor(int8)": np.int8, "tensor(uint8)": np.uint8, "tensor(int16)": np.int16, "tensor(uint16)": np.uint16, "tensor(int32)": np.int32, "tensor(uint32)": np.uint32, "tensor(int64)": np.int64, "tensor(uint64)": np.uint64, "tensor(float16)": np.float16, "tensor(float)": np.float32, "tensor(double)": np.float64, } try: from diffusers.utils import PIL_INTERPOLATION except ImportError: if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ logger = logging.get_logger(__name__) # pylint: disable=invalid-name re_attention = re.compile( r""" \\\(| \\\)| \\\[| \\]| \\\\| \\| \(| \[| :([+-]?[.\d]+)\)| \)| ]| [^\\()\[\]:]+| : """, re.X, ) def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: res.append([text, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_with_weights(pipe, prompt: List[str], max_length: int): r""" Tokenize a list of prompts and return its tokens with weights of each token. No padding, starting or ending token is included. """ tokens = [] weights = [] truncated = False for text in prompt: texts_and_weights = parse_prompt_attention(text) text_token = [] text_weight = [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1] text_token += list(token) # copy the weight by length of token text_weight += [weight] * len(token) # stop if the text is too long (longer than truncation limit) if len(text_token) > max_length: truncated = True break # truncate if len(text_token) > max_length: truncated = True text_token = text_token[:max_length] text_weight = text_weight[:max_length] tokens.append(text_token) weights.append(text_weight) if truncated: logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples") return tokens, weights def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77): r""" Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. 
""" max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length for i in range(len(tokens)): tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos] if no_boseos_middle: weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) else: w = [] if len(weights[i]) == 0: w = [1.0] * weights_length else: for j in range(max_embeddings_multiples): w.append(1.0) # weight for starting token in this chunk w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))] w.append(1.0) # weight for ending token in this chunk w += [1.0] * (weights_length - len(w)) weights[i] = w[:] return tokens, weights def get_unweighted_text_embeddings( pipe, text_input: np.array, chunk_length: int, no_boseos_middle: Optional[bool] = True, ): """ When the length of tokens is a multiple of the capacity of the text encoder, it should be split into chunks and sent to the text encoder individually. """ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) if max_embeddings_multiples > 1: text_embeddings = [] for i in range(max_embeddings_multiples): # extract the i-th chunk text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy() # cover the head and the tail by the starting and the ending tokens text_input_chunk[:, 0] = text_input[0, 0] text_input_chunk[:, -1] = text_input[0, -1] text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0] if no_boseos_middle: if i == 0: # discard the ending token text_embedding = text_embedding[:, :-1] elif i == max_embeddings_multiples - 1: # discard the starting token text_embedding = text_embedding[:, 1:] else: # discard both starting and ending tokens text_embedding = text_embedding[:, 1:-1] text_embeddings.append(text_embedding) text_embeddings = np.concatenate(text_embeddings, axis=1) else: text_embeddings = pipe.text_encoder(input_ids=text_input)[0] return text_embeddings def get_weighted_text_embeddings( pipe, prompt: Union[str, List[str]], uncond_prompt: Optional[Union[str, List[str]]] = None, max_embeddings_multiples: Optional[int] = 4, no_boseos_middle: Optional[bool] = False, skip_parsing: Optional[bool] = False, skip_weighting: Optional[bool] = False, **kwargs, ): r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean. Args: pipe (`OnnxStableDiffusionPipeline`): Pipe to provide access to the tokenizer and the text encoder. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. uncond_prompt (`str` or `List[str]`): The unconditional prompt or prompts for guide the image generation. If unconditional prompt is provided, the embeddings of prompt and uncond_prompt are concatenated. max_embeddings_multiples (`int`, *optional*, defaults to `1`): The max multiple length of prompt embeddings compared to the max output length of text encoder. no_boseos_middle (`bool`, *optional*, defaults to `False`): If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and ending token in each of the chunk in the middle. 
skip_parsing (`bool`, *optional*, defaults to `False`): Skip the parsing of brackets. skip_weighting (`bool`, *optional*, defaults to `False`): Skip the weighting. When the parsing is skipped, it is forced True. """ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 if isinstance(prompt, str): prompt = [prompt] if not skip_parsing: prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2) if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2) else: prompt_tokens = [ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids ] prompt_weights = [[1.0] * len(token) for token in prompt_tokens] if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens = [ token[1:-1] for token in pipe.tokenizer( uncond_prompt, max_length=max_length, truncation=True, return_tensors="np", ).input_ids ] uncond_weights = [[1.0] * len(token) for token in uncond_tokens] # round up the longest length of tokens to a multiple of (model_max_length - 2) max_length = max([len(token) for token in prompt_tokens]) if uncond_prompt is not None: max_length = max(max_length, max([len(token) for token in uncond_tokens])) max_embeddings_multiples = min( max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1, ) max_embeddings_multiples = max(1, max_embeddings_multiples) max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 # pad the length of tokens and weights bos = pipe.tokenizer.bos_token_id eos = pipe.tokenizer.eos_token_id pad = getattr(pipe.tokenizer, "pad_token_id", eos) prompt_tokens, prompt_weights = pad_tokens_and_weights( prompt_tokens, prompt_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) prompt_tokens = np.array(prompt_tokens, dtype=np.int32) if uncond_prompt is not None: uncond_tokens, uncond_weights = pad_tokens_and_weights( uncond_tokens, uncond_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) uncond_tokens = np.array(uncond_tokens, dtype=np.int32) # get the embeddings text_embeddings = get_unweighted_text_embeddings( pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype) if uncond_prompt is not None: uncond_embeddings = get_unweighted_text_embeddings( pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype) # assign weights to the prompts and normalize in the sense of mean # TODO: should we normalize by chunk or in a whole (current implementation)? if (not skip_parsing) and (not skip_weighting): previous_mean = text_embeddings.mean(axis=(-2, -1)) text_embeddings *= prompt_weights[:, :, None] text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None] if uncond_prompt is not None: previous_mean = uncond_embeddings.mean(axis=(-2, -1)) uncond_embeddings *= uncond_weights[:, :, None] uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None] # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if uncond_prompt is not None: return text_embeddings, uncond_embeddings return text_embeddings def preprocess_image(image): w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 1.0 def preprocess_mask(mask, scale_factor=8): mask = mask.convert("L") w, h = mask.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? mask = 1 - mask # repaint white, keep black return mask class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing weighting in prompt. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) """ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"): def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: SchedulerMixin, safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, ) self.__init__additional__() else: def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: SchedulerMixin, safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, ): super().__init__( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.__init__additional__() def __init__additional__(self): self.unet.config.in_channels = 4 self.vae_scale_factor = 8 def _encode_prompt( self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. 
""" batch_size = len(prompt) if isinstance(prompt, list) else 1 if negative_prompt is None: negative_prompt = [""] * batch_size elif isinstance(negative_prompt, str): negative_prompt = [negative_prompt] * batch_size if batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) text_embeddings, uncond_embeddings = get_weighted_text_embeddings( pipe=self, prompt=prompt, uncond_prompt=negative_prompt if do_classifier_free_guidance else None, max_embeddings_multiples=max_embeddings_multiples, ) text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0) if do_classifier_free_guidance: uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0) text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) return text_embeddings def check_inputs(self, prompt, height, width, strength, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def get_timesteps(self, num_inference_steps, strength, is_text2img): if is_text2img: return self.scheduler.timesteps, num_inference_steps else: # get the original timestep using init_timestep offset = self.scheduler.config.get("steps_offset", 0) init_timestep = int(num_inference_steps * strength) + offset init_timestep = min(init_timestep, num_inference_steps) t_start = max(num_inference_steps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def run_safety_checker(self, image): if self.safety_checker is not None: safety_checker_input = self.feature_extractor( self.numpy_to_pil(image), return_tensors="np" ).pixel_values.astype(image.dtype) # There will throw an error if use safety_checker directly and batchsize>1 images, has_nsfw_concept = [], [] for i in range(image.shape[0]): image_i, has_nsfw_concept_i = self.safety_checker( clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] ) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / 0.18215 * latents # image = self.vae_decoder(latent_sample=latents)[0] # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 image = np.concatenate( [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] ) image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None): if image is None: shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") # scale the initial noise by the standard deviation required by the scheduler latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy() return latents, None, None else: init_latents = self.vae_encoder(sample=image)[0] init_latents = 0.18215 * init_latents init_latents = np.concatenate([init_latents] * batch_size, axis=0) init_latents_orig = init_latents shape = init_latents.shape # add noise to latents using the timesteps noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) latents = self.scheduler.add_noise( torch.from_numpy(init_latents), torch.from_numpy(noise), timestep ).numpy() return latents, init_latents_orig, noise @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, image: Union[np.ndarray, PIL.Image.Image] = None, mask_image: Union[np.ndarray, PIL.Image.Image] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, strength: float = 0.8, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[np.ndarray] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, strength, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_embeddings = self._encode_prompt( prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, ) dtype = text_embeddings.dtype # 4. Preprocess image and mask if isinstance(image, PIL.Image.Image): image = preprocess_image(image) if image is not None: image = image.astype(dtype) if isinstance(mask_image, PIL.Image.Image): mask_image = preprocess_mask(mask_image, self.vae_scale_factor) if mask_image is not None: mask = mask_image.astype(dtype) mask = np.concatenate([mask] * batch_size * num_images_per_prompt) else: mask = None # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps) timestep_dtype = next( (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" ) timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents, init_latents_orig, noise = self.prepare_latents( image, latent_timestep, batch_size * num_images_per_prompt, height, width, dtype, generator, latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.numpy() # predict the noise residual noise_pred = self.unet( sample=latent_model_input, timestep=np.array([t], dtype=timestep_dtype), encoder_hidden_states=text_embeddings, ) noise_pred = noise_pred[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 scheduler_output = self.scheduler.step( torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs ) latents = scheduler_output.prev_sample.numpy() if mask is not None: # masking init_latents_proper = self.scheduler.add_noise( torch.from_numpy(init_latents_orig), torch.from_numpy(noise), t, ).numpy() latents = (init_latents_proper * mask) + (latents * (1 - mask)) # call the callback, if provided if i % callback_steps == 0: if callback is not None: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if is_cancelled_callback is not None and is_cancelled_callback(): return None # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image) # 11. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return image, has_nsfw_concept return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def text2img( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[np.ndarray] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for text-to-image generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`np.ndarray`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) def img2img( self, image: Union[np.ndarray, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for image-to-image generation. Args: image (`np.ndarray` or `PIL.Image.Image`): `Image`, or ndarray representing an image batch, that will be used as the starting point for the process. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, ) def inpaint( self, image: Union[np.ndarray, PIL.Image.Image], mask_image: Union[np.ndarray, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function for inpaint. Args: image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. This is the image whose masked region will be inpainted. mask_image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the denoising process will be run on the masked area for the full number of iterations specified in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. num_inference_steps (`int`, *optional*, defaults to 50): The reference number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`, as explained above. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. 
generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
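# ---------------------------------------------------------------------------------
# Usage illustration (added, not part of the original pipeline file): a hedged sketch
# of how this long-prompt-weighting ONNX pipeline is typically loaded as a community
# pipeline. The checkpoint, revision, and execution provider below are assumptions;
# any ONNX export of a Stable Diffusion 1.x checkpoint with the standard subfolders
# (vae_encoder, vae_decoder, text_encoder, unet, ...) should work.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",        # assumed ONNX-exported checkpoint
        revision="onnx",                        # assumed revision containing the ONNX weights
        provider="CPUExecutionProvider",        # or "CUDAExecutionProvider" with onnxruntime-gpu
        custom_pipeline="lpw_stable_diffusion_onnx",
    )

    # Parentheses raise attention weights, square brackets lower them, and `(word:1.3)`
    # sets an explicit multiplier; prompts longer than the tokenizer limit are split into
    # chunks controlled by `max_embeddings_multiples` (see parse_prompt_attention above).
    prompt = "a photo of an astronaut riding a (white horse:1.3) on mars, (best quality), masterpiece"
    negative_prompt = "lowres, [bad anatomy], bad hands, text, error"

    image = pipe.text2img(
        prompt,
        negative_prompt=negative_prompt,
        width=512,
        height=512,
        max_embeddings_multiples=3,
    ).images[0]
    image.save("astronaut.png")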
diffusers/examples/community/lpw_stable_diffusion_onnx.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion_onnx.py", "repo_id": "diffusers", "token_count": 24244 }
135
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on [Style Aligned Image Generation via Shared Attention](https://huggingface.co/papers/2312.02133). # Authors: Amir Hertz, Andrey Voynov, Shlomi Fruchter, Daniel Cohen-Or # Project Page: https://style-aligned-gen.github.io/ # Code: https://github.com/google/style-aligned # # Adapted to Diffusers by [Aryan V S](https://github.com/a-r-r-o-w/). import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import ( Attention, AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from typing import List >>> import torch >>> from diffusers.pipelines.pipeline_utils import DiffusionPipeline >>> from PIL import Image >>> model_id = "a-r-r-o-w/dreamshaper-xl-turbo" >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", custom_pipeline="pipeline_sdxl_style_aligned") >>> pipe = pipe.to("cuda") # Enable memory saving techniques >>> pipe.enable_vae_slicing() >>> pipe.enable_vae_tiling() >>> prompt = [ ... "a toy train. macro photo. 3d game asset", ... "a toy airplane. macro photo. 3d game asset", ... "a toy bicycle. macro photo. 3d game asset", ... "a toy car. macro photo. 3d game asset", ... ] >>> negative_prompt = "low quality, worst quality, " >>> # Enable StyleAligned >>> pipe.enable_style_aligned( ... share_group_norm=False, ... share_layer_norm=False, ... share_attention=True, ... adain_queries=True, ... adain_keys=True, ... adain_values=False, ... 
full_attention_share=False, ... shared_score_scale=1.0, ... shared_score_shift=0.0, ... only_self_level=0.0, >>> ) >>> # Run inference >>> images = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... guidance_scale=2, ... height=1024, ... width=1024, ... num_inference_steps=10, ... generator=torch.Generator().manual_seed(42), >>> ).images >>> # Disable StyleAligned if you do not wish to use it anymore >>> pipe.disable_style_aligned() ``` """ def expand_first(feat: torch.Tensor, scale: float = 1.0) -> torch.Tensor: b = feat.shape[0] feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1) if scale == 1: feat_style = feat_style.expand(2, b // 2, *feat.shape[1:]) else: feat_style = feat_style.repeat(1, b // 2, 1, 1, 1) feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1) return feat_style.reshape(*feat.shape) def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.Tensor: feat_style = expand_first(feat, scale=scale) return torch.cat((feat, feat_style), dim=dim) def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]: feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt() feat_mean = feat.mean(dim=-2, keepdims=True) return feat_mean, feat_std def adain(feat: torch.Tensor) -> torch.Tensor: feat_mean, feat_std = calc_mean_std(feat) feat_style_mean = expand_first(feat_mean) feat_style_std = expand_first(feat_std) feat = (feat - feat_mean) / feat_std feat = feat * feat_style_std + feat_style_mean return feat def get_switch_vec(total_num_layers, level): if level == 0: return torch.zeros(total_num_layers, dtype=torch.bool) if level == 1: return torch.ones(total_num_layers, dtype=torch.bool) to_flip = level > 0.5 if to_flip: level = 1 - level num_switch = int(level * total_num_layers) vec = torch.arange(total_num_layers) vec = vec % (total_num_layers // num_switch) vec = vec == 0 if to_flip: vec = ~vec return vec class SharedAttentionProcessor(AttnProcessor2_0): def __init__( self, share_attention: bool = True, adain_queries: bool = True, adain_keys: bool = True, adain_values: bool = False, full_attention_share: bool = False, shared_score_scale: float = 1.0, shared_score_shift: float = 0.0, ): r"""Shared Attention Processor as proposed in the StyleAligned paper.""" super().__init__() self.share_attention = share_attention self.adain_queries = adain_queries self.adain_keys = adain_keys self.adain_values = adain_values self.full_attention_share = full_attention_share self.shared_score_scale = shared_score_scale self.shared_score_shift = shared_score_shift def shifted_scaled_dot_product_attention( self, attn: Attention, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> torch.Tensor: logits = torch.einsum("bhqd,bhkd->bhqk", query, key) * attn.scale logits[:, :, :, query.shape[2] :] += self.shared_score_shift probs = logits.softmax(-1) return torch.einsum("bhqk,bhkd->bhqd", probs, value) def shared_call( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ): residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = 
attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if self.adain_queries: query = adain(query) if self.adain_keys: key = adain(key) if self.adain_values: value = adain(value) if self.share_attention: key = concat_first(key, -2, scale=self.shared_score_scale) value = concat_first(value, -2) if self.shared_score_shift != 0: hidden_states = self.shifted_scaled_dot_product_attention(attn, query, key, value) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ): if self.full_attention_share: b, n, d = hidden_states.shape k = 2 hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d) # hidden_states = einops.rearrange(hidden_states, "(k b) n d -> k (b n) d", k=2) hidden_states = super().__call__( attn, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **kwargs, ) hidden_states = hidden_states.view(k, b, n, d).permute(0, 1, 3, 2).contiguous().view(-1, n, d) # hidden_states = einops.rearrange(hidden_states, "k (b n) d -> (k b) n d", n=n) else: hidden_states = self.shared_call(attn, hidden_states, hidden_states, attention_mask, **kwargs) return hidden_states # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). 
See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class StyleAlignedSDXLPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This pipeline also adds experimental support for [StyleAligned](https://huggingface.co/papers/2312.02133). It can be enabled/disabled using `.enable_style_aligned()` or `.disable_style_aligned()` respectively. This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: 
pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = 
negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
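        # Note: both `eta` and `generator` are only forwarded when `self.scheduler.step` actually accepts them,
        # which is detected below by inspecting the step signature rather than by checking the scheduler class.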
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. 
Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start def prepare_latents( self, image, mask, width, height, num_channels_latents, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True, latents=None, is_strength_max=True, return_noise=False, return_image_latents=False, ): batch_size *= num_images_per_prompt if image is None: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents elif mask is None: if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents else: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def _enable_shared_attention_processors( self, share_attention: bool, adain_queries: bool, adain_keys: bool, adain_values: bool, full_attention_share: bool, shared_score_scale: float, shared_score_shift: float, only_self_level: float, ): r"""Helper method to enable usage of Shared Attention Processor.""" attn_procs = {} num_self_layers = len([name for name in self.unet.attn_processors.keys() if "attn1" in name]) only_self_vec = get_switch_vec(num_self_layers, only_self_level) for i, name in enumerate(self.unet.attn_processors.keys()): is_self_attention = "attn1" in name if is_self_attention: if only_self_vec[i // 2]: attn_procs[name] = AttnProcessor2_0() else: attn_procs[name] = SharedAttentionProcessor( share_attention=share_attention, adain_queries=adain_queries, adain_keys=adain_keys, adain_values=adain_values, full_attention_share=full_attention_share, shared_score_scale=shared_score_scale, shared_score_shift=shared_score_shift, ) else: attn_procs[name] = AttnProcessor2_0() self.unet.set_attn_processor(attn_procs) def _disable_shared_attention_processors(self): r""" Helper method to disable usage of the Shared Attention Processor. 
All processors are reset to the default Attention Processor for pytorch versions above 2.0. """ attn_procs = {} for i, name in enumerate(self.unet.attn_processors.keys()): attn_procs[name] = AttnProcessor2_0() self.unet.set_attn_processor(attn_procs) def _register_shared_norm(self, share_group_norm: bool = True, share_layer_norm: bool = True): r"""Helper method to register shared group/layer normalization layers.""" def register_norm_forward(norm_layer: Union[nn.GroupNorm, nn.LayerNorm]) -> Union[nn.GroupNorm, nn.LayerNorm]: if not hasattr(norm_layer, "orig_forward"): setattr(norm_layer, "orig_forward", norm_layer.forward) orig_forward = norm_layer.orig_forward def forward_(hidden_states: torch.Tensor) -> torch.Tensor: n = hidden_states.shape[-2] hidden_states = concat_first(hidden_states, dim=-2) hidden_states = orig_forward(hidden_states) return hidden_states[..., :n, :] norm_layer.forward = forward_ return norm_layer def get_norm_layers(pipeline_, norm_layers_: Dict[str, List[Union[nn.GroupNorm, nn.LayerNorm]]]): if isinstance(pipeline_, nn.LayerNorm) and share_layer_norm: norm_layers_["layer"].append(pipeline_) if isinstance(pipeline_, nn.GroupNorm) and share_group_norm: norm_layers_["group"].append(pipeline_) else: for layer in pipeline_.children(): get_norm_layers(layer, norm_layers_) norm_layers = {"group": [], "layer": []} get_norm_layers(self.unet, norm_layers) norm_layers_list = [] for key in ["group", "layer"]: for layer in norm_layers[key]: norm_layers_list.append(register_norm_forward(layer)) return norm_layers_list @property def style_aligned_enabled(self): r"""Returns whether StyleAligned has been enabled in the pipeline or not.""" return hasattr(self, "_style_aligned_norm_layers") and self._style_aligned_norm_layers is not None def enable_style_aligned( self, share_group_norm: bool = True, share_layer_norm: bool = True, share_attention: bool = True, adain_queries: bool = True, adain_keys: bool = True, adain_values: bool = False, full_attention_share: bool = False, shared_score_scale: float = 1.0, shared_score_shift: float = 0.0, only_self_level: float = 0.0, ): r""" Enables the StyleAligned mechanism as in https://huggingface.co/papers/2312.02133. Args: share_group_norm (`bool`, defaults to `True`): Whether or not to use shared group normalization layers. share_layer_norm (`bool`, defaults to `True`): Whether or not to use shared layer normalization layers. share_attention (`bool`, defaults to `True`): Whether or not to use attention sharing between batch images. adain_queries (`bool`, defaults to `True`): Whether or not to apply the AdaIn operation on attention queries. adain_keys (`bool`, defaults to `True`): Whether or not to apply the AdaIn operation on attention keys. adain_values (`bool`, defaults to `False`): Whether or not to apply the AdaIn operation on attention values. full_attention_share (`bool`, defaults to `False`): Whether or not to use full attention sharing between all images in a batch. Can lead to content leakage within each batch and some loss in diversity. shared_score_scale (`float`, defaults to `1.0`): Scale for shared attention. 
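            shared_score_shift (`float`, defaults to `0.0`):
                Additive shift applied to the attention scores toward the shared (reference) keys. A non-zero
                value routes attention through a manual softmax computation instead of
                `F.scaled_dot_product_attention`.
            only_self_level (`float`, defaults to `0.0`):
                Fraction of self-attention layers that keep the default attention processor instead of the
                shared one. `0.0` shares every self-attention layer, while `1.0` disables sharing entirely.

        Example (minimal sketch, assuming `pipe` is an already-loaded `StyleAlignedSDXLPipeline`):

        ```py
        >>> pipe.enable_style_aligned(share_attention=True, adain_queries=True, adain_keys=True)
        >>> # The first prompt in the batch acts as the style reference for the remaining prompts.
        >>> images = pipe(prompt=["a toy train. macro photo", "a toy boat. macro photo"]).images
        >>> pipe.disable_style_aligned()
        ```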
""" self._style_aligned_norm_layers = self._register_shared_norm(share_group_norm, share_layer_norm) self._enable_shared_attention_processors( share_attention=share_attention, adain_queries=adain_queries, adain_keys=adain_keys, adain_values=adain_values, full_attention_share=full_attention_share, shared_score_scale=shared_score_scale, shared_score_shift=shared_score_shift, only_self_level=only_self_level, ) def disable_style_aligned(self): r"""Disables the StyleAligned mechanism if it had been previously enabled.""" if self.style_aligned_enabled: for layer in self._style_aligned_norm_layers: layer.forward = layer.orig_forward self._style_aligned_norm_layers = None self._disable_shared_attention_processors() # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.Tensor] = None, strength: float = 0.3, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
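            image (`PipelineImageInput`, *optional*):
                An image or batch of images to use as the starting point for image-to-image or inpainting
                style generation. If not provided, generation starts from pure noise (text-to-image).
            mask_image (`PipelineImageInput`, *optional*):
                A mask highlighting the region of `image` to be repainted when performing inpainting. Only
                used together with `image`.
            masked_image_latents (`torch.Tensor`, *optional*):
                Pre-computed latents of the masked image. If not provided, they are derived from `image` and
                `mask_image`.
            strength (`float`, *optional*, defaults to 0.3):
                Indicates how much to transform the reference `image`. Only used when `image` is provided;
                a value of `1.0` essentially ignores the reference image.
            denoising_start (`float`, *optional*):
                When specified, the fraction (between 0.0 and 1.0) of the denoising process to be skipped at
                the start, for example when this pipeline receives partially denoised latents from another
                pipeline in a "Mixture of Denoisers" setup.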
timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. 
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, prompt_2=prompt_2, height=height, width=width, callback_steps=callback_steps, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 4. Preprocess image and mask_image if image is not None: image = self.image_processor.preprocess(image, height=height, width=width) image = image.to(device=self.device, dtype=prompt_embeds.dtype) if mask_image is not None: mask = self.mask_processor.preprocess(mask_image, height=height, width=width) mask = mask.to(device=self.device, dtype=prompt_embeds.dtype) if masked_image_latents is not None: masked_image = masked_image_latents elif image.shape[1] == 4: # if image is in latent space, we can't mask it masked_image = None else: masked_image = image * (mask < 0.5) else: mask = None # 4. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) if image is not None: timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 add_noise = True if self.denoising_start is None else False # 5. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents = self.prepare_latents( image=image, mask=mask, width=width, height=height, num_channels_latents=num_channels_latents, timestep=latent_timestep, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, dtype=prompt_embeds.dtype, device=device, generator=generator, add_noise=add_noise, latents=latents, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if mask is not None: if return_image_latents: latents, noise, image_latents = latents else: latents, noise = latents mask, masked_image_latents = self.prepare_mask_latents( mask=mask, masked_image=masked_image, batch_size=batch_size * num_images_per_prompt, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, do_classifier_free_guidance=self.do_classifier_free_guidance, ) # Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) # 8. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 8.1 Apply denoising_end if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if mask is not None and num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
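The denoising loop above finishes each step by optionally calling `rescale_noise_cfg` when `guidance_rescale > 0`. As a point of reference, here is a minimal standalone sketch of that rescaling, following section 3.4 of the cited paper; the pipeline uses its own helper, so this version only restates the formula for inspection:

```py
import torch


def rescale_noise_cfg(noise_cfg: torch.Tensor, noise_pred_text: torch.Tensor, guidance_rescale: float = 0.0) -> torch.Tensor:
    # Match the standard deviation of the CFG output to that of the text-conditioned
    # prediction, then blend with the unrescaled output by `guidance_rescale`.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
```

Setting `guidance_rescale` to 0 reproduces the plain CFG prediction; the paper reports values around 0.7 as effective against overexposure with zero-terminal-SNR schedules.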
diffusers/examples/community/pipeline_sdxl_style_aligned.py/0
{ "file_path": "diffusers/examples/community/pipeline_sdxl_style_aligned.py", "repo_id": "diffusers", "token_count": 42215 }
136
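For readers who want to try a community file like the one above without copying it locally, diffusers can load it by name through the `custom_pipeline` argument. A hedged sketch, assuming an SDXL base checkpoint; the exact keyword arguments accepted by the style-aligned pipeline (it also takes `image` and `mask_image` for inpainting, plus its own style-aligned options) should be checked against the file's signature:

```py
import torch
from diffusers import DiffusionPipeline

# Load an SDXL checkpoint and route __call__ through the community pipeline file above.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_sdxl_style_aligned",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="a watercolor painting of a lighthouse at dawn",
    num_inference_steps=30,
    guidance_scale=7.5,
    guidance_rescale=0.7,  # enables the CFG rescaling sketched above
).images[0]
image.save("style_aligned_out.png")
```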
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import T5EncoderModel, T5TokenizerFast from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin from diffusers.models.autoencoders import AutoencoderKLLTXVideo from diffusers.models.transformers import LTXVideoTransformer3DModel from diffusers.pipelines.ltx.pipeline_output import LTXPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers.utils import export_to_video >>> from examples.community.pipeline_stg_ltx import LTXSTGPipeline >>> pipe = LTXSTGPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A woman with light skin, wearing a blue jacket and a black hat with a veil, looks down and to her right, then back up as she speaks; she has brown hair styled in an updo, light brown eyebrows, and is wearing a white collared shirt under her jacket; the camera remains stationary on her face as she speaks; the background is out of focus, but shows trees and people in period clothing; the scene is captured in real-life footage." >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> # Configure STG mode options >>> stg_applied_layers_idx = [19] # Layer indices from 0 to 41 >>> stg_scale = 1.0 # Set 0.0 for CFG >>> do_rescaling = False >>> video = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... width=704, ... height=480, ... num_frames=161, ... num_inference_steps=50, ... stg_applied_layers_idx=stg_applied_layers_idx, ... stg_scale=stg_scale, ... 
do_rescaling=do_rescaling, >>> ).frames[0] >>> export_to_video(video, "output.mp4", fps=24) ``` """ def forward_with_stg( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states_ptb = hidden_states[2:] encoder_hidden_states_ptb = encoder_hidden_states[2:] batch_size = hidden_states.size(0) norm_hidden_states = self.norm1(hidden_states) num_ada_params = self.scale_shift_table.shape[0] ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa attn_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=None, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + attn_hidden_states * gate_msa attn_hidden_states = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=None, attention_mask=encoder_attention_mask, ) hidden_states = hidden_states + attn_hidden_states norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp) + shift_mlp ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + ff_output * gate_mlp hidden_states[2:] = hidden_states_ptb encoder_hidden_states[2:] = encoder_hidden_states_ptb return hidden_states # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.16, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class LTXSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): r""" Pipeline for text-to-video generation. Reference: https://github.com/Lightricks/LTX-Video Args: transformer ([`LTXVideoTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLLTXVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLLTXVideo, text_encoder: T5EncoderModel, tokenizer: T5TokenizerFast, transformer: LTXVideoTransformer3DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128 ) def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(batch_size, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1) return prompt_embeds, prompt_attention_mask # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt with 256->128 def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: 
Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." ) @staticmethod def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor: # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p]. # The patch dimensions are then permuted and collapsed into the channel dimension of shape: # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor). 
# dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features batch_size, num_channels, num_frames, height, width = latents.shape post_patch_num_frames = num_frames // patch_size_t post_patch_height = height // patch_size post_patch_width = width // patch_size latents = latents.reshape( batch_size, -1, post_patch_num_frames, patch_size_t, post_patch_height, patch_size, post_patch_width, patch_size, ) latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3) return latents @staticmethod def _unpack_latents( latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1 ) -> torch.Tensor: # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions) # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of # what happens in the `_pack_latents` method. batch_size = latents.size(0) latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size) latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3) return latents @staticmethod def _normalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Normalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = (latents - latents_mean) * scaling_factor / latents_std return latents @staticmethod def _denormalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Denormalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / scaling_factor + latents_mean return latents def prepare_latents( self, batch_size: int = 1, num_channels_latents: int = 128, height: int = 512, width: int = 704, num_frames: int = 161, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) height = height // self.vae_spatial_compression_ratio width = width // self.vae_spatial_compression_ratio num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 shape = (batch_size, num_channels_latents, num_frames, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents( latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def do_spatio_temporal_guidance(self): return self._stg_scale > 0.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 704, num_frames: int = 161, frame_rate: int = 25, num_inference_steps: int = 50, timesteps: List[int] = None, guidance_scale: float = 3, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, decode_timestep: Union[float, List[float]] = 0.0, decode_noise_scale: Optional[Union[float, List[float]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 128, stg_applied_layers_idx: Optional[List[int]] = [19], stg_scale: Optional[float] = 1.0, do_rescaling: Optional[bool] = False, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, defaults to `512`): The height in pixels of the generated image. This is set to 480 by default for the best results. width (`int`, defaults to `704`): The width in pixels of the generated image. This is set to 848 by default for the best results. num_frames (`int`, defaults to `161`): The number of video frames to generate num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. guidance_scale (`float`, defaults to `3 `): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for negative text embeddings. decode_timestep (`float`, defaults to `0.0`): The timestep at which generated video is decoded. decode_noise_scale (`float`, defaults to `None`): The interpolation factor between random noise and denoised latents at the decode timestep. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to `128 `): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. 
Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) self._stg_scale = stg_scale self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False if self.do_spatio_temporal_guidance: for i in stg_applied_layers_idx: self.transformer.transformer_blocks[i].forward = types.MethodType( forward_with_stg, self.transformer.transformer_blocks[i] ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Prepare text embeddings ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, device=device, ) if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat( [negative_prompt_attention_mask, prompt_attention_mask, prompt_attention_mask], dim=0 ) # 4. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, ) # 5. Prepare timesteps latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 latent_height = height // self.vae_spatial_compression_ratio latent_width = width // self.vae_spatial_compression_ratio video_sequence_length = latent_num_frames * latent_height * latent_width sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) mu = calculate_shift( video_sequence_length, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.16), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. Prepare micro-conditions latent_frame_rate = frame_rate / self.vae_temporal_compression_ratio rope_interpolation_scale = ( 1 / latent_frame_rate, self.vae_spatial_compression_ratio, self.vae_spatial_compression_ratio, ) # 7. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: latent_model_input = torch.cat([latents] * 2) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: latent_model_input = torch.cat([latents] * 3) else: latent_model_input = latents latent_model_input = latent_model_input.to(prompt_embeds.dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, encoder_attention_mask=prompt_attention_mask, num_frames=latent_num_frames, height=latent_height, width=latent_width, rope_interpolation_scale=rope_interpolation_scale, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred.float() if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3) noise_pred = ( noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + self._stg_scale * (noise_pred_text - noise_pred_perturb) ) if do_rescaling: rescaling_scale = 0.7 factor = noise_pred_text.std() / noise_pred.std() factor = rescaling_scale * factor + (1 - rescaling_scale) noise_pred = noise_pred * factor # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": video = latents else: latents = self._unpack_latents( latents, latent_num_frames, latent_height, latent_width, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size, ) latents = self._denormalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) latents = latents.to(prompt_embeds.dtype) if not self.vae.config.timestep_conditioning: timestep = None else: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) if not isinstance(decode_timestep, list): decode_timestep = [decode_timestep] * batch_size if decode_noise_scale is None: decode_noise_scale = decode_timestep elif not isinstance(decode_noise_scale, list): decode_noise_scale = [decode_noise_scale] * batch_size timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype) decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[ :, None, None, None, None ] latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise video = self.vae.decode(latents, timestep, return_dict=False)[0] video = self.video_processor.postprocess_video(video, 
output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LTXPipelineOutput(frames=video)
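The interesting numerical step in the loop above is how the three stacked predictions (unconditional, text-conditioned, and layer-perturbed) are combined. Pulling that arithmetic out into a self-contained sketch makes it easier to inspect; the helper name and toy tensors here are illustrative only:

```py
import torch


def combine_cfg_stg(noise_pred: torch.Tensor, guidance_scale: float, stg_scale: float,
                    do_rescaling: bool = False, rescaling_scale: float = 0.7) -> torch.Tensor:
    # noise_pred stacks three predictions along the batch dim: [uncond, text, perturbed]
    uncond, text, perturbed = noise_pred.chunk(3)
    out = uncond + guidance_scale * (text - uncond) + stg_scale * (text - perturbed)
    if do_rescaling:
        # Pull the std of the combined prediction back toward the text-conditioned one,
        # interpolated by `rescaling_scale`, as in the pipeline above.
        factor = text.std() / out.std()
        factor = rescaling_scale * factor + (1 - rescaling_scale)
        out = out * factor
    return out


preds = torch.randn(3, 128, 32, 32)  # toy stand-in for the stacked transformer outputs
print(combine_cfg_stg(preds, guidance_scale=3.0, stg_scale=1.0, do_rescaling=True).shape)
```

With `stg_scale=0.0` the third term drops out and the expression reduces to ordinary classifier-free guidance, which is exactly the branch the pipeline's `do_spatio_temporal_guidance` property selects.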
diffusers/examples/community/pipeline_stg_ltx.py/0
{ "file_path": "diffusers/examples/community/pipeline_stg_ltx.py", "repo_id": "diffusers", "token_count": 18853 }
137
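Because the LTX transformer consumes video latents as a flat token sequence, the `_pack_latents`/`_unpack_latents` pair in the file above must be exact inverses of each other. Below is a small round-trip check with the default patch sizes of 1 and local re-implementations of the two helpers; names and tensor sizes are illustrative, not taken from the pipeline's public API:

```py
import torch


def pack(latents: torch.Tensor, p: int = 1, p_t: int = 1) -> torch.Tensor:
    # [B, C, F, H, W] -> [B, (F/p_t)*(H/p)*(W/p), C*p_t*p*p]
    b, c, f, h, w = latents.shape
    latents = latents.reshape(b, -1, f // p_t, p_t, h // p, p, w // p, p)
    return latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)


def unpack(latents: torch.Tensor, f: int, h: int, w: int, p: int = 1, p_t: int = 1) -> torch.Tensor:
    # Inverse of pack(): [B, S, D] -> [B, C, F, H, W]
    b = latents.size(0)
    latents = latents.reshape(b, f, h, w, -1, p_t, p, p)
    return latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)


x = torch.randn(1, 128, 5, 16, 22)   # [B, C, F, H, W] video latents
tokens = pack(x)                     # torch.Size([1, 1760, 128]) when p = p_t = 1
assert torch.equal(unpack(tokens, 5, 16, 22), x)
```

The token count (1760 here) corresponds to the `video_sequence_length` that the pipeline later passes to `calculate_shift` to pick the flow-matching shift `mu`.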
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, ControlNetModel, UNet2DConditionModel, logging from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import numpy as np >>> import torch >>> from PIL import Image >>> from stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation >>> from diffusers import ControlNetModel, UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> def ade_palette(): return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 
0], [92, 0, 255]] >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small") >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small") >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16) >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 ) >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe.enable_xformers_memory_efficient_attention() >>> pipe.enable_model_cpu_offload() >>> def image_to_seg(image): pixel_values = image_processor(image, return_tensors="pt").pixel_values with torch.no_grad(): outputs = image_segmentor(pixel_values) seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3 palette = np.array(ade_palette()) for label, color in enumerate(palette): color_seg[seg == label, :] = color color_seg = color_seg.astype(np.uint8) seg_image = Image.fromarray(color_seg) return seg_image >>> image = load_image( "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" ) >>> mask_image = load_image( "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" ) >>> controlnet_conditioning_image = image_to_seg(image) >>> image = pipe( "Face of a yellow cat, high resolution, sitting on a park bench", image, mask_image, controlnet_conditioning_image, num_inference_steps=20, ).images[0] >>> image.save("out.png") ``` """ def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image def prepare_mask_image(mask_image): if isinstance(mask_image, torch.Tensor): if mask_image.ndim == 2: # Batch and add channel dim for single mask mask_image = mask_image.unsqueeze(0).unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] == 1: # Single mask, the 0'th dimension is considered to be # the existing batch size of 1 mask_image = mask_image.unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] != 1: # Batch of mask, the 0'th dimension is considered to be # the batching dimension mask_image = mask_image.unsqueeze(1) # Binarize mask mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 else: # preprocess mask if isinstance(mask_image, (PIL.Image.Image, np.ndarray)): mask_image = [mask_image] if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image): mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0) mask_image = mask_image.astype(np.float32) / 255.0 elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray): mask_image = np.concatenate([m[None, None, :] 
for m in mask_image], axis=0) mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 mask_image = torch.from_numpy(mask_image) return mask_image def prepare_controlnet_conditioning_image( controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance, ): if not isinstance(controlnet_conditioning_image, torch.Tensor): if isinstance(controlnet_conditioning_image, PIL.Image.Image): controlnet_conditioning_image = [controlnet_conditioning_image] if isinstance(controlnet_conditioning_image[0], PIL.Image.Image): controlnet_conditioning_image = [ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in controlnet_conditioning_image ] controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0) controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0 controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2) controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image) elif isinstance(controlnet_conditioning_image[0], torch.Tensor): controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0) image_batch_size = controlnet_conditioning_image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0) controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype) if do_classifier_free_guidance: controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2) return controlnet_conditioning_image class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusionMixin): """ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif 
type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list: raise TypeError( "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors" ) if image_is_pil: image_batch_size = 1 elif image_is_tensor: image_batch_size = image.shape[0] elif image_is_pil_list: image_batch_size = len(image) elif image_is_tensor_list: image_batch_size = len(image) else: raise ValueError("controlnet condition image is not valid") if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] else: raise ValueError("prompt or prompt_embeds are not valid") if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) def check_inputs( self, prompt, image, mask_image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, controlnet_conditioning_scale=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # check controlnet condition image if isinstance(self.controlnet, ControlNetModel): self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel): if not isinstance(controlnet_conditioning_image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") if len(controlnet_conditioning_image) != len(self.controlnet.nets): raise ValueError( "For multiple controlnets: `image` must have the same length as the number of controlnets." ) for image_ in controlnet_conditioning_image: self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if isinstance(self.controlnet, ControlNetModel): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif isinstance(self.controlnet, MultiControlNetModel): if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor): raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor") if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image): raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image") if isinstance(image, torch.Tensor): if image.ndim != 3 and image.ndim != 4: raise ValueError("`image` must have 3 or 4 dimensions") if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4: raise ValueError("`mask_image` must have 2, 3, or 4 dimensions") if image.ndim == 3: image_batch_size = 1 image_channels, image_height, image_width = image.shape elif image.ndim == 4: image_batch_size, image_channels, image_height, image_width = image.shape else: assert False if mask_image.ndim == 2: mask_image_batch_size = 1 mask_image_channels = 1 mask_image_height, mask_image_width = mask_image.shape elif mask_image.ndim == 3: mask_image_channels = 1 mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape elif mask_image.ndim == 4: mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape if image_channels != 3: raise ValueError("`image` must have 3 channels") if mask_image_channels != 1: raise ValueError("`mask_image` must have 1 channel") if image_batch_size != mask_image_batch_size: raise ValueError("`image` and `mask_image` mush have the same batch sizes") if image_height != mask_image_height or image_width != mask_image_width: raise ValueError("`image` and `mask_image` must have the same height and width dimensions") if image.min() < -1 or image.max() > 1: raise ValueError("`image` should be in range [-1, 1]") if mask_image.min() < 0 or mask_image.max() > 1: raise ValueError("`mask_image` should be in range [0, 1]") else: mask_image_channels = 1 image_channels = 3 
single_image_latent_channels = self.vae.config.latent_channels total_latent_channels = single_image_latent_channels * 2 + mask_image_channels if total_latent_channels != self.unet.config.in_channels: raise ValueError( f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received" f" non inpainting latent channels: {single_image_latent_channels}," f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}." f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask_image = mask_image.to(device=device, dtype=dtype) # duplicate mask for each generation per prompt, using mps friendly method if mask_image.shape[0] < batch_size: if not batch_size % mask_image.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1) mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image mask_image_latents = mask_image return mask_image_latents def prepare_masked_image_latents( self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): masked_image = masked_image.to(device=device, dtype=dtype) # encode the mask image into latents space so we can concatenate it to the latents if isinstance(generator, list): masked_image_latents = [ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i]) for i in range(batch_size) ] masked_image_latents = torch.cat(masked_image_latents, dim=0) else: masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator) masked_image_latents = self.vae.config.scaling_factor * masked_image_latents # duplicate masked_image_latents for each generation per prompt, using mps friendly method if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. 
Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return masked_image_latents def _default_height_width(self, height, width, image): if isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[3] height = (height // 8) * 8 # round down to nearest multiple of 8 if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[2] width = (width // 8) * 8 # round down to nearest multiple of 8 return height, width @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, mask_image: Union[torch.Tensor, PIL.Image.Image] = None, controlnet_conditioning_image: Union[ torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image] ] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. mask_image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. controlnet_conditioning_image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]`): The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. PIL.Image.Image` can also be accepted as an image. The control image is automatically resized to fit the output image. 
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height, width = self._default_height_width(height, width, controlnet_conditioning_image) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, image, mask_image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, controlnet_conditioning_scale, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets) # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare mask, image, and controlnet_conditioning_image image = prepare_image(image) mask_image = prepare_mask_image(mask_image) # condition image(s) if isinstance(self.controlnet, ControlNetModel): controlnet_conditioning_image = prepare_controlnet_conditioning_image( controlnet_conditioning_image=controlnet_conditioning_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) elif isinstance(self.controlnet, MultiControlNetModel): controlnet_conditioning_images = [] for image_ in controlnet_conditioning_image: image_ = prepare_controlnet_conditioning_image( controlnet_conditioning_image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_conditioning_images.append(image_) controlnet_conditioning_image = controlnet_conditioning_images else: assert False masked_image = image * (mask_image < 0.5) # 5. 
Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Prepare latent variables num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) mask_image_latents = self.prepare_mask_latents( mask_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, do_classifier_free_guidance, ) masked_image_latents = self.prepare_masked_image_latents( masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, do_classifier_free_guidance, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance non_inpainting_latent_model_input = ( torch.cat([latents] * 2) if do_classifier_free_guidance else latents ) non_inpainting_latent_model_input = self.scheduler.scale_model_input( non_inpainting_latent_model_input, t ) inpainting_latent_model_input = torch.cat( [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1 ) down_block_res_samples, mid_block_res_sample = self.controlnet( non_inpainting_latent_model_input, t, encoder_hidden_states=prompt_embeds, controlnet_cond=controlnet_conditioning_image, conditioning_scale=controlnet_conditioning_scale, return_dict=False, ) # predict the noise residual noise_pred = self.unet( inpainting_latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if output_type == "latent": image = latents has_nsfw_concept = None elif output_type == "pil": # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 10. Convert to PIL image = self.numpy_to_pil(image) else: # 8. Post-processing image = self.decode_latents(latents) # 9. 
Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/examples/community/stable_diffusion_controlnet_inpaint.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_controlnet_inpaint.py", "repo_id": "diffusers", "token_count": 23771 }
138
import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers import ( DiffusionPipeline, ImagePipelineOutput, PriorTransformer, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel, ) from diffusers.pipelines.unclip import UnCLIPTextProjModel from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def slerp(val, low, high): """ Find the interpolation point between the 'low' and 'high' values for the given 'val'. See https://en.wikipedia.org/wiki/Slerp for more details on the topic. """ low_norm = low / torch.norm(low) high_norm = high / torch.norm(high) omega = torch.acos((low_norm * high_norm)) so = torch.sin(omega) res = (torch.sin((1.0 - val) * omega) / so) * low + (torch.sin(val * omega) / so) * high return res class UnCLIPTextInterpolationPipeline(DiffusionPipeline): """ Pipeline for prompt-to-prompt interpolation on CLIP text embeddings and using the UnCLIP / Dall-E to decode them to images. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution unet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution unet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]): Scheduler used in the prior denoising process. Just a modified DDPMScheduler. decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process. Just a modified DDPMScheduler. super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler. 
""" prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.__init__ def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__() self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, 
truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask @torch.no_grad() def __call__( self, start_prompt: str, end_prompt: str, steps: int = 5, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, enable_sequential_cpu_offload=True, gpu_id=0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: start_prompt (`str`): The prompt to start the image generation interpolation from. end_prompt (`str`): The prompt to end the image generation interpolation at. steps (`int`, *optional*, defaults to 5): The number of steps over which to interpolate from start_prompt to end_prompt. The pipeline returns the same number of images as this value. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7): The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). 
Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. decoder_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. enable_sequential_cpu_offload (`bool`, *optional*, defaults to `True`): If True, offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. gpu_id (`int`, *optional*, defaults to `0`): The gpu_id to be passed to enable_sequential_cpu_offload. Only works when enable_sequential_cpu_offload is set to True. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. """ if not isinstance(start_prompt, str) or not isinstance(end_prompt, str): raise ValueError( f"`start_prompt` and `end_prompt` should be of type `str` but got {type(start_prompt)} and" f" {type(end_prompt)} instead" ) if enable_sequential_cpu_offload: self.enable_sequential_cpu_offload(gpu_id=gpu_id) device = self._execution_device # Turn the prompts into embeddings. 
inputs = self.tokenizer( [start_prompt, end_prompt], padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ) inputs.to(device) text_model_output = self.text_encoder(**inputs) text_attention_mask = torch.max(inputs.attention_mask[0], inputs.attention_mask[1]) text_attention_mask = torch.cat([text_attention_mask.unsqueeze(0)] * steps).to(device) # Interpolate from the start to end prompt using slerp and add the generated images to an image output pipeline batch_text_embeds = [] batch_last_hidden_state = [] for interp_val in torch.linspace(0, 1, steps): text_embeds = slerp(interp_val, text_model_output.text_embeds[0], text_model_output.text_embeds[1]) last_hidden_state = slerp( interp_val, text_model_output.last_hidden_state[0], text_model_output.last_hidden_state[1] ) batch_text_embeds.append(text_embeds.unsqueeze(0)) batch_last_hidden_state.append(last_hidden_state.unsqueeze(0)) batch_text_embeds = torch.cat(batch_text_embeds) batch_last_hidden_state = torch.cat(batch_last_hidden_state) text_model_output = CLIPTextModelOutput( text_embeds=batch_text_embeds, last_hidden_state=batch_last_hidden_state ) batch_size = text_model_output[0].shape[0] do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt=None, device=device, num_images_per_prompt=1, do_classifier_free_guidance=do_classifier_free_guidance, text_model_output=text_model_output, text_attention_mask=text_attention_mask, ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, None, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_encoder_hidden_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) 
decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, None, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, None, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample image = super_res_latents # done super res # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = 
image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/examples/community/unclip_text_interpolation.py/0
{ "file_path": "diffusers/examples/community/unclip_text_interpolation.py", "repo_id": "diffusers", "token_count": 10699 }
139
# DreamBooth training example

[DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like Stable Diffusion given just a few (3-5) images of a subject.

The `train_dreambooth.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Install the requirements in the `examples/dreambooth` folder as shown below.

```bash
cd examples/dreambooth
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g. a notebook):

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, setting torch compile mode to True can give dramatic speedups.
Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

And launch the training using:

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400 \
  --push_to_hub
```

### Training with prior-preservation loss

Prior-preservation is used to avoid overfitting and language drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those images during training along with our data.

According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 images work well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time.
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on a 16GB GPU:

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to train DreamBooth on a 16GB GPU. To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 --gradient_checkpointing \
  --use_8bit_adam \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on a 12GB GPU:

It is possible to run DreamBooth on a 12GB GPU by using the following optimizations:

- [gradient checkpointing and the 8-bit optimizer](#training-on-a-16gb-gpu)
- [xformers](#training-with-xformers)
- [setting grads to none](#set-grads-to-none)

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --use_8bit_adam \
  --enable_xformers_memory_efficient_attention \
  --set_grads_to_none \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Training on an 8 GB GPU:

By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some tensors from VRAM to either CPU or NVMe, allowing training with less VRAM.

DeepSpeed needs to be enabled with `accelerate config`. During configuration answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 mixed precision, and offloading both parameters and optimizer state to CPU, it's possible to train on under 8 GB VRAM, at the cost of requiring significantly more RAM (about 25 GB). See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
Changing the default Adam optimizer to DeepSpeed's special version of Adam, `deepspeed.ops.adam.DeepSpeedCPUAdam`, gives a substantial speedup, but enabling it requires a CUDA toolchain with the same version as PyTorch. The 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch --mixed_precision="fp16" train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --sample_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Fine-tune text encoder with the UNet.

The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results, especially on faces.
Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.

___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --use_8bit_adam \
  --gradient_checkpointing \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800 \
  --push_to_hub
```

### Using DreamBooth for pipelines other than Stable Diffusion

The [AltDiffusion pipeline](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion) also supports DreamBooth fine-tuning. The process is the same as above; all you need to do is replace the `MODEL_NAME` like this:

```
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
or
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
```

### Inference

Once you have trained a model using the above command, you can run inference simply using the `StableDiffusionPipeline`. Make sure to include the `identifier` (e.g. sks in the above example) in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
```

### Inference from a training checkpoint

You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.
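As a minimal sketch, assuming the checkpoint folder written by `--checkpointing_steps` contains a `unet` subfolder (and a `text_encoder` subfolder if `--train_text_encoder` was used), loading such a checkpoint back into a pipeline might look like the following; the base model name and checkpoint path below are placeholders:

```python
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel
from transformers import CLIPTextModel

base_model_id = "CompVis/stable-diffusion-v1-4"        # base model used for training
checkpoint_dir = "path-to-save-model/checkpoint-400"   # hypothetical checkpoint folder

# Load the fine-tuned UNet saved inside the checkpoint.
unet = UNet2DConditionModel.from_pretrained(f"{checkpoint_dir}/unet", torch_dtype=torch.float16)

# Only needed if the text encoder was also trained (--train_text_encoder).
text_encoder = CLIPTextModel.from_pretrained(f"{checkpoint_dir}/text_encoder", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    base_model_id, unet=unet, text_encoder=text_encoder, torch_dtype=torch.float16
).to("cuda")

image = pipe("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket-from-checkpoint.png")
```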
For the complete steps, please refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint).

## Training with Low-Rank Adaptation of Large Language Models (LoRA)

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow controlling the extent to which the model is adapted towards new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.

### Training

Let's get started with a simple example. We will re-use the dog example of the [previous section](#dog-toy-example).

First, you need to set up your DreamBooth training example as explained in the [installation section](#Installing-the-dependencies).
Next, let's download the dog dataset. Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. Make sure to set `INSTANCE_DIR` to the name of your directory further below. This will be our training data.

Now, you can launch the training. Here we will use [Stable Diffusion 1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5).

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [wandb](https://docs.wandb.ai/quickstart) is a nice solution to easily see generated images during training. All you need to do is run `pip install wandb` before training and pass `--report_to="wandb"` to automatically log images.___**

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag.

```bash
hf auth login
```

Now we can start training!

```bash
accelerate launch train_dreambooth_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --checkpointing_steps=100 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=50 \
  --seed="0" \
  --push_to_hub
```

**___Note: When using LoRA we can use a much higher learning rate compared to vanilla DreamBooth.
Here we use *1e-4* instead of the usual *2e-6*.___** The final LoRA embedding weights have been uploaded to [patrickvonplaten/lora_dreambooth_dog_example](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example). **___Note: [The final weights](https://huggingface.co/patrickvonplaten/lora/blob/main/pytorch_attn_procs.bin) are only 3 MB in size which is orders of magnitudes smaller than the original model.** The training results are summarized [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5). You can use the `Step` slider to see how the model learned the features of our subject while the model trained. Optionally, we can also train additional LoRA layers for the text encoder. Specify the `--train_text_encoder` argument above for that. If you're interested to know more about how we enable this support, check out this [PR](https://github.com/huggingface/diffusers/pull/2918). With the default hyperparameters from the above, the training seems to go in a positive direction. Check out [this panel](https://wandb.ai/sayakpaul/dreambooth-lora/reports/test-23-04-17-17-00-13---Vmlldzo0MDkwNjMy). The trained LoRA layers are available [here](https://huggingface.co/sayakpaul/dreambooth). ### Inference After training, LoRA weights can be loaded very easily into the original pipeline. First, you need to load the original pipeline: ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("base-model-name").to("cuda") ``` Next, we can load the adapter layers into the pipeline with the [`load_lora_weights` function](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters#lora). ```python pipe.load_lora_weights("path-to-the-lora-checkpoint") ``` Finally, we can run the model in inference. ```python image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0] ``` If you are loading the LoRA parameters from the Hub and if the Hub repository has a `base_model` tag (such as [this](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example/blob/main/README.md?code=true#L4)), then you can do: ```py from huggingface_hub.repocard import RepoCard lora_model_id = "patrickvonplaten/lora_dreambooth_dog_example" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) ... ``` If you used `--train_text_encoder` during training, then use `pipe.load_lora_weights()` to load the LoRA weights. For example: ```python from huggingface_hub.repocard import RepoCard from diffusers import StableDiffusionPipeline import torch lora_model_id = "sayakpaul/dreambooth-text-encoder-test" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.load_lora_weights(lora_model_id) image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0] ``` Note that the use of [`StableDiffusionLoraLoaderMixin.load_lora_weights`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights) is preferred to [`UNet2DConditionLoadersMixin.load_attn_procs`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs) for loading LoRA parameters. 
This is because `StableDiffusionLoraLoaderMixin.load_lora_weights` can handle the following situations:

* LoRA parameters that don't have separate identifiers for the UNet and the text encoder (such as [`"patrickvonplaten/lora_dreambooth_dog_example"`](https://huggingface.co/patrickvonplaten/lora_dreambooth_dog_example)). So, you can just do:

```py
pipe.load_lora_weights(lora_model_path)
```

* LoRA parameters that have separate identifiers for the UNet and the text encoder such as [`"sayakpaul/dreambooth"`](https://huggingface.co/sayakpaul/dreambooth).

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

___Note: The flax example doesn't yet support features like gradient checkpointing, gradient accumulation etc., so to use flax for faster training we will need >30GB cards.___

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

### Training without prior preservation loss

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --max_train_steps=400
```

### Training with prior preservation loss

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Fine-tune text encoder with the UNet

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="dog"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=2e-6 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training with xformers

You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.

You can also use Dreambooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint).

### Set grads to none

To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero.
However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument. More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html ### Experimental results You can refer to [this blog post](https://huggingface.co/blog/dreambooth) that discusses some of DreamBooth experiments in detail. Specifically, it recommends a set of DreamBooth-specific tips and tricks that we have found to work well for a variety of subjects. ## IF You can use the lora and full dreambooth scripts to train the text to image [IF model](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0) and the stage II upscaler [IF model](https://huggingface.co/DeepFloyd/IF-II-L-v1.0). Note that IF has a predicted variance, and our finetuning scripts only train the models predicted error, so for finetuned IF models we switch to a fixed variance schedule. The full finetuning scripts will update the scheduler config for the full saved model. However, when loading saved LoRA weights, you must also update the pipeline's scheduler config. ```py from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0") pipe.load_lora_weights("<lora weights path>") # Update scheduler config to fixed variance schedule pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small") ``` Additionally, a few alternative cli flags are needed for IF. `--resolution=64`: IF is a pixel space diffusion model. In order to operate on un-compressed pixels, the input images are of a much smaller resolution. `--pre_compute_text_embeddings`: IF uses [T5](https://huggingface.co/docs/transformers/model_doc/t5) for its text encoder. In order to save GPU memory, we pre compute all text embeddings and then de-allocate T5. `--tokenizer_max_length=77`: T5 has a longer default text length, but the default IF encoding procedure uses a smaller number. `--text_encoder_use_attention_mask`: T5 passes the attention mask to the text encoder. ### Tips and Tricks We find LoRA to be sufficient for finetuning the stage I model as the low resolution of the model makes representing finegrained detail hard regardless. For common and/or not-visually complex object concepts, you can get away with not-finetuning the upscaler. Just be sure to adjust the prompt passed to the upscaler to remove the new token from the instance prompt. I.e. if your stage I prompt is "a sks dog", use "a dog" for your stage II prompt. For finegrained detail like faces that aren't present in the original training set, we find that full finetuning of the stage II upscaler is better than LoRA finetuning stage II. For finegrained detail like faces, we find that lower learning rates along with larger batch sizes work best. For stage II, we find that lower learning rates are also needed. We found experimentally that the DDPM scheduler with the default larger number of denoising steps to sometimes work better than the DPM Solver scheduler used in the training scripts. ### Stage II additional validation images The stage II validation requires images to upscale, we can download a downsized version of the training set: ```py from huggingface_hub import snapshot_download local_dir = "./dog_downsized" snapshot_download( "diffusers/dog-example-downsized", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", ) ``` ### IF stage I LoRA Dreambooth This training configuration requires ~28 GB VRAM. 
```sh export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0" export INSTANCE_DIR="dog" export OUTPUT_DIR="dreambooth_dog_lora" accelerate launch train_dreambooth_lora.py \ --report_to wandb \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a sks dog" \ --resolution=64 \ --train_batch_size=4 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --scale_lr \ --max_train_steps=1200 \ --validation_prompt="a sks dog" \ --validation_epochs=25 \ --checkpointing_steps=100 \ --pre_compute_text_embeddings \ --tokenizer_max_length=77 \ --text_encoder_use_attention_mask ``` ### IF stage II LoRA Dreambooth `--validation_images`: These images are upscaled during validation steps. `--class_labels_conditioning=timesteps`: Pass additional conditioning to the UNet needed for stage II. `--learning_rate=1e-6`: Lower learning rate than stage I. `--resolution=256`: The upscaler expects higher resolution inputs ```sh export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" export INSTANCE_DIR="dog" export OUTPUT_DIR="dreambooth_dog_upscale" export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" python train_dreambooth_lora.py \ --report_to wandb \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a sks dog" \ --resolution=256 \ --train_batch_size=4 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-6 \ --max_train_steps=2000 \ --validation_prompt="a sks dog" \ --validation_epochs=100 \ --checkpointing_steps=500 \ --pre_compute_text_embeddings \ --tokenizer_max_length=77 \ --text_encoder_use_attention_mask \ --validation_images $VALIDATION_IMAGES \ --class_labels_conditioning=timesteps ``` ### IF Stage I Full Dreambooth `--skip_save_text_encoder`: When training the full model, this will skip saving the entire T5 with the finetuned model. You can still load the pipeline with a T5 loaded from the original model. `use_8bit_adam`: Due to the size of the optimizer states, we recommend training the full XL IF model with 8bit adam. `--learning_rate=1e-7`: For full dreambooth, IF requires very low learning rates. With higher learning rates model quality will degrade. Note that it is likely the learning rate can be increased with larger batch sizes. Using 8bit adam and a batch size of 4, the model can be trained in ~48 GB VRAM. `--validation_scheduler`: Set a particular scheduler via a string. We found that it is better to use the DDPMScheduler for validation when training DeepFloyd IF. ```sh export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0" export INSTANCE_DIR="dog" export OUTPUT_DIR="dreambooth_if" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=64 \ --train_batch_size=4 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-7 \ --max_train_steps=150 \ --validation_prompt "a photo of sks dog" \ --validation_steps 25 \ --text_encoder_use_attention_mask \ --tokenizer_max_length 77 \ --pre_compute_text_embeddings \ --use_8bit_adam \ --set_grads_to_none \ --skip_save_text_encoder \ --validation_scheduler DDPMScheduler \ --push_to_hub ``` ### IF Stage II Full Dreambooth `--learning_rate=5e-6`: With a smaller effective batch size of 4, we found that we required learning rates as low as 1e-8. 
`--resolution=256`: The upscaler expects higher resolution inputs `--train_batch_size=2` and `--gradient_accumulation_steps=6`: We found that full training of stage II particularly with faces required large effective batch sizes. ```sh export MODEL_NAME="DeepFloyd/IF-II-L-v1.0" export INSTANCE_DIR="dog" export OUTPUT_DIR="dreambooth_dog_upscale" export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png" accelerate launch train_dreambooth.py \ --report_to wandb \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a sks dog" \ --resolution=256 \ --train_batch_size=2 \ --gradient_accumulation_steps=6 \ --learning_rate=5e-6 \ --max_train_steps=2000 \ --validation_prompt="a sks dog" \ --validation_steps=150 \ --checkpointing_steps=500 \ --pre_compute_text_embeddings \ --tokenizer_max_length=77 \ --text_encoder_use_attention_mask \ --validation_images $VALIDATION_IMAGES \ --class_labels_conditioning timesteps \ --validation_scheduler DDPMScheduler\ --push_to_hub ``` ## Stable Diffusion XL We support fine-tuning of the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with DreamBooth and LoRA via the `train_dreambooth_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md). ## Dataset We support 🤗 [Datasets](https://huggingface.co/docs/datasets/index), you can find a dataset on the [Hugging Face Hub](https://huggingface.co/datasets) or use your own. The quickest way to get started with your custom dataset is 🤗 Datasets' [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). We need to create a file `metadata.jsonl` in the directory with our images: ``` {"file_name": "01.jpg", "prompt": "prompt 01"} {"file_name": "02.jpg", "prompt": "prompt 02"} ``` If we have a directory with image-text pairs e.g. `01.jpg` and `01.txt` then `convert_to_imagefolder.py` can create `metadata.jsonl`. ```sh python convert_to_imagefolder.py --path my_dataset/ ``` We use `--dataset_name` and `--caption_column` with training scripts. ``` --dataset_name=my_dataset/ --caption_column=prompt ```
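If you are curious what such a conversion involves, the following is a minimal sketch of the idea behind `convert_to_imagefolder.py` (an illustrative stand-in rather than the maintained script): it pairs every `NN.jpg` with its `NN.txt` caption and writes `metadata.jsonl` entries in the format shown above.

```python
# Illustrative sketch only -- assumes a flat directory of NN.jpg / NN.txt pairs;
# the bundled convert_to_imagefolder.py may differ in details.
import argparse
import json
from pathlib import Path

parser = argparse.ArgumentParser()
parser.add_argument("--path", type=Path, required=True, help="Directory with image/text pairs.")
args = parser.parse_args()

with open(args.path / "metadata.jsonl", "w") as f:
    for image_path in sorted(args.path.glob("*.jpg")):
        caption_path = image_path.with_suffix(".txt")
        if not caption_path.exists():
            continue  # skip images without a matching caption file
        record = {"file_name": image_path.name, "prompt": caption_path.read_text().strip()}
        f.write(json.dumps(record) + "\n")
```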
diffusers/examples/dreambooth/README.md/0
{ "file_path": "diffusers/examples/dreambooth/README.md", "repo_id": "diffusers", "token_count": 10165 }
140
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import sys import tempfile from diffusers import DiffusionPipeline, UNet2DConditionModel sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBooth(ExamplesTestsAccelerate): def test_dreambooth(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_dreambooth_if(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --pre_compute_text_embeddings --tokenizer_max_length=77 --text_encoder_use_attention_mask """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_dreambooth_checkpointing(self): instance_prompt = "photo" pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --instance_data_dir docs/source/en/imgs --instance_prompt {instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) # check can run the original fully trained output pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(instance_prompt, 
num_inference_steps=1) # check checkpoint directories exist self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-2"))) self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4"))) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(instance_prompt, num_inference_steps=1) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 7 total steps resuming from checkpoint 4 resume_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --instance_data_dir docs/source/en/imgs --instance_prompt {instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(instance_prompt, num_inference_steps=1) # check old checkpoints do not exist self.assertFalse(os.path.isdir(os.path.join(tmpdir, "checkpoint-2"))) # check new checkpoints exist self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4"))) self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-6"))) def test_dreambooth_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) resume_run_args = f""" examples/dreambooth/train_dreambooth.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
diffusers/examples/dreambooth/test_dreambooth.py/0
{ "file_path": "diffusers/examples/dreambooth/test_dreambooth.py", "repo_id": "diffusers", "token_count": 4466 }
141
# InstructPix2Pix training example [InstructPix2Pix](https://huggingface.co/papers/2211.09800) is a method to fine-tune text-conditioned diffusion models such that they can follow an edit instruction for an input image. Models fine-tuned using this method take the following as inputs: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/> </p> The output is an "edited" image that reflects the edit instruction applied on the input image: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/> </p> The `train_instruct_pix2pix.py` script shows how to implement the training procedure and adapt it for Stable Diffusion. ***Disclaimer: Even though `train_instruct_pix2pix.py` implements the InstructPix2Pix training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix) we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.*** ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` ### Toy example As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper. 
Configure environment variables such as the dataset identifier and the Stable Diffusion checkpoint: ```bash export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5" export DATASET_ID="fusing/instructpix2pix-1000-samples" ``` Now, we can launch training: ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` Additionally, we support performing validation inference to monitor training progress with Weights and Biases. You can enable this feature with `report_to="wandb"`: ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \ --validation_prompt="make the mountains snowy" \ --seed=42 \ --report_to=wandb \ --push_to_hub ``` We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`. [Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), you can find an example training run that includes some validation samples and the training hyperparameters. ***Note: In the original paper, the authors observed that even when the model is trained with an image resolution of 256x256, it generalizes well to bigger resolutions such as 512x512. This is likely because of the larger dataset they used during training.*** ## Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. 
Here is an example command: ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \ --pretrained_model_name_or_path=stable-diffusion-v1-5/stable-diffusion-v1-5 \ --dataset_name=sayakpaul/instructpix2pix-1000-samples \ --use_ema \ --enable_xformers_memory_efficient_attention \ --resolution=512 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` ## Inference Once training is complete, we can perform inference: ```python import PIL import requests import torch from diffusers import StableDiffusionInstructPix2PixPipeline model_id = "your_model_id" # <- replace this pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") generator = torch.Generator("cuda").manual_seed(0) url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png" def download_image(url): image = PIL.Image.open(requests.get(url, stream=True).raw) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image image = download_image(url) prompt = "wipe out the lake" num_inference_steps = 20 image_guidance_scale = 1.5 guidance_scale = 10 edited_image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, image_guidance_scale=image_guidance_scale, guidance_scale=guidance_scale, generator=generator, ).images[0] edited_image.save("edited_image.png") ``` An example model repo obtained using this training script can be found here - [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix). We encourage you to play with the following three parameters to control speed and quality during performance: * `num_inference_steps` * `image_guidance_scale` * `guidance_scale` Particularly, `image_guidance_scale` and `guidance_scale` can have a profound impact on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example). If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd). ## Stable Diffusion XL There's an equivalent `train_instruct_pix2pix_sdxl.py` script for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to the docs [here](./README_sdxl.md) to learn more.
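As a small illustration of the parameter exploration suggested in the inference section above, the snippet below sweeps `image_guidance_scale` and `guidance_scale` on a fixed seed so the outputs stay comparable. It is a sketch that assumes `pipe`, `prompt` and `image` are already set up as in the inference example; the specific values swept here are arbitrary starting points.

```python
# Sketch: sweep the two guidance knobs with a fixed seed so results stay comparable.
# Assumes `pipe`, `prompt` and `image` are defined as in the inference example above.
import itertools

import torch

for image_guidance_scale, guidance_scale in itertools.product([1.0, 1.5, 2.0], [5.0, 7.5, 10.0]):
    edited_image = pipe(
        prompt,
        image=image,
        num_inference_steps=20,
        image_guidance_scale=image_guidance_scale,
        guidance_scale=guidance_scale,
        generator=torch.Generator("cuda").manual_seed(0),
    ).images[0]
    edited_image.save(f"edited_igs{image_guidance_scale}_gs{guidance_scale}.png")
```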
diffusers/examples/instruct_pix2pix/README.md/0
{ "file_path": "diffusers/examples/instruct_pix2pix/README.md", "repo_id": "diffusers", "token_count": 2742 }
142
import numpy as np import numpy.core.multiarray as multiarray import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch.serialization import add_safe_globals from diffusers import DDPMScheduler, UNet1DModel add_safe_globals( [ multiarray._reconstruct, np.ndarray, np.dtype, np.dtype(np.float32).type, np.dtype(np.float64).type, np.dtype(np.int32).type, np.dtype(np.int64).type, type(np.dtype(np.float32)), type(np.dtype(np.float64)), type(np.dtype(np.int32)), type(np.dtype(np.int64)), ] ) """ An example of using HuggingFace's diffusers library for diffusion policy, generating smooth movement trajectories. This implements a robot control model for pushing a T-shaped block into a target area. The model takes in the robot arm position, block position, and block angle, then outputs a sequence of 16 (x,y) positions for the robot arm to follow. """ class ObservationEncoder(nn.Module): """ Converts raw robot observations (positions/angles) into a more compact representation state_dim (int): Dimension of the input state vector (default: 5) [robot_x, robot_y, block_x, block_y, block_angle] - Input shape: (batch_size, state_dim) - Output shape: (batch_size, 256) """ def __init__(self, state_dim): super().__init__() self.net = nn.Sequential(nn.Linear(state_dim, 512), nn.ReLU(), nn.Linear(512, 256)) def forward(self, x): return self.net(x) class ObservationProjection(nn.Module): """ Takes the encoded observation and transforms it into 32 values that represent the current robot/block situation. These values are used as additional contextual information during the diffusion model's trajectory generation. - Input: 256-dim vector (padded to 512) Shape: (batch_size, 256) - Output: 32 contextual information values for the diffusion model Shape: (batch_size, 32) """ def __init__(self): super().__init__() self.weight = nn.Parameter(torch.randn(32, 512)) self.bias = nn.Parameter(torch.zeros(32)) def forward(self, x): # pad 256-dim input to 512-dim with zeros if x.size(-1) == 256: x = torch.cat([x, torch.zeros(*x.shape[:-1], 256, device=x.device)], dim=-1) return nn.functional.linear(x, self.weight, self.bias) class DiffusionPolicy: """ Implements diffusion policy for generating robot arm trajectories. Uses diffusion to generate sequences of positions for a robot arm, conditioned on the current state of the robot and the block it needs to push. The model expects observations in pixel coordinates (0-512 range) and block angle in radians. It generates trajectories as sequences of (x,y) coordinates also in the 0-512 range. 
""" def __init__(self, state_dim=5, device="cpu"): self.device = device # define valid ranges for inputs/outputs self.stats = { "obs": {"min": torch.zeros(5), "max": torch.tensor([512, 512, 512, 512, 2 * np.pi])}, "action": {"min": torch.zeros(2), "max": torch.full((2,), 512)}, } self.obs_encoder = ObservationEncoder(state_dim).to(device) self.obs_projection = ObservationProjection().to(device) # UNet model that performs the denoising process # takes in concatenated action (2 channels) and context (32 channels) = 34 channels # outputs predicted action (2 channels for x,y coordinates) self.model = UNet1DModel( sample_size=16, # length of trajectory sequence in_channels=34, out_channels=2, layers_per_block=2, # number of layers per each UNet block block_out_channels=(128,), # number of output neurons per layer in each block down_block_types=("DownBlock1D",), # reduce the resolution of data up_block_types=("UpBlock1D",), # increase the resolution of data ).to(device) # noise scheduler that controls the denoising process self.noise_scheduler = DDPMScheduler( num_train_timesteps=100, # number of denoising steps beta_schedule="squaredcos_cap_v2", # type of noise schedule ) # load pre-trained weights from HuggingFace checkpoint = torch.load( hf_hub_download("dorsar/diffusion_policy", "push_tblock.pt"), weights_only=True, map_location=device ) self.model.load_state_dict(checkpoint["model_state_dict"]) self.obs_encoder.load_state_dict(checkpoint["encoder_state_dict"]) self.obs_projection.load_state_dict(checkpoint["projection_state_dict"]) # scales data to [-1, 1] range for neural network processing def normalize_data(self, data, stats): return ((data - stats["min"]) / (stats["max"] - stats["min"])) * 2 - 1 # converts normalized data back to original range def unnormalize_data(self, ndata, stats): return ((ndata + 1) / 2) * (stats["max"] - stats["min"]) + stats["min"] @torch.no_grad() def predict(self, observation): """ Generates a trajectory of robot arm positions given the current state. Args: observation (torch.Tensor): Current state [robot_x, robot_y, block_x, block_y, block_angle] Shape: (batch_size, 5) Returns: torch.Tensor: Sequence of (x,y) positions for the robot arm to follow Shape: (batch_size, 16, 2) where: - 16 is the number of steps in the trajectory - 2 is the (x,y) coordinates in pixel space (0-512) The function first encodes the observation, then uses it to condition a diffusion process that gradually denoises random trajectories into smooth, purposeful movements. 
""" observation = observation.to(self.device) normalized_obs = self.normalize_data(observation, self.stats["obs"]) # encode the observation into context values for the diffusion model cond = self.obs_projection(self.obs_encoder(normalized_obs)) # keeps first & second dimension sizes unchanged, and multiplies last dimension by 16 cond = cond.view(normalized_obs.shape[0], -1, 1).expand(-1, -1, 16) # initialize action with noise - random noise that will be refined into a trajectory action = torch.randn((observation.shape[0], 2, 16), device=self.device) # denoise # at each step `t`, the current noisy trajectory (`action`) & conditioning info (context) are # fed into the model to predict a denoised trajectory, then uses self.noise_scheduler.step to # apply this prediction & slightly reduce the noise in `action` more self.noise_scheduler.set_timesteps(100) for t in self.noise_scheduler.timesteps: model_output = self.model(torch.cat([action, cond], dim=1), t) action = self.noise_scheduler.step(model_output.sample, t, action).prev_sample action = action.transpose(1, 2) # reshape to [batch, 16, 2] action = self.unnormalize_data(action, self.stats["action"]) # scale back to coordinates return action if __name__ == "__main__": policy = DiffusionPolicy() # sample of a single observation # robot arm starts in center, block is slightly left and up, rotated 90 degrees obs = torch.tensor( [ [ 256.0, # robot arm x position (middle of screen) 256.0, # robot arm y position (middle of screen) 200.0, # block x position 300.0, # block y position np.pi / 2, # block angle (90 degrees) ] ] ) action = policy.predict(obs) print("Action shape:", action.shape) # should be [1, 16, 2] - one trajectory of 16 x,y positions print("\nPredicted trajectory:") for i, (x, y) in enumerate(action[0]): print(f"Step {i:2d}: x={x:6.1f}, y={y:6.1f}")
diffusers/examples/reinforcement_learning/diffusion_policy.py/0
{ "file_path": "diffusers/examples/reinforcement_learning/diffusion_policy.py", "repo_id": "diffusers", "token_count": 3227 }
143
# [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) by [colossalai](https://github.com/hpcaitech/ColossalAI.git)

[DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject.
The `train_dreambooth_colossalai.py` script shows how to implement the training procedure and adapt it for stable diffusion.

By accommodating model data in CPU and GPU and moving the data to the computing device when necessary, [Gemini](https://www.colossalai.org/docs/advanced_tutorials/meet_gemini), the Heterogeneous Memory Manager of [Colossal-AI](https://github.com/hpcaitech/ColossalAI), can break through the GPU memory wall by using GPU and CPU memory (composed of CPU DRAM or NVMe SSD memory) together at the same time. Moreover, the model scale can be further improved by combining heterogeneous training with other parallel approaches, such as data parallelism, tensor parallelism and pipeline parallelism.

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -r requirements.txt
```

## Install [ColossalAI](https://github.com/hpcaitech/ColossalAI.git)

**From PyPI**

```bash
pip install colossalai
```

**From source**

```bash
git clone https://github.com/hpcaitech/ColossalAI.git
cd ColossalAI

# install colossalai
pip install .
```

## Dataset for Teyvat BLIP captions

Dataset used to train [Teyvat characters text to image model](https://github.com/hpcaitech/ColossalAI/tree/main/examples/images/diffusion).

BLIP generated captions for character images from [genshin-impact fandom wiki](https://genshin-impact.fandom.com/wiki/Character#Playable_Characters) and [biligame wiki for genshin impact](https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2).

For each row the dataset contains `image` and `text` keys. `image` is a varying size PIL png, and `text` is the accompanying text caption. Only a train split is provided.

The `text` includes the tags `Teyvat`, `Name`, `Element`, `Weapon`, `Region`, `Model type`, and `Description`; the `Description` is captioned with the [pre-trained BLIP model](https://github.com/salesforce/BLIP).

## Training

The `placement` argument can be `cpu`, `auto`, or `cuda`: with `cpu`, the required GPU RAM can be reduced to as little as 4GB at the cost of slower training; with `cuda`, GPU memory is roughly halved while training stays fast; `auto` offers a more balanced trade-off between speed and memory.

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"

torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400 \
  --placement="cuda"
```

### Training with prior-preservation loss

Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it.
For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data.
According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time. ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=800 \ --placement="cuda" ``` ## Inference Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `identifier`(e.g. sks in above example) in your prompt. ```python from diffusers import StableDiffusionPipeline import torch model_id = "path-to-save-model" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") prompt = "A photo of sks dog in a bucket" image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png") ```
diffusers/examples/research_projects/colossalai/README.md/0
{ "file_path": "diffusers/examples/research_projects/colossalai/README.md", "repo_id": "diffusers", "token_count": 1660 }
144
# Dreambooth for the inpainting model This script was added by @thedarkzeno . Please note that this script is not actively maintained, you can open an issue and tag @thedarkzeno or @patil-suraj though. ```bash export MODEL_NAME="runwayml/stable-diffusion-inpainting" export INSTANCE_DIR="path-to-instance-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth_inpaint.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=400 ``` ### Training with prior-preservation loss Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data. According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. ```bash export MODEL_NAME="runwayml/stable-diffusion-inpainting" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth_inpaint.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### Training with gradient checkpointing and 8-bit optimizer: With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to run train dreambooth on a 16GB GPU. To install `bitandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation). ```bash export MODEL_NAME="runwayml/stable-diffusion-inpainting" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth_inpaint.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=2 --gradient_checkpointing \ --use_8bit_adam \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### Fine-tune text encoder with the UNet. The script also allows to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results especially on faces. Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`. ___Note: Training text encoder requires more memory, with this option the training won't fit on 16GB GPU. 
It needs at least 24GB VRAM.___ ```bash export MODEL_NAME="runwayml/stable-diffusion-inpainting" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth_inpaint.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_text_encoder \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --use_8bit_adam \ --gradient_checkpointing \ --learning_rate=2e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ```
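Once training has finished, inference works like the regular Stable Diffusion inpainting pipeline. Below is a minimal sketch, assuming the pipeline was saved to the `OUTPUT_DIR` used above and that you supply your own image and mask (white mask pixels are repainted); the file paths are placeholders.

```python
# Minimal inference sketch for the fine-tuned inpainting model (paths are placeholders).
import torch
from PIL import Image

from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "path-to-save-model", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("path-to-your-image.png").convert("RGB").resize((512, 512))
mask_image = Image.open("path-to-your-mask.png").convert("RGB").resize((512, 512))

image = pipe(
    prompt="a photo of sks dog sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("sks-dog-inpainted.png")
```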
diffusers/examples/research_projects/dreambooth_inpaint/README.md/0
{ "file_path": "diffusers/examples/research_projects/dreambooth_inpaint/README.md", "repo_id": "diffusers", "token_count": 1501 }
145
import argparse import itertools import json import os import random import time from pathlib import Path import torch import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import ProjectConfiguration from ip_adapter.ip_adapter import ImageProjModel from ip_adapter.utils import is_torch2_available from PIL import Image from torchvision import transforms from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel if is_torch2_available(): from ip_adapter.attention_processor import AttnProcessor2_0 as AttnProcessor from ip_adapter.attention_processor import IPAttnProcessor2_0 as IPAttnProcessor else: from ip_adapter.attention_processor import AttnProcessor, IPAttnProcessor # Dataset class MyDataset(torch.utils.data.Dataset): def __init__( self, json_file, tokenizer, size=512, t_drop_rate=0.05, i_drop_rate=0.05, ti_drop_rate=0.05, image_root_path="" ): super().__init__() self.tokenizer = tokenizer self.size = size self.i_drop_rate = i_drop_rate self.t_drop_rate = t_drop_rate self.ti_drop_rate = ti_drop_rate self.image_root_path = image_root_path self.data = json.load(open(json_file)) # list of dict: [{"image_file": "1.png", "text": "A dog"}] self.transform = transforms.Compose( [ transforms.Resize(self.size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(self.size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) self.clip_image_processor = CLIPImageProcessor() def __getitem__(self, idx): item = self.data[idx] text = item["text"] image_file = item["image_file"] # read image raw_image = Image.open(os.path.join(self.image_root_path, image_file)) image = self.transform(raw_image.convert("RGB")) clip_image = self.clip_image_processor(images=raw_image, return_tensors="pt").pixel_values # drop drop_image_embed = 0 rand_num = random.random() if rand_num < self.i_drop_rate: drop_image_embed = 1 elif rand_num < (self.i_drop_rate + self.t_drop_rate): text = "" elif rand_num < (self.i_drop_rate + self.t_drop_rate + self.ti_drop_rate): text = "" drop_image_embed = 1 # get text and tokenize text_input_ids = self.tokenizer( text, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt", ).input_ids return { "image": image, "text_input_ids": text_input_ids, "clip_image": clip_image, "drop_image_embed": drop_image_embed, } def __len__(self): return len(self.data) def collate_fn(data): images = torch.stack([example["image"] for example in data]) text_input_ids = torch.cat([example["text_input_ids"] for example in data], dim=0) clip_images = torch.cat([example["clip_image"] for example in data], dim=0) drop_image_embeds = [example["drop_image_embed"] for example in data] return { "images": images, "text_input_ids": text_input_ids, "clip_images": clip_images, "drop_image_embeds": drop_image_embeds, } class IPAdapter(torch.nn.Module): """IP-Adapter""" def __init__(self, unet, image_proj_model, adapter_modules, ckpt_path=None): super().__init__() self.unet = unet self.image_proj_model = image_proj_model self.adapter_modules = adapter_modules if ckpt_path is not None: self.load_from_checkpoint(ckpt_path) def forward(self, noisy_latents, timesteps, encoder_hidden_states, image_embeds): ip_tokens = self.image_proj_model(image_embeds) encoder_hidden_states = torch.cat([encoder_hidden_states, ip_tokens], dim=1) # Predict the noise residual noise_pred = self.unet(noisy_latents, 
timesteps, encoder_hidden_states).sample return noise_pred def load_from_checkpoint(self, ckpt_path: str): # Calculate original checksums orig_ip_proj_sum = torch.sum(torch.stack([torch.sum(p) for p in self.image_proj_model.parameters()])) orig_adapter_sum = torch.sum(torch.stack([torch.sum(p) for p in self.adapter_modules.parameters()])) state_dict = torch.load(ckpt_path, map_location="cpu") # Load state dict for image_proj_model and adapter_modules self.image_proj_model.load_state_dict(state_dict["image_proj"], strict=True) self.adapter_modules.load_state_dict(state_dict["ip_adapter"], strict=True) # Calculate new checksums new_ip_proj_sum = torch.sum(torch.stack([torch.sum(p) for p in self.image_proj_model.parameters()])) new_adapter_sum = torch.sum(torch.stack([torch.sum(p) for p in self.adapter_modules.parameters()])) # Verify if the weights have changed assert orig_ip_proj_sum != new_ip_proj_sum, "Weights of image_proj_model did not change!" assert orig_adapter_sum != new_adapter_sum, "Weights of adapter_modules did not change!" print(f"Successfully loaded weights from checkpoint {ckpt_path}") def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_ip_adapter_path", type=str, default=None, help="Path to pretrained ip adapter model. If not specified weights are initialized randomly.", ) parser.add_argument( "--data_json_file", type=str, default=None, required=True, help="Training data", ) parser.add_argument( "--data_root_path", type=str, default="", required=True, help="Training data root path", ) parser.add_argument( "--image_encoder_path", type=str, default=None, required=True, help="Path to CLIP image encoder", ) parser.add_argument( "--output_dir", type=str, default="sd-ip_adapter", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--resolution", type=int, default=512, help=("The resolution for input images"), ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Learning rate to use.", ) parser.add_argument("--weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--save_steps", type=int, default=2000, help=("Save a checkpoint of the training state every X updates"), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." 
), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args def main(): args = parse_args() logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.image_encoder_path) # freeze parameters of models to save more memory unet.requires_grad_(False) vae.requires_grad_(False) text_encoder.requires_grad_(False) image_encoder.requires_grad_(False) # ip-adapter image_proj_model = ImageProjModel( cross_attention_dim=unet.config.cross_attention_dim, clip_embeddings_dim=image_encoder.config.projection_dim, clip_extra_context_tokens=4, ) # init adapter modules attn_procs = {} unet_sd = unet.state_dict() for name in unet.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] if cross_attention_dim is None: attn_procs[name] = AttnProcessor() else: layer_name = name.split(".processor")[0] weights = { "to_k_ip.weight": unet_sd[layer_name + ".to_k.weight"], "to_v_ip.weight": unet_sd[layer_name + ".to_v.weight"], } attn_procs[name] = IPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) attn_procs[name].load_state_dict(weights) unet.set_attn_processor(attn_procs) adapter_modules = torch.nn.ModuleList(unet.attn_processors.values()) ip_adapter = IPAdapter(unet, image_proj_model, adapter_modules, args.pretrained_ip_adapter_path) weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) image_encoder.to(accelerator.device, dtype=weight_dtype) # optimizer params_to_opt = 
itertools.chain(ip_adapter.image_proj_model.parameters(), ip_adapter.adapter_modules.parameters()) optimizer = torch.optim.AdamW(params_to_opt, lr=args.learning_rate, weight_decay=args.weight_decay) # dataloader train_dataset = MyDataset( args.data_json_file, tokenizer=tokenizer, size=args.resolution, image_root_path=args.data_root_path ) train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Prepare everything with our `accelerator`. ip_adapter, optimizer, train_dataloader = accelerator.prepare(ip_adapter, optimizer, train_dataloader) global_step = 0 for epoch in range(0, args.num_train_epochs): begin = time.perf_counter() for step, batch in enumerate(train_dataloader): load_data_time = time.perf_counter() - begin with accelerator.accumulate(ip_adapter): # Convert images to latent space with torch.no_grad(): latents = vae.encode( batch["images"].to(accelerator.device, dtype=weight_dtype) ).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) with torch.no_grad(): image_embeds = image_encoder( batch["clip_images"].to(accelerator.device, dtype=weight_dtype) ).image_embeds image_embeds_ = [] for image_embed, drop_image_embed in zip(image_embeds, batch["drop_image_embeds"]): if drop_image_embed == 1: image_embeds_.append(torch.zeros_like(image_embed)) else: image_embeds_.append(image_embed) image_embeds = torch.stack(image_embeds_) with torch.no_grad(): encoder_hidden_states = text_encoder(batch["text_input_ids"].to(accelerator.device))[0] noise_pred = ip_adapter(noisy_latents, timesteps, encoder_hidden_states, image_embeds) loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean") # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean().item() # Backpropagate accelerator.backward(loss) optimizer.step() optimizer.zero_grad() if accelerator.is_main_process: print( "Epoch {}, step {}, data_time: {}, time: {}, step_loss: {}".format( epoch, step, load_data_time, time.perf_counter() - begin, avg_loss ) ) global_step += 1 if global_step % args.save_steps == 0: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) begin = time.perf_counter() if __name__ == "__main__": main()
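# Example launch command (a rough sketch; the model ID and all data paths below are placeholders):
#   accelerate launch tutorial_train_ip-adapter.py \
#     --pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5" \
#     --image_encoder_path="/path/to/clip_image_encoder" \
#     --data_json_file="/path/to/data.json" \
#     --data_root_path="/path/to/images" \
#     --mixed_precision="fp16" \
#     --train_batch_size=8 \
#     --output_dir="sd-ip_adapter"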
diffusers/examples/research_projects/ip_adapter/tutorial_train_ip-adapter.py/0
{ "file_path": "diffusers/examples/research_projects/ip_adapter/tutorial_train_ip-adapter.py", "repo_id": "diffusers", "token_count": 7360 }
146
import torch import torchvision.transforms as T from controlnet_aux import HEDdetector from diffusers.utils import load_image from examples.research_projects.pixart.controlnet_pixart_alpha import PixArtControlNetAdapterModel from examples.research_projects.pixart.pipeline_pixart_alpha_controlnet import PixArtAlphaControlnetPipeline controlnet_repo_id = "raulc0399/pixart-alpha-hed-controlnet" weight_dtype = torch.float16 image_size = 1024 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.manual_seed(0) # load controlnet controlnet = PixArtControlNetAdapterModel.from_pretrained( controlnet_repo_id, torch_dtype=weight_dtype, use_safetensors=True, ).to(device) pipe = PixArtAlphaControlnetPipeline.from_pretrained( "PixArt-alpha/PixArt-XL-2-1024-MS", controlnet=controlnet, torch_dtype=weight_dtype, use_safetensors=True, ).to(device) images_path = "images" control_image_file = "0_7.jpg" # prompt = "cinematic photo of superman in action . 35mm photograph, film, bokeh, professional, 4k, highly detailed" # prompt = "yellow modern car, city in background, beautiful rainy day" # prompt = "modern villa, clear sky, sunny day . 35mm photograph, film, bokeh, professional, 4k, highly detailed" # prompt = "robot dog toy in park . 35mm photograph, film, bokeh, professional, 4k, highly detailed" # prompt = "purple car, on highway, beautiful sunny day" # prompt = "realistic photo of a loving couple standing in the open kitchen of the living room, cooking ." prompt = "battleship in space, galaxy in background" control_image_name = control_image_file.split(".")[0] control_image = load_image(f"{images_path}/{control_image_file}") print(control_image.size) width, height = control_image.size # PIL's Image.size is (width, height) hed = HEDdetector.from_pretrained("lllyasviel/Annotators") condition_transform = T.Compose( [ T.Lambda(lambda img: img.convert("RGB")), T.CenterCrop([image_size, image_size]), ] ) control_image = condition_transform(control_image) hed_edge = hed(control_image, detect_resolution=image_size, image_resolution=image_size) hed_edge.save(f"{images_path}/{control_image_name}_hed.jpg") # run pipeline with torch.no_grad(): out = pipe( prompt=prompt, image=hed_edge, num_inference_steps=14, guidance_scale=4.5, height=image_size, width=image_size, ) out.images[0].save(f"{images_path}/{control_image_name}_output.jpg")
diffusers/examples/research_projects/pixart/run_pixart_alpha_controlnet_pipeline.py/0
{ "file_path": "diffusers/examples/research_projects/pixart/run_pixart_alpha_controlnet_pipeline.py", "repo_id": "diffusers", "token_count": 895 }
147
import argparse import os import torch from PIL import Image, ImageFilter from transformers import CLIPTextModel from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel parser = argparse.ArgumentParser(description="Inference") parser.add_argument( "--model_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--validation_image", type=str, default=None, required=True, help="The directory of the validation image", ) parser.add_argument( "--validation_mask", type=str, default=None, required=True, help="The directory of the validation mask", ) parser.add_argument( "--output_dir", type=str, default="./test-infer/", help="The output directory where predictions are saved", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible inference.") args = parser.parse_args() if __name__ == "__main__": os.makedirs(args.output_dir, exist_ok=True) generator = None # create & load model pipe = StableDiffusionInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float32, revision=None ) pipe.unet = UNet2DConditionModel.from_pretrained( args.model_path, subfolder="unet", revision=None, ) pipe.text_encoder = CLIPTextModel.from_pretrained( args.model_path, subfolder="text_encoder", revision=None, ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe = pipe.to("cuda") if args.seed is not None: generator = torch.Generator(device="cuda").manual_seed(args.seed) image = Image.open(args.validation_image) mask_image = Image.open(args.validation_mask) results = pipe( ["a photo of sks"] * 16, image=image, mask_image=mask_image, num_inference_steps=25, guidance_scale=5, generator=generator, ).images erode_kernel = ImageFilter.MaxFilter(3) mask_image = mask_image.filter(erode_kernel) blur_kernel = ImageFilter.BoxBlur(1) mask_image = mask_image.filter(blur_kernel) for idx, result in enumerate(results): result = Image.composite(result, image, mask_image) result.save(f"{args.output_dir}/{idx}.png") del pipe torch.cuda.empty_cache()
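# Example invocation (a minimal sketch; all paths below are placeholders):
#   python infer.py \
#     --model_path="/path/to/trained-realfill-model" \
#     --validation_image="/path/to/scene.png" \
#     --validation_mask="/path/to/mask.png" \
#     --output_dir="./test-infer/" \
#     --seed=42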
diffusers/examples/research_projects/realfill/infer.py/0
{ "file_path": "diffusers/examples/research_projects/realfill/infer.py", "repo_id": "diffusers", "token_count": 984 }
148
<jupyter_start><jupyter_text>Running Stable Diffusion 3 (SD3) DreamBooth LoRA training under 16GB GPU VRAM Install Dependencies<jupyter_code>!pip install -q -U git+https://github.com/huggingface/diffusers !pip install -q -U \ transformers \ accelerate \ wandb \ bitsandbytes \ peft<jupyter_output><empty_output><jupyter_text>As SD3 is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in:<jupyter_code>!hf auth login<jupyter_output><empty_output><jupyter_text>Clone `diffusers`<jupyter_code>!git clone https://github.com/huggingface/diffusers %cd diffusers/examples/research_projects/sd3_lora_colab<jupyter_output><empty_output><jupyter_text>Download instance data images<jupyter_code>from huggingface_hub import snapshot_download local_dir = "./dog" snapshot_download( "diffusers/dog-example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", ) !rm -rf dog/.cache<jupyter_output><empty_output><jupyter_text>Compute embeddingsHere we are using the default instance prompt "a photo of sks dog". But you can configure this. Refer to the `compute_embeddings.py` script for details on other supported arguments.<jupyter_code>!python compute_embeddings.py<jupyter_output><empty_output><jupyter_text>Clear memory<jupyter_code>import torch import gc def flush(): torch.cuda.empty_cache() gc.collect() flush()<jupyter_output><empty_output><jupyter_text>Train!<jupyter_code>!accelerate launch train_dreambooth_lora_sd3_miniature.py \ --pretrained_model_name_or_path="stabilityai/stable-diffusion-3-medium-diffusers" \ --instance_data_dir="dog" \ --data_df_path="sample_embeddings.parquet" \ --output_dir="trained-sd3-lora-miniature" \ --mixed_precision="fp16" \ --instance_prompt="a photo of sks dog" \ --resolution=1024 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 --gradient_checkpointing \ --use_8bit_adam \ --learning_rate=1e-4 \ --report_to="wandb" \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --seed="0"<jupyter_output><empty_output><jupyter_text>Training will take about an hour to complete depending on the length of your dataset. Inference<jupyter_code>flush() from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16 ) lora_output_path = "trained-sd3-lora-miniature" pipeline.load_lora_weights("trained-sd3-lora-miniature") pipeline.enable_sequential_cpu_offload() image = pipeline("a photo of sks dog in a bucket").images[0] image.save("bucket_dog.png")<jupyter_output><empty_output>
diffusers/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb/0
{ "file_path": "diffusers/examples/research_projects/sd3_lora_colab/sd3_dreambooth_lora_16gb.ipynb", "repo_id": "diffusers", "token_count": 1071 }
149
import asyncio import logging import os import random import tempfile import traceback import uuid import aiohttp import torch from fastapi import FastAPI, HTTPException from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles from pydantic import BaseModel from diffusers.pipelines.stable_diffusion_3 import StableDiffusion3Pipeline logger = logging.getLogger(__name__) class TextToImageInput(BaseModel): model: str prompt: str size: str | None = None n: int | None = None class HttpClient: session: aiohttp.ClientSession = None def start(self): self.session = aiohttp.ClientSession() async def stop(self): await self.session.close() self.session = None def __call__(self) -> aiohttp.ClientSession: assert self.session is not None return self.session class TextToImagePipeline: pipeline: StableDiffusion3Pipeline = None device: str = None def start(self): if torch.cuda.is_available(): model_path = os.getenv("MODEL_PATH", "stabilityai/stable-diffusion-3.5-large") logger.info("Loading CUDA") self.device = "cuda" self.pipeline = StableDiffusion3Pipeline.from_pretrained( model_path, torch_dtype=torch.bfloat16, ).to(device=self.device) elif torch.backends.mps.is_available(): model_path = os.getenv("MODEL_PATH", "stabilityai/stable-diffusion-3.5-medium") logger.info("Loading MPS for Mac M Series") self.device = "mps" self.pipeline = StableDiffusion3Pipeline.from_pretrained( model_path, torch_dtype=torch.bfloat16, ).to(device=self.device) else: raise Exception("No CUDA or MPS device available") app = FastAPI() service_url = os.getenv("SERVICE_URL", "http://localhost:8000") image_dir = os.path.join(tempfile.gettempdir(), "images") if not os.path.exists(image_dir): os.makedirs(image_dir) app.mount("/images", StaticFiles(directory=image_dir), name="images") http_client = HttpClient() shared_pipeline = TextToImagePipeline() # Configure CORS settings app.add_middleware( CORSMiddleware, allow_origins=["*"], # Allows all origins allow_credentials=True, allow_methods=["*"], # Allows all methods, e.g., GET, POST, OPTIONS, etc. allow_headers=["*"], # Allows all headers ) @app.on_event("startup") def startup(): http_client.start() shared_pipeline.start() def save_image(image): filename = "draw" + str(uuid.uuid4()).split("-")[0] + ".png" image_path = os.path.join(image_dir, filename) # write image to disk at image_path logger.info(f"Saving image to {image_path}") image.save(image_path) return os.path.join(service_url, "images", filename) @app.get("/") @app.post("/") @app.options("/") async def base(): return "Welcome to Diffusers! 
Where you can use diffusion models to generate images" @app.post("/v1/images/generations") async def generate_image(image_input: TextToImageInput): try: loop = asyncio.get_event_loop() scheduler = shared_pipeline.pipeline.scheduler.from_config(shared_pipeline.pipeline.scheduler.config) pipeline = StableDiffusion3Pipeline.from_pipe(shared_pipeline.pipeline, scheduler=scheduler) generator = torch.Generator(device=shared_pipeline.device) generator.manual_seed(random.randint(0, 10000000)) output = await loop.run_in_executor(None, lambda: pipeline(image_input.prompt, generator=generator)) logger.info(f"output: {output}") image_url = save_image(output.images[0]) return {"data": [{"url": image_url}]} except Exception as e: if isinstance(e, HTTPException): raise e elif hasattr(e, "message"): raise HTTPException(status_code=500, detail=e.message + traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e) + traceback.format_exc()) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000)
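# Example request against a locally running instance (a sketch; it assumes the defaults for MODEL_PATH and
# SERVICE_URL and that the server was started with `python server.py` and listens on localhost:8000):
#   curl -X POST http://localhost:8000/v1/images/generations \
#     -H "Content-Type: application/json" \
#     -d '{"model": "stabilityai/stable-diffusion-3.5-large", "prompt": "a photo of a cat"}'
# The JSON response contains data[0].url, a link to the generated PNG served from the /images mount.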
diffusers/examples/server/server.py/0
{ "file_path": "diffusers/examples/server/server.py", "repo_id": "diffusers", "token_count": 1691 }
150
## Training a VQGAN VAE VQVAEs were first introduced in [Neural Discrete Representation Learning](https://huggingface.co/papers/1711.00937) and were later combined with a GAN in the paper [Taming Transformers for High-Resolution Image Synthesis](https://huggingface.co/papers/2012.09841). The basic idea of a VQVAE is that it is a variational autoencoder whose latent space consists of discrete tokens, similar to the tokens used by LLMs. This script was adapted from a [PR to Hugging Face's open-muse project](https://github.com/huggingface/open-muse/pull/52), with the general code following [lucidrains' implementation of the VQGAN training script](https://github.com/lucidrains/muse-maskgit-pytorch/blob/main/muse_maskgit_pytorch/trainers.py); both of these implementations follow from the [taming-transformers repo](https://github.com/CompVis/taming-transformers?tab=readme-ov-file). Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets). ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd into the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` ### Training on CIFAR10 The command to train a VQGAN model on the CIFAR10 dataset: ```bash accelerate launch train_vqgan.py \ --dataset_name=cifar10 \ --image_column=img \ --validation_images images/bird.jpg images/car.jpg images/dog.jpg images/frog.jpg \ --resolution=128 \ --train_batch_size=2 \ --gradient_accumulation_steps=8 \ --report_to=wandb ``` An example training run is [here](https://wandb.ai/sayakpaul/vqgan-training/runs/0m5kzdfp) by @sayakpaul and a lower-scale one is [here](https://wandb.ai/dsbuddy27/vqgan-training/runs/eqd6xi4n?nw=nwuserisamu). The validation images can be obtained from [here](https://huggingface.co/datasets/diffusers/docs-images/tree/main/vqgan_validation_images). The simplest way to improve the quality of a VQGAN model is to maximize the amount of information present in the bottleneck. The easiest way to do this is to increase the image resolution. Other ways include, but are not limited to, lowering compression by downsampling fewer times or increasing the vocabulary size, which can be at most around 16384. How to do this is shown below. # Modifying the architecture To modify the architecture of the VQGAN model, you can save the config taken from [here](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder/blob/main/movq/config.json) and then provide it to the script with the option --model_config_name_or_path. 
This config is shown below: ``` { "_class_name": "VQModel", "_diffusers_version": "0.17.0.dev0", "act_fn": "silu", "block_out_channels": [ 128, 256, 256, 512 ], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D" ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 2, "norm_num_groups": 32, "norm_type": "spatial", "num_vq_embeddings": 16384, "out_channels": 3, "sample_size": 32, "scaling_factor": 0.18215, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D" ], "vq_embed_dim": 4 } ``` To lower the number of layers in a VQGAN, you can remove layers by modifying block_out_channels, down_block_types, and up_block_types as below ``` { "_class_name": "VQModel", "_diffusers_version": "0.17.0.dev0", "act_fn": "silu", "block_out_channels": [ 128, 256, 256 ], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D" ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 2, "norm_num_groups": 32, "norm_type": "spatial", "num_vq_embeddings": 16384, "out_channels": 3, "sample_size": 32, "scaling_factor": 0.18215, "up_block_types": [ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D" ], "vq_embed_dim": 4 } ``` To increase the size of the vocabulary, you can increase num_vq_embeddings. However, [some research](https://magvit.cs.cmu.edu/v2/) shows that the representations of VQGANs start degrading after 2^14 = 16384 vq embeddings, so it is not recommended to go past that. ## Extra training tips/ideas During logging, take care to make sure data_time stays low. data_time is the amount of time spent loading the data, during which the GPU is not active; essentially, it is wasted time. The easiest way to lower data_time is to increase --dataloader_num_workers to a higher number like 4. Due to a bug in PyTorch, this only works on Linux-based systems. For more details, check [here](https://github.com/huggingface/diffusers/issues/7646). Secondly, training can be considered done when both the discriminator and the generator losses converge. Thirdly, another low-hanging fruit is enabling EMA via the --use_ema parameter. This tends to make the output images smoother. The downside is that you have to lower your batch size by 1, but it may be worth it. Another, more experimental, low-hanging fruit is switching from vgg19 to a different model for the LPIPS loss using the --timm_model_backend parameter. If you do this, I recommend also changing the timm_model_layers parameter to the layer in your model that you think is best for representation. However, be careful with the feature map norms since they can easily dominate the loss.
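As a rough sketch of how to use this (the config filename here is a placeholder), save one of the modified configs above as a JSON file and point the training script at it with --model_config_name_or_path: ```bash accelerate launch train_vqgan.py \ --dataset_name=cifar10 \ --image_column=img \ --resolution=128 \ --train_batch_size=2 \ --gradient_accumulation_steps=8 \ --model_config_name_or_path=my_vqgan_config.json ```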
diffusers/examples/vqgan/README.md/0
{ "file_path": "diffusers/examples/vqgan/README.md", "repo_id": "diffusers", "token_count": 1950 }
151
import argparse from typing import Any, Dict import torch from transformers import T5EncoderModel, T5Tokenizer from diffusers import ( AutoencoderKLCogVideoX, CogVideoXDDIMScheduler, CogVideoXImageToVideoPipeline, CogVideoXPipeline, CogVideoXTransformer3DModel, ) def reassign_query_key_value_inplace(key: str, state_dict: Dict[str, Any]): to_q_key = key.replace("query_key_value", "to_q") to_k_key = key.replace("query_key_value", "to_k") to_v_key = key.replace("query_key_value", "to_v") to_q, to_k, to_v = torch.chunk(state_dict[key], chunks=3, dim=0) state_dict[to_q_key] = to_q state_dict[to_k_key] = to_k state_dict[to_v_key] = to_v state_dict.pop(key) def reassign_query_key_layernorm_inplace(key: str, state_dict: Dict[str, Any]): layer_id, weight_or_bias = key.split(".")[-2:] if "query" in key: new_key = f"transformer_blocks.{layer_id}.attn1.norm_q.{weight_or_bias}" elif "key" in key: new_key = f"transformer_blocks.{layer_id}.attn1.norm_k.{weight_or_bias}" state_dict[new_key] = state_dict.pop(key) def reassign_adaln_norm_inplace(key: str, state_dict: Dict[str, Any]): layer_id, _, weight_or_bias = key.split(".")[-3:] weights_or_biases = state_dict[key].chunk(12, dim=0) norm1_weights_or_biases = torch.cat(weights_or_biases[0:3] + weights_or_biases[6:9]) norm2_weights_or_biases = torch.cat(weights_or_biases[3:6] + weights_or_biases[9:12]) norm1_key = f"transformer_blocks.{layer_id}.norm1.linear.{weight_or_bias}" state_dict[norm1_key] = norm1_weights_or_biases norm2_key = f"transformer_blocks.{layer_id}.norm2.linear.{weight_or_bias}" state_dict[norm2_key] = norm2_weights_or_biases state_dict.pop(key) def remove_keys_inplace(key: str, state_dict: Dict[str, Any]): state_dict.pop(key) def replace_up_keys_inplace(key: str, state_dict: Dict[str, Any]): key_split = key.split(".") layer_index = int(key_split[2]) replace_layer_index = 4 - 1 - layer_index key_split[1] = "up_blocks" key_split[2] = str(replace_layer_index) new_key = ".".join(key_split) state_dict[new_key] = state_dict.pop(key) TRANSFORMER_KEYS_RENAME_DICT = { "transformer.final_layernorm": "norm_final", "transformer": "transformer_blocks", "attention": "attn1", "mlp": "ff.net", "dense_h_to_4h": "0.proj", "dense_4h_to_h": "2", ".layers": "", "dense": "to_out.0", "input_layernorm": "norm1.norm", "post_attn1_layernorm": "norm2.norm", "time_embed.0": "time_embedding.linear_1", "time_embed.2": "time_embedding.linear_2", "ofs_embed.0": "ofs_embedding.linear_1", "ofs_embed.2": "ofs_embedding.linear_2", "mixins.patch_embed": "patch_embed", "mixins.final_layer.norm_final": "norm_out.norm", "mixins.final_layer.linear": "proj_out", "mixins.final_layer.adaLN_modulation.1": "norm_out.linear", "mixins.pos_embed.pos_embedding": "patch_embed.pos_embedding", # Specific to CogVideoX-5b-I2V } TRANSFORMER_SPECIAL_KEYS_REMAP = { "query_key_value": reassign_query_key_value_inplace, "query_layernorm_list": reassign_query_key_layernorm_inplace, "key_layernorm_list": reassign_query_key_layernorm_inplace, "adaln_layer.adaLN_modulations": reassign_adaln_norm_inplace, "embed_tokens": remove_keys_inplace, "freqs_sin": remove_keys_inplace, "freqs_cos": remove_keys_inplace, "position_embedding": remove_keys_inplace, } VAE_KEYS_RENAME_DICT = { "block.": "resnets.", "down.": "down_blocks.", "downsample": "downsamplers.0", "upsample": "upsamplers.0", "nin_shortcut": "conv_shortcut", "encoder.mid.block_1": "encoder.mid_block.resnets.0", "encoder.mid.block_2": "encoder.mid_block.resnets.1", "decoder.mid.block_1": "decoder.mid_block.resnets.0", "decoder.mid.block_2": 
"decoder.mid_block.resnets.1", } VAE_SPECIAL_KEYS_REMAP = { "loss": remove_keys_inplace, "up.": replace_up_keys_inplace, } TOKENIZER_MAX_LENGTH = 226 def get_state_dict(saved_dict: Dict[str, Any]) -> Dict[str, Any]: state_dict = saved_dict if "model" in saved_dict.keys(): state_dict = state_dict["model"] if "module" in saved_dict.keys(): state_dict = state_dict["module"] if "state_dict" in saved_dict.keys(): state_dict = state_dict["state_dict"] return state_dict def update_state_dict_inplace(state_dict: Dict[str, Any], old_key: str, new_key: str) -> Dict[str, Any]: state_dict[new_key] = state_dict.pop(old_key) def convert_transformer( ckpt_path: str, num_layers: int, num_attention_heads: int, use_rotary_positional_embeddings: bool, i2v: bool, dtype: torch.dtype, init_kwargs: Dict[str, Any], ): PREFIX_KEY = "model.diffusion_model." original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True)) transformer = CogVideoXTransformer3DModel( in_channels=32 if i2v else 16, num_layers=num_layers, num_attention_heads=num_attention_heads, use_rotary_positional_embeddings=use_rotary_positional_embeddings, ofs_embed_dim=512 if (i2v and init_kwargs["patch_size_t"] is not None) else None, # CogVideoX1.5-5B-I2V use_learned_positional_embeddings=i2v and init_kwargs["patch_size_t"] is None, # CogVideoX-5B-I2V **init_kwargs, ).to(dtype=dtype) for key in list(original_state_dict.keys()): new_key = key[len(PREFIX_KEY) :] for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_inplace(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) transformer.load_state_dict(original_state_dict, strict=True) return transformer def convert_vae(ckpt_path: str, scaling_factor: float, version: str, dtype: torch.dtype): init_kwargs = {"scaling_factor": scaling_factor} if version == "1.5": init_kwargs.update({"invert_scale_latents": True}) original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True)) vae = AutoencoderKLCogVideoX(**init_kwargs).to(dtype=dtype) for key in list(original_state_dict.keys()): new_key = key[:] for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_inplace(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in VAE_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) vae.load_state_dict(original_state_dict, strict=True) return vae def get_transformer_init_kwargs(version: str): if version == "1.0": vae_scale_factor_spatial = 8 init_kwargs = { "patch_size": 2, "patch_size_t": None, "patch_bias": True, "sample_height": 480 // vae_scale_factor_spatial, "sample_width": 720 // vae_scale_factor_spatial, "sample_frames": 49, } elif version == "1.5": vae_scale_factor_spatial = 8 init_kwargs = { "patch_size": 2, "patch_size_t": 2, "patch_bias": False, "sample_height": 300, "sample_width": 300, "sample_frames": 81, } else: raise ValueError("Unsupported version of CogVideoX.") return init_kwargs def get_args(): parser = argparse.ArgumentParser() parser.add_argument( "--transformer_ckpt_path", type=str, default=None, help="Path to original transformer checkpoint" ) parser.add_argument("--vae_ckpt_path", 
type=str, default=None, help="Path to original vae checkpoint") parser.add_argument("--output_path", type=str, required=True, help="Path where converted model should be saved") parser.add_argument("--fp16", action="store_true", default=False, help="Whether to save the model weights in fp16") parser.add_argument("--bf16", action="store_true", default=False, help="Whether to save the model weights in bf16") parser.add_argument( "--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving" ) parser.add_argument( "--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory" ) parser.add_argument( "--typecast_text_encoder", action="store_true", default=False, help="Whether or not to apply fp16/bf16 precision to text_encoder", ) # For CogVideoX-2B, num_layers is 30. For 5B, it is 42 parser.add_argument("--num_layers", type=int, default=30, help="Number of transformer blocks") # For CogVideoX-2B, num_attention_heads is 30. For 5B, it is 48 parser.add_argument("--num_attention_heads", type=int, default=30, help="Number of attention heads") # For CogVideoX-2B, use_rotary_positional_embeddings is False. For 5B, it is True parser.add_argument( "--use_rotary_positional_embeddings", action="store_true", default=False, help="Whether to use RoPE or not" ) # For CogVideoX-2B, scaling_factor is 1.15258426. For 5B, it is 0.7 parser.add_argument("--scaling_factor", type=float, default=1.15258426, help="Scaling factor in the VAE") # For CogVideoX-2B, snr_shift_scale is 3.0. For 5B, it is 1.0 parser.add_argument("--snr_shift_scale", type=float, default=3.0, help="Scaling factor in the VAE") parser.add_argument( "--i2v", action="store_true", default=False, help="Whether the model to be converted is the Image-to-Video version of CogVideoX.", ) parser.add_argument( "--version", choices=["1.0", "1.5"], default="1.0", help="Which version of CogVideoX to use for initializing default modeling parameters.", ) return parser.parse_args() if __name__ == "__main__": args = get_args() transformer = None vae = None if args.fp16 and args.bf16: raise ValueError("You cannot pass both --fp16 and --bf16 at the same time.") dtype = torch.float16 if args.fp16 else torch.bfloat16 if args.bf16 else torch.float32 if args.transformer_ckpt_path is not None: init_kwargs = get_transformer_init_kwargs(args.version) transformer = convert_transformer( args.transformer_ckpt_path, args.num_layers, args.num_attention_heads, args.use_rotary_positional_embeddings, args.i2v, dtype, init_kwargs, ) if args.vae_ckpt_path is not None: # Keep VAE in float32 for better quality vae = convert_vae(args.vae_ckpt_path, args.scaling_factor, args.version, torch.float32) text_encoder_id = "google/t5-v1_1-xxl" tokenizer = T5Tokenizer.from_pretrained(text_encoder_id, model_max_length=TOKENIZER_MAX_LENGTH) text_encoder = T5EncoderModel.from_pretrained(text_encoder_id, cache_dir=args.text_encoder_cache_dir) if args.typecast_text_encoder: text_encoder = text_encoder.to(dtype=dtype) # Apparently, the conversion does not work anymore without this :shrug: for param in text_encoder.parameters(): param.data = param.data.contiguous() scheduler = CogVideoXDDIMScheduler.from_config( { "snr_shift_scale": args.snr_shift_scale, "beta_end": 0.012, "beta_schedule": "scaled_linear", "beta_start": 0.00085, "clip_sample": False, "num_train_timesteps": 1000, "prediction_type": "v_prediction", "rescale_betas_zero_snr": True, "set_alpha_to_one": True, "timestep_spacing": "trailing", } ) if args.i2v: pipeline_cls = 
CogVideoXImageToVideoPipeline else: pipeline_cls = CogVideoXPipeline pipe = pipeline_cls( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler, ) # We don't use variant here because the model must be run in fp16 (2B) or bf16 (5B). It would be weird # for users to specify variant when the default is not fp32 and they want to run with the correct default (which # is either fp16/bf16 here). # This is necessary for users with insufficient memory, # such as those using Colab and notebooks, as it can save some memory used for model loading. pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)
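# Example conversion command (a sketch; the checkpoint paths are placeholders and the numeric values are
# the CogVideoX-2B defaults documented in get_args above):
#   python convert_cogvideox_to_diffusers.py \
#     --transformer_ckpt_path /path/to/transformer.pt \
#     --vae_ckpt_path /path/to/vae.pt \
#     --output_path ./cogvideox-2b-diffusers \
#     --num_layers 30 --num_attention_heads 30 \
#     --scaling_factor 1.15258426 --snr_shift_scale 3.0 \
#     --fp16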
diffusers/scripts/convert_cogvideox_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_cogvideox_to_diffusers.py", "repo_id": "diffusers", "token_count": 5698 }
152
import argparse import re import torch import yaml from transformers import ( CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, protected, renew_attention_paths, renew_resnet_paths, renew_vae_attention_paths, renew_vae_resnet_paths, shave_segments, textenc_conversion_map, textenc_pattern, ) def convert_open_clip_checkpoint(checkpoint): checkpoint = checkpoint["text_encoder"] text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14") keys = list(checkpoint.keys()) text_model_dict = {} if "cond_stage_model.model.text_projection" in checkpoint: d_model = int(checkpoint["cond_stage_model.model.text_projection"].shape[0]) else: d_model = 1024 for key in keys: if "resblocks.23" in key: # Diffusers drops the final layer and only uses the penultimate layer continue if key in textenc_conversion_map: text_model_dict[textenc_conversion_map[key]] = checkpoint[key] # if key.startswith("cond_stage_model.model.transformer."): new_key = key[len("transformer.") :] if new_key.endswith(".in_proj_weight"): new_key = new_key[: -len(".in_proj_weight")] new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] elif new_key.endswith(".in_proj_bias"): new_key = new_key[: -len(".in_proj_bias")] new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] else: if key != "transformer.text_model.embeddings.position_ids": new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key] = checkpoint[key] if key == "transformer.text_model.embeddings.token_embedding.weight": text_model_dict["text_model.embeddings.token_embedding.weight"] = checkpoint[key] text_model_dict.pop("text_model.embeddings.transformer.text_model.embeddings.token_embedding.weight") text_model.load_state_dict(text_model_dict) return text_model def convert_gligen_vae_checkpoint(checkpoint, config): checkpoint = checkpoint["autoencoder"] vae_state_dict = {} vae_key = "first_stage_model." 
keys = list(checkpoint.keys()) for key in keys: vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id 
= num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for key in new_checkpoint.keys(): if "encoder.mid_block.attentions.0" in key or "decoder.mid_block.attentions.0" in key: if "query" in key: new_checkpoint[key.replace("query", "to_q")] = new_checkpoint.pop(key) if "value" in key: new_checkpoint[key.replace("value", "to_v")] = new_checkpoint.pop(key) if "key" in key: new_checkpoint[key.replace("key", "to_k")] = new_checkpoint.pop(key) if "proj_attn" in key: new_checkpoint[key.replace("proj_attn", "to_out.0")] = new_checkpoint.pop(key) return new_checkpoint def convert_gligen_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): unet_state_dict = {} checkpoint = checkpoint["model"] keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: print(f"Checkpoint {path} has bot EMA and non-EMA weights.") print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, 
additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] for key in keys: if "position_net" in key: new_checkpoint[key] = unet_state_dict[key] return new_checkpoint def create_vae_config(original_config, image_size: int): vae_params = original_config["autoencoder"]["params"]["ddconfig"] _ = original_config["autoencoder"]["params"]["embed_dim"] block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) config = { "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], } return config def create_unet_config(original_config, image_size: int, attention_type): unet_params = original_config["model"]["params"] vae_params = original_config["autoencoder"]["params"]["ddconfig"] block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" 
down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None use_linear_projection = ( unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False ) if use_linear_projection: if head_dim is None: head_dim = [5, 10, 20, 20] config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params["in_channels"], "down_block_types": tuple(down_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params["num_res_blocks"], "cross_attention_dim": unet_params["context_dim"], "attention_head_dim": head_dim, "use_linear_projection": use_linear_projection, "attention_type": attention_type, } return config def convert_gligen_to_diffusers( checkpoint_path: str, original_config_file: str, attention_type: str, image_size: int = 512, extract_ema: bool = False, num_in_channels: int = None, device: str = None, ): if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" checkpoint = torch.load(checkpoint_path, map_location=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) if "global_step" in checkpoint: checkpoint["global_step"] else: print("global_step key not found in model") original_config = yaml.safe_load(original_config_file) if num_in_channels is not None: original_config["model"]["params"]["in_channels"] = num_in_channels num_train_timesteps = original_config["diffusion"]["params"]["timesteps"] beta_start = original_config["diffusion"]["params"]["linear_start"] beta_end = original_config["diffusion"]["params"]["linear_end"] scheduler = DDIMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, steps_offset=1, clip_sample=False, set_alpha_to_one=False, prediction_type="epsilon", ) # Convert the UNet2DConditionalModel model unet_config = create_unet_config(original_config, image_size, attention_type) unet = UNet2DConditionModel(**unet_config) converted_unet_checkpoint = convert_gligen_unet_checkpoint( checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema ) unet.load_state_dict(converted_unet_checkpoint) # Convert the VAE model vae_config = create_vae_config(original_config, image_size) converted_vae_checkpoint = convert_gligen_vae_checkpoint(checkpoint, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) # Convert the text model text_encoder = convert_open_clip_checkpoint(checkpoint) tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") if attention_type == "gated-text-image": image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") pipe = StableDiffusionGLIGENTextImagePipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, processor=processor, unet=unet, scheduler=scheduler, safety_checker=None, feature_extractor=None, ) elif attention_type == "gated": pipe = StableDiffusionGLIGENPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=None, 
feature_extractor=None, ) return pipe if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", default=None, type=str, required=True, help="The YAML config file corresponding to the gligen architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--attention_type", default=None, type=str, required=True, help="Type of attention ex: gated or gated-text-image", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") args = parser.parse_args() pipe = convert_gligen_to_diffusers( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, attention_type=args.attention_type, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, device=args.device, ) if args.half: pipe.to(dtype=torch.float16) pipe.save_pretrained(args.dump_path)
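# Example conversion command (a sketch; the checkpoint and YAML config paths are placeholders):
#   python convert_gligen_to_diffusers.py \
#     --checkpoint_path /path/to/gligen.ckpt \
#     --original_config_file /path/to/gligen_config.yaml \
#     --attention_type gated \
#     --dump_path ./gligen-diffusers \
#     --half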
diffusers/scripts/convert_gligen_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_gligen_to_diffusers.py", "repo_id": "diffusers", "token_count": 11150 }
153
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import torch from diffusers import UNet3DConditionModel def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear weight = old_checkpoint[path["old"]] names = ["proj_attn.weight"] names_2 = ["proj_out.weight", "proj_in.weight"] if any(k in new_path for k in names): checkpoint[new_path] = weight[:, :, 0] elif any(k in new_path for k in names_2) and len(weight.shape) > 2 and ".attentions." not in new_path: checkpoint[new_path] = weight[:, :, 0] else: checkpoint[new_path] = weight def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_temp_conv_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: mapping.append({"old": old_item, "new": old_item}) return mapping def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) if "temopral_conv" not in old_item: mapping.append({"old": old_item, "new": new_item}) return mapping def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: print(f"Checkpoint {path} has both EMA and non-EMA weights.") print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." ) for key in keys: unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] if config["class_embed_type"] is None: # No parameters to port ... 
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] else: raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] first_temp_attention = [v for v in unet_state_dict if v.startswith("input_blocks.0.1")] paths = renew_attention_paths(first_temp_attention) meta_path = {"old": "input_blocks.0.1", "new": "transformer_in"} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] temp_attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.2" in key] if f"input_blocks.{i}.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temporal_convs = [key for key in resnets if "temopral_conv" in key] paths = renew_temp_conv_paths(temporal_convs) meta_path = { "old": f"input_blocks.{i}.0.temopral_conv", "new": f"down_blocks.{block_id}.temp_convs.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if 
len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(temp_attentions): paths = renew_attention_paths(temp_attentions) meta_path = { "old": f"input_blocks.{i}.2", "new": f"down_blocks.{block_id}.temp_attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] temporal_convs_0 = [key for key in resnet_0 if "temopral_conv" in key] attentions = middle_blocks[1] temp_attentions = middle_blocks[2] resnet_1 = middle_blocks[3] temporal_convs_1 = [key for key in resnet_1 if "temopral_conv" in key] resnet_0_paths = renew_resnet_paths(resnet_0) meta_path = {"old": "middle_block.0", "new": "mid_block.resnets.0"} assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) temp_conv_0_paths = renew_temp_conv_paths(temporal_convs_0) meta_path = {"old": "middle_block.0.temopral_conv", "new": "mid_block.temp_convs.0"} assign_to_checkpoint( temp_conv_0_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) resnet_1_paths = renew_resnet_paths(resnet_1) meta_path = {"old": "middle_block.3", "new": "mid_block.resnets.1"} assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) temp_conv_1_paths = renew_temp_conv_paths(temporal_convs_1) meta_path = {"old": "middle_block.3.temopral_conv", "new": "mid_block.temp_convs.1"} assign_to_checkpoint( temp_conv_1_paths, new_checkpoint, unet_state_dict, config=config, additional_replacements=[meta_path] ) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temp_attentions_paths = renew_attention_paths(temp_attentions) meta_path = {"old": "middle_block.2", "new": "mid_block.temp_attentions.0"} assign_to_checkpoint( temp_attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] temp_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) temporal_convs = [key for key in resnets if "temopral_conv" in key] paths = renew_temp_conv_paths(temporal_convs) 
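# NOTE: "temopral_conv" (sic) appears to mirror the spelling used by the original checkpoint keys,
# so it must stay misspelled here for the key filters above and the remapping below to match.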
meta_path = { "old": f"output_blocks.{i}.0.temopral_conv", "new": f"up_blocks.{block_id}.temp_convs.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(temp_attentions): paths = renew_attention_paths(temp_attentions) meta_path = { "old": f"output_blocks.{i}.2", "new": f"up_blocks.{block_id}.temp_attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] temopral_conv_paths = [l for l in output_block_layers if "temopral_conv" in l] for path in temopral_conv_paths: pruned_path = path.split("temopral_conv.")[-1] old_path = ".".join(["output_blocks", str(i), str(block_id), "temopral_conv", pruned_path]) new_path = ".".join(["up_blocks", str(block_id), "temp_convs", str(layer_in_block_id), pruned_path]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() unet_checkpoint = torch.load(args.checkpoint_path, map_location="cpu") unet = UNet3DConditionModel() converted_ckpt = convert_ldm_unet_checkpoint(unet_checkpoint, unet.config) diff_0 = set(unet.state_dict().keys()) - set(converted_ckpt.keys()) diff_1 = set(converted_ckpt.keys()) - set(unet.state_dict().keys()) assert len(diff_0) == len(diff_1) == 0, "Converted weights don't match" # load state_dict unet.load_state_dict(converted_ckpt) unet.save_pretrained(args.dump_path) # -- finish converting the unet --
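# Example invocation (a minimal sketch; the checkpoint filename below is a placeholder, and only
# --checkpoint_path and --dump_path are defined by the argument parser above):
#
#   python scripts/convert_ms_text_to_video_to_diffusers.py \
#       --checkpoint_path ./text2video_pytorch_model.pth \
#       --dump_path ./converted-text-to-video-unet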
diffusers/scripts/convert_ms_text_to_video_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ms_text_to_video_to_diffusers.py", "repo_id": "diffusers", "token_count": 8415 }
154
import argparse import tempfile import torch from accelerate import load_checkpoint_and_dispatch from diffusers.models.transformers.prior_transformer import PriorTransformer from diffusers.pipelines.shap_e import ShapERenderer """ Example - From the diffusers root directory: Download weights: ```sh $ wget "https://openaipublic.azureedge.net/main/shap-e/text_cond.pt" ``` Convert the model: ```sh $ python scripts/convert_shap_e_to_diffusers.py \ --prior_checkpoint_path /home/yiyi_huggingface_co/shap-e/shap_e_model_cache/text_cond.pt \ --prior_image_checkpoint_path /home/yiyi_huggingface_co/shap-e/shap_e_model_cache/image_cond.pt \ --transmitter_checkpoint_path /home/yiyi_huggingface_co/shap-e/shap_e_model_cache/transmitter.pt\ --dump_path /home/yiyi_huggingface_co/model_repo/shap-e-img2img/shap_e_renderer\ --debug renderer ``` """ # prior PRIOR_ORIGINAL_PREFIX = "wrapped" PRIOR_CONFIG = { "num_attention_heads": 16, "attention_head_dim": 1024 // 16, "num_layers": 24, "embedding_dim": 1024, "num_embeddings": 1024, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, "time_embed_dim": 1024 * 4, "embedding_proj_dim": 768, "clip_embed_dim": 1024 * 2, } def prior_model_from_original_config(): model = PriorTransformer(**PRIOR_CONFIG) return model def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # <original>.time_embed.c_fc -> <diffusers>.time_embedding.linear_1 diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.c_fc.weight"], "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.c_fc.bias"], } ) # <original>.time_embed.c_proj -> <diffusers>.time_embedding.linear_2 diffusers_checkpoint.update( { "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.c_proj.weight"], "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.c_proj.bias"], } ) # <original>.input_proj -> <diffusers>.proj_in diffusers_checkpoint.update( { "proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.input_proj.weight"], "proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.input_proj.bias"], } ) # <original>.clip_emb -> <diffusers>.embedding_proj diffusers_checkpoint.update( { "embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_embed.weight"], "embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_embed.bias"], } ) # <original>.pos_emb -> <diffusers>.positional_embedding diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.pos_emb"][None, :]}) # <original>.ln_pre -> <diffusers>.norm_in diffusers_checkpoint.update( { "norm_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.ln_pre.weight"], "norm_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.ln_pre.bias"], } ) # <original>.backbone.resblocks.<x> -> <diffusers>.transformer_blocks.<x> for idx in range(len(model.transformer_blocks)): diffusers_transformer_prefix = f"transformer_blocks.{idx}" original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.backbone.resblocks.{idx}" # <original>.attn -> <diffusers>.attn1 diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1" original_attention_prefix = f"{original_transformer_prefix}.attn" diffusers_checkpoint.update( prior_attention_to_diffusers( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, original_attention_prefix=original_attention_prefix, 
attention_head_dim=model.attention_head_dim, ) ) # <original>.mlp -> <diffusers>.ff diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff" original_ff_prefix = f"{original_transformer_prefix}.mlp" diffusers_checkpoint.update( prior_ff_to_diffusers( checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix ) ) # <original>.ln_1 -> <diffusers>.norm1 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[ f"{original_transformer_prefix}.ln_1.weight" ], f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"], } ) # <original>.ln_2 -> <diffusers>.norm3 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[ f"{original_transformer_prefix}.ln_2.weight" ], f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"], } ) # <original>.ln_post -> <diffusers>.norm_out diffusers_checkpoint.update( { "norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.ln_post.weight"], "norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.ln_post.bias"], } ) # <original>.output_proj -> <diffusers>.proj_to_clip_embeddings diffusers_checkpoint.update( { "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.output_proj.weight"], "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.output_proj.bias"], } ) return diffusers_checkpoint def prior_attention_to_diffusers( checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim ): diffusers_checkpoint = {} # <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"], bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"], split=3, chunk_size=attention_head_dim, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.c_proj -> <diffusers>.to_out.0 diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"], } ) return diffusers_checkpoint def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix): diffusers_checkpoint = { # <original>.c_fc -> <diffusers>.net.0.proj f"{diffusers_ff_prefix}.net.{0}.proj.weight": checkpoint[f"{original_ff_prefix}.c_fc.weight"], f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"], # <original>.c_proj -> <diffusers>.net.2 f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"], f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"], } return diffusers_checkpoint # done prior # prior_image (only slightly different from prior) PRIOR_IMAGE_ORIGINAL_PREFIX = "wrapped" # Uses default arguments PRIOR_IMAGE_CONFIG = { "num_attention_heads": 8, "attention_head_dim": 1024 // 8, "num_layers": 24, "embedding_dim": 1024, "num_embeddings": 1024, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", 
"encoder_hid_proj_type": None, "added_emb_type": None, "time_embed_dim": 1024 * 4, "embedding_proj_dim": 1024, "clip_embed_dim": 1024 * 2, } def prior_image_model_from_original_config(): model = PriorTransformer(**PRIOR_IMAGE_CONFIG) return model def prior_image_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # <original>.time_embed.c_fc -> <diffusers>.time_embedding.linear_1 diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.time_embed.c_fc.weight"], "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.time_embed.c_fc.bias"], } ) # <original>.time_embed.c_proj -> <diffusers>.time_embedding.linear_2 diffusers_checkpoint.update( { "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.time_embed.c_proj.weight"], "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.time_embed.c_proj.bias"], } ) # <original>.input_proj -> <diffusers>.proj_in diffusers_checkpoint.update( { "proj_in.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.input_proj.weight"], "proj_in.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.input_proj.bias"], } ) # <original>.clip_embed.0 -> <diffusers>.embedding_proj_norm diffusers_checkpoint.update( { "embedding_proj_norm.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.clip_embed.0.weight"], "embedding_proj_norm.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.clip_embed.0.bias"], } ) # <original>..clip_embed.1 -> <diffusers>.embedding_proj diffusers_checkpoint.update( { "embedding_proj.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.clip_embed.1.weight"], "embedding_proj.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.clip_embed.1.bias"], } ) # <original>.pos_emb -> <diffusers>.positional_embedding diffusers_checkpoint.update( {"positional_embedding": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.pos_emb"][None, :]} ) # <original>.ln_pre -> <diffusers>.norm_in diffusers_checkpoint.update( { "norm_in.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.ln_pre.weight"], "norm_in.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.ln_pre.bias"], } ) # <original>.backbone.resblocks.<x> -> <diffusers>.transformer_blocks.<x> for idx in range(len(model.transformer_blocks)): diffusers_transformer_prefix = f"transformer_blocks.{idx}" original_transformer_prefix = f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.backbone.resblocks.{idx}" # <original>.attn -> <diffusers>.attn1 diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1" original_attention_prefix = f"{original_transformer_prefix}.attn" diffusers_checkpoint.update( prior_attention_to_diffusers( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, original_attention_prefix=original_attention_prefix, attention_head_dim=model.attention_head_dim, ) ) # <original>.mlp -> <diffusers>.ff diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff" original_ff_prefix = f"{original_transformer_prefix}.mlp" diffusers_checkpoint.update( prior_ff_to_diffusers( checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix ) ) # <original>.ln_1 -> <diffusers>.norm1 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[ f"{original_transformer_prefix}.ln_1.weight" ], f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"], } ) # <original>.ln_2 -> <diffusers>.norm3 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm3.weight": 
checkpoint[ f"{original_transformer_prefix}.ln_2.weight" ], f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"], } ) # <original>.ln_post -> <diffusers>.norm_out diffusers_checkpoint.update( { "norm_out.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.ln_post.weight"], "norm_out.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.ln_post.bias"], } ) # <original>.output_proj -> <diffusers>.proj_to_clip_embeddings diffusers_checkpoint.update( { "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.output_proj.weight"], "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_IMAGE_ORIGINAL_PREFIX}.output_proj.bias"], } ) return diffusers_checkpoint # done prior_image # renderer ## create the lookup table for marching cubes method used in MeshDecoder MC_TABLE = [ [], [[0, 1, 0, 2, 0, 4]], [[1, 0, 1, 5, 1, 3]], [[0, 4, 1, 5, 0, 2], [1, 5, 1, 3, 0, 2]], [[2, 0, 2, 3, 2, 6]], [[0, 1, 2, 3, 0, 4], [2, 3, 2, 6, 0, 4]], [[1, 0, 1, 5, 1, 3], [2, 6, 0, 2, 3, 2]], [[3, 2, 2, 6, 3, 1], [3, 1, 2, 6, 1, 5], [1, 5, 2, 6, 0, 4]], [[3, 1, 3, 7, 3, 2]], [[0, 2, 0, 4, 0, 1], [3, 7, 2, 3, 1, 3]], [[1, 5, 3, 7, 1, 0], [3, 7, 3, 2, 1, 0]], [[2, 0, 0, 4, 2, 3], [2, 3, 0, 4, 3, 7], [3, 7, 0, 4, 1, 5]], [[2, 0, 3, 1, 2, 6], [3, 1, 3, 7, 2, 6]], [[1, 3, 3, 7, 1, 0], [1, 0, 3, 7, 0, 4], [0, 4, 3, 7, 2, 6]], [[0, 1, 1, 5, 0, 2], [0, 2, 1, 5, 2, 6], [2, 6, 1, 5, 3, 7]], [[0, 4, 1, 5, 3, 7], [0, 4, 3, 7, 2, 6]], [[4, 0, 4, 6, 4, 5]], [[0, 2, 4, 6, 0, 1], [4, 6, 4, 5, 0, 1]], [[1, 5, 1, 3, 1, 0], [4, 6, 5, 4, 0, 4]], [[5, 1, 1, 3, 5, 4], [5, 4, 1, 3, 4, 6], [4, 6, 1, 3, 0, 2]], [[2, 0, 2, 3, 2, 6], [4, 5, 0, 4, 6, 4]], [[6, 4, 4, 5, 6, 2], [6, 2, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1]], [[2, 6, 2, 0, 3, 2], [1, 0, 1, 5, 3, 1], [6, 4, 5, 4, 0, 4]], [[1, 3, 5, 4, 1, 5], [1, 3, 4, 6, 5, 4], [1, 3, 3, 2, 4, 6], [3, 2, 2, 6, 4, 6]], [[3, 1, 3, 7, 3, 2], [6, 4, 5, 4, 0, 4]], [[4, 5, 0, 1, 4, 6], [0, 1, 0, 2, 4, 6], [7, 3, 2, 3, 1, 3]], [[3, 2, 1, 0, 3, 7], [1, 0, 1, 5, 3, 7], [6, 4, 5, 4, 0, 4]], [[3, 7, 3, 2, 1, 5], [3, 2, 6, 4, 1, 5], [1, 5, 6, 4, 5, 4], [3, 2, 2, 0, 6, 4]], [[3, 7, 2, 6, 3, 1], [2, 6, 2, 0, 3, 1], [5, 4, 0, 4, 6, 4]], [[1, 0, 1, 3, 5, 4], [1, 3, 2, 6, 5, 4], [1, 3, 3, 7, 2, 6], [5, 4, 2, 6, 4, 6]], [[0, 1, 1, 5, 0, 2], [0, 2, 1, 5, 2, 6], [2, 6, 1, 5, 3, 7], [4, 5, 0, 4, 4, 6]], [[6, 2, 4, 6, 4, 5], [4, 5, 5, 1, 6, 2], [6, 2, 5, 1, 7, 3]], [[5, 1, 5, 4, 5, 7]], [[0, 1, 0, 2, 0, 4], [5, 7, 1, 5, 4, 5]], [[1, 0, 5, 4, 1, 3], [5, 4, 5, 7, 1, 3]], [[4, 5, 5, 7, 4, 0], [4, 0, 5, 7, 0, 2], [0, 2, 5, 7, 1, 3]], [[2, 0, 2, 3, 2, 6], [7, 5, 1, 5, 4, 5]], [[2, 6, 0, 4, 2, 3], [0, 4, 0, 1, 2, 3], [7, 5, 1, 5, 4, 5]], [[5, 7, 1, 3, 5, 4], [1, 3, 1, 0, 5, 4], [6, 2, 0, 2, 3, 2]], [[3, 1, 3, 2, 7, 5], [3, 2, 0, 4, 7, 5], [3, 2, 2, 6, 0, 4], [7, 5, 0, 4, 5, 4]], [[3, 7, 3, 2, 3, 1], [5, 4, 7, 5, 1, 5]], [[0, 4, 0, 1, 2, 0], [3, 1, 3, 7, 2, 3], [4, 5, 7, 5, 1, 5]], [[7, 3, 3, 2, 7, 5], [7, 5, 3, 2, 5, 4], [5, 4, 3, 2, 1, 0]], [[0, 4, 2, 3, 0, 2], [0, 4, 3, 7, 2, 3], [0, 4, 4, 5, 3, 7], [4, 5, 5, 7, 3, 7]], [[2, 0, 3, 1, 2, 6], [3, 1, 3, 7, 2, 6], [4, 5, 7, 5, 1, 5]], [[1, 3, 3, 7, 1, 0], [1, 0, 3, 7, 0, 4], [0, 4, 3, 7, 2, 6], [5, 7, 1, 5, 5, 4]], [[2, 6, 2, 0, 3, 7], [2, 0, 4, 5, 3, 7], [3, 7, 4, 5, 7, 5], [2, 0, 0, 1, 4, 5]], [[4, 0, 5, 4, 5, 7], [5, 7, 7, 3, 4, 0], [4, 0, 7, 3, 6, 2]], [[4, 6, 5, 7, 4, 0], [5, 7, 5, 1, 4, 0]], [[1, 0, 0, 2, 1, 5], [1, 5, 0, 2, 5, 7], [5, 7, 0, 2, 4, 6]], [[0, 4, 4, 6, 0, 1], [0, 1, 4, 6, 1, 3], [1, 3, 4, 6, 5, 7]], [[0, 2, 4, 
6, 5, 7], [0, 2, 5, 7, 1, 3]], [[5, 1, 4, 0, 5, 7], [4, 0, 4, 6, 5, 7], [3, 2, 6, 2, 0, 2]], [[2, 3, 2, 6, 0, 1], [2, 6, 7, 5, 0, 1], [0, 1, 7, 5, 1, 5], [2, 6, 6, 4, 7, 5]], [[0, 4, 4, 6, 0, 1], [0, 1, 4, 6, 1, 3], [1, 3, 4, 6, 5, 7], [2, 6, 0, 2, 2, 3]], [[3, 1, 2, 3, 2, 6], [2, 6, 6, 4, 3, 1], [3, 1, 6, 4, 7, 5]], [[4, 6, 5, 7, 4, 0], [5, 7, 5, 1, 4, 0], [2, 3, 1, 3, 7, 3]], [[1, 0, 0, 2, 1, 5], [1, 5, 0, 2, 5, 7], [5, 7, 0, 2, 4, 6], [3, 2, 1, 3, 3, 7]], [[0, 1, 0, 4, 2, 3], [0, 4, 5, 7, 2, 3], [0, 4, 4, 6, 5, 7], [2, 3, 5, 7, 3, 7]], [[7, 5, 3, 7, 3, 2], [3, 2, 2, 0, 7, 5], [7, 5, 2, 0, 6, 4]], [[0, 4, 4, 6, 5, 7], [0, 4, 5, 7, 1, 5], [0, 2, 1, 3, 3, 7], [3, 7, 2, 6, 0, 2]], [ [3, 1, 7, 3, 6, 2], [6, 2, 0, 1, 3, 1], [6, 4, 0, 1, 6, 2], [6, 4, 5, 1, 0, 1], [6, 4, 7, 5, 5, 1], ], [ [4, 0, 6, 4, 7, 5], [7, 5, 1, 0, 4, 0], [7, 3, 1, 0, 7, 5], [7, 3, 2, 0, 1, 0], [7, 3, 6, 2, 2, 0], ], [[7, 3, 6, 2, 6, 4], [7, 5, 7, 3, 6, 4]], [[6, 2, 6, 7, 6, 4]], [[0, 4, 0, 1, 0, 2], [6, 7, 4, 6, 2, 6]], [[1, 0, 1, 5, 1, 3], [7, 6, 4, 6, 2, 6]], [[1, 3, 0, 2, 1, 5], [0, 2, 0, 4, 1, 5], [7, 6, 4, 6, 2, 6]], [[2, 3, 6, 7, 2, 0], [6, 7, 6, 4, 2, 0]], [[4, 0, 0, 1, 4, 6], [4, 6, 0, 1, 6, 7], [6, 7, 0, 1, 2, 3]], [[6, 4, 2, 0, 6, 7], [2, 0, 2, 3, 6, 7], [5, 1, 3, 1, 0, 1]], [[1, 5, 1, 3, 0, 4], [1, 3, 7, 6, 0, 4], [0, 4, 7, 6, 4, 6], [1, 3, 3, 2, 7, 6]], [[3, 2, 3, 1, 3, 7], [6, 4, 2, 6, 7, 6]], [[3, 7, 3, 2, 1, 3], [0, 2, 0, 4, 1, 0], [7, 6, 4, 6, 2, 6]], [[1, 5, 3, 7, 1, 0], [3, 7, 3, 2, 1, 0], [4, 6, 2, 6, 7, 6]], [[2, 0, 0, 4, 2, 3], [2, 3, 0, 4, 3, 7], [3, 7, 0, 4, 1, 5], [6, 4, 2, 6, 6, 7]], [[7, 6, 6, 4, 7, 3], [7, 3, 6, 4, 3, 1], [3, 1, 6, 4, 2, 0]], [[0, 1, 4, 6, 0, 4], [0, 1, 6, 7, 4, 6], [0, 1, 1, 3, 6, 7], [1, 3, 3, 7, 6, 7]], [[0, 2, 0, 1, 4, 6], [0, 1, 3, 7, 4, 6], [0, 1, 1, 5, 3, 7], [4, 6, 3, 7, 6, 7]], [[7, 3, 6, 7, 6, 4], [6, 4, 4, 0, 7, 3], [7, 3, 4, 0, 5, 1]], [[4, 0, 6, 2, 4, 5], [6, 2, 6, 7, 4, 5]], [[2, 6, 6, 7, 2, 0], [2, 0, 6, 7, 0, 1], [0, 1, 6, 7, 4, 5]], [[6, 7, 4, 5, 6, 2], [4, 5, 4, 0, 6, 2], [3, 1, 0, 1, 5, 1]], [[2, 0, 2, 6, 3, 1], [2, 6, 4, 5, 3, 1], [2, 6, 6, 7, 4, 5], [3, 1, 4, 5, 1, 5]], [[0, 2, 2, 3, 0, 4], [0, 4, 2, 3, 4, 5], [4, 5, 2, 3, 6, 7]], [[0, 1, 2, 3, 6, 7], [0, 1, 6, 7, 4, 5]], [[0, 2, 2, 3, 0, 4], [0, 4, 2, 3, 4, 5], [4, 5, 2, 3, 6, 7], [1, 3, 0, 1, 1, 5]], [[5, 4, 1, 5, 1, 3], [1, 3, 3, 2, 5, 4], [5, 4, 3, 2, 7, 6]], [[4, 0, 6, 2, 4, 5], [6, 2, 6, 7, 4, 5], [1, 3, 7, 3, 2, 3]], [[2, 6, 6, 7, 2, 0], [2, 0, 6, 7, 0, 1], [0, 1, 6, 7, 4, 5], [3, 7, 2, 3, 3, 1]], [[0, 1, 1, 5, 3, 7], [0, 1, 3, 7, 2, 3], [0, 4, 2, 6, 6, 7], [6, 7, 4, 5, 0, 4]], [ [6, 2, 7, 6, 5, 4], [5, 4, 0, 2, 6, 2], [5, 1, 0, 2, 5, 4], [5, 1, 3, 2, 0, 2], [5, 1, 7, 3, 3, 2], ], [[3, 1, 3, 7, 2, 0], [3, 7, 5, 4, 2, 0], [2, 0, 5, 4, 0, 4], [3, 7, 7, 6, 5, 4]], [[1, 0, 3, 1, 3, 7], [3, 7, 7, 6, 1, 0], [1, 0, 7, 6, 5, 4]], [ [1, 0, 5, 1, 7, 3], [7, 3, 2, 0, 1, 0], [7, 6, 2, 0, 7, 3], [7, 6, 4, 0, 2, 0], [7, 6, 5, 4, 4, 0], ], [[7, 6, 5, 4, 5, 1], [7, 3, 7, 6, 5, 1]], [[5, 7, 5, 1, 5, 4], [6, 2, 7, 6, 4, 6]], [[0, 2, 0, 4, 1, 0], [5, 4, 5, 7, 1, 5], [2, 6, 7, 6, 4, 6]], [[1, 0, 5, 4, 1, 3], [5, 4, 5, 7, 1, 3], [2, 6, 7, 6, 4, 6]], [[4, 5, 5, 7, 4, 0], [4, 0, 5, 7, 0, 2], [0, 2, 5, 7, 1, 3], [6, 7, 4, 6, 6, 2]], [[2, 3, 6, 7, 2, 0], [6, 7, 6, 4, 2, 0], [1, 5, 4, 5, 7, 5]], [[4, 0, 0, 1, 4, 6], [4, 6, 0, 1, 6, 7], [6, 7, 0, 1, 2, 3], [5, 1, 4, 5, 5, 7]], [[0, 2, 2, 3, 6, 7], [0, 2, 6, 7, 4, 6], [0, 1, 4, 5, 5, 7], [5, 7, 1, 3, 0, 1]], [ [5, 4, 7, 5, 3, 1], [3, 1, 0, 4, 5, 4], [3, 2, 0, 4, 3, 1], [3, 2, 
6, 4, 0, 4], [3, 2, 7, 6, 6, 4], ], [[5, 4, 5, 7, 1, 5], [3, 7, 3, 2, 1, 3], [4, 6, 2, 6, 7, 6]], [[1, 0, 0, 2, 0, 4], [1, 5, 5, 4, 5, 7], [3, 2, 1, 3, 3, 7], [2, 6, 7, 6, 4, 6]], [[7, 3, 3, 2, 7, 5], [7, 5, 3, 2, 5, 4], [5, 4, 3, 2, 1, 0], [6, 2, 7, 6, 6, 4]], [ [0, 4, 2, 3, 0, 2], [0, 4, 3, 7, 2, 3], [0, 4, 4, 5, 3, 7], [4, 5, 5, 7, 3, 7], [6, 7, 4, 6, 2, 6], ], [[7, 6, 6, 4, 7, 3], [7, 3, 6, 4, 3, 1], [3, 1, 6, 4, 2, 0], [5, 4, 7, 5, 5, 1]], [ [0, 1, 4, 6, 0, 4], [0, 1, 6, 7, 4, 6], [0, 1, 1, 3, 6, 7], [1, 3, 3, 7, 6, 7], [5, 7, 1, 5, 4, 5], ], [ [6, 7, 4, 6, 0, 2], [0, 2, 3, 7, 6, 7], [0, 1, 3, 7, 0, 2], [0, 1, 5, 7, 3, 7], [0, 1, 4, 5, 5, 7], ], [[4, 0, 6, 7, 4, 6], [4, 0, 7, 3, 6, 7], [4, 0, 5, 7, 7, 3], [4, 5, 5, 7, 4, 0]], [[7, 5, 5, 1, 7, 6], [7, 6, 5, 1, 6, 2], [6, 2, 5, 1, 4, 0]], [[0, 2, 1, 5, 0, 1], [0, 2, 5, 7, 1, 5], [0, 2, 2, 6, 5, 7], [2, 6, 6, 7, 5, 7]], [[1, 3, 1, 0, 5, 7], [1, 0, 2, 6, 5, 7], [5, 7, 2, 6, 7, 6], [1, 0, 0, 4, 2, 6]], [[2, 0, 6, 2, 6, 7], [6, 7, 7, 5, 2, 0], [2, 0, 7, 5, 3, 1]], [[0, 4, 0, 2, 1, 5], [0, 2, 6, 7, 1, 5], [0, 2, 2, 3, 6, 7], [1, 5, 6, 7, 5, 7]], [[7, 6, 5, 7, 5, 1], [5, 1, 1, 0, 7, 6], [7, 6, 1, 0, 3, 2]], [ [2, 0, 3, 2, 7, 6], [7, 6, 4, 0, 2, 0], [7, 5, 4, 0, 7, 6], [7, 5, 1, 0, 4, 0], [7, 5, 3, 1, 1, 0], ], [[7, 5, 3, 1, 3, 2], [7, 6, 7, 5, 3, 2]], [[7, 5, 5, 1, 7, 6], [7, 6, 5, 1, 6, 2], [6, 2, 5, 1, 4, 0], [3, 1, 7, 3, 3, 2]], [ [0, 2, 1, 5, 0, 1], [0, 2, 5, 7, 1, 5], [0, 2, 2, 6, 5, 7], [2, 6, 6, 7, 5, 7], [3, 7, 2, 3, 1, 3], ], [ [3, 7, 2, 3, 0, 1], [0, 1, 5, 7, 3, 7], [0, 4, 5, 7, 0, 1], [0, 4, 6, 7, 5, 7], [0, 4, 2, 6, 6, 7], ], [[2, 0, 3, 7, 2, 3], [2, 0, 7, 5, 3, 7], [2, 0, 6, 7, 7, 5], [2, 6, 6, 7, 2, 0]], [ [5, 7, 1, 5, 0, 4], [0, 4, 6, 7, 5, 7], [0, 2, 6, 7, 0, 4], [0, 2, 3, 7, 6, 7], [0, 2, 1, 3, 3, 7], ], [[1, 0, 5, 7, 1, 5], [1, 0, 7, 6, 5, 7], [1, 0, 3, 7, 7, 6], [1, 3, 3, 7, 1, 0]], [[0, 2, 0, 1, 0, 4], [3, 7, 6, 7, 5, 7]], [[7, 5, 7, 3, 7, 6]], [[7, 3, 7, 5, 7, 6]], [[0, 1, 0, 2, 0, 4], [6, 7, 3, 7, 5, 7]], [[1, 3, 1, 0, 1, 5], [7, 6, 3, 7, 5, 7]], [[0, 4, 1, 5, 0, 2], [1, 5, 1, 3, 0, 2], [6, 7, 3, 7, 5, 7]], [[2, 6, 2, 0, 2, 3], [7, 5, 6, 7, 3, 7]], [[0, 1, 2, 3, 0, 4], [2, 3, 2, 6, 0, 4], [5, 7, 6, 7, 3, 7]], [[1, 5, 1, 3, 0, 1], [2, 3, 2, 6, 0, 2], [5, 7, 6, 7, 3, 7]], [[3, 2, 2, 6, 3, 1], [3, 1, 2, 6, 1, 5], [1, 5, 2, 6, 0, 4], [7, 6, 3, 7, 7, 5]], [[3, 1, 7, 5, 3, 2], [7, 5, 7, 6, 3, 2]], [[7, 6, 3, 2, 7, 5], [3, 2, 3, 1, 7, 5], [4, 0, 1, 0, 2, 0]], [[5, 7, 7, 6, 5, 1], [5, 1, 7, 6, 1, 0], [1, 0, 7, 6, 3, 2]], [[2, 3, 2, 0, 6, 7], [2, 0, 1, 5, 6, 7], [2, 0, 0, 4, 1, 5], [6, 7, 1, 5, 7, 5]], [[6, 2, 2, 0, 6, 7], [6, 7, 2, 0, 7, 5], [7, 5, 2, 0, 3, 1]], [[0, 4, 0, 1, 2, 6], [0, 1, 5, 7, 2, 6], [2, 6, 5, 7, 6, 7], [0, 1, 1, 3, 5, 7]], [[1, 5, 0, 2, 1, 0], [1, 5, 2, 6, 0, 2], [1, 5, 5, 7, 2, 6], [5, 7, 7, 6, 2, 6]], [[5, 1, 7, 5, 7, 6], [7, 6, 6, 2, 5, 1], [5, 1, 6, 2, 4, 0]], [[4, 5, 4, 0, 4, 6], [7, 3, 5, 7, 6, 7]], [[0, 2, 4, 6, 0, 1], [4, 6, 4, 5, 0, 1], [3, 7, 5, 7, 6, 7]], [[4, 6, 4, 5, 0, 4], [1, 5, 1, 3, 0, 1], [6, 7, 3, 7, 5, 7]], [[5, 1, 1, 3, 5, 4], [5, 4, 1, 3, 4, 6], [4, 6, 1, 3, 0, 2], [7, 3, 5, 7, 7, 6]], [[2, 3, 2, 6, 0, 2], [4, 6, 4, 5, 0, 4], [3, 7, 5, 7, 6, 7]], [[6, 4, 4, 5, 6, 2], [6, 2, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [7, 5, 6, 7, 7, 3]], [[0, 1, 1, 5, 1, 3], [0, 2, 2, 3, 2, 6], [4, 5, 0, 4, 4, 6], [5, 7, 6, 7, 3, 7]], [ [1, 3, 5, 4, 1, 5], [1, 3, 4, 6, 5, 4], [1, 3, 3, 2, 4, 6], [3, 2, 2, 6, 4, 6], [7, 6, 3, 7, 5, 7], ], [[3, 1, 7, 5, 3, 2], [7, 5, 7, 6, 3, 2], [0, 4, 6, 4, 5, 4]], [[1, 0, 0, 2, 4, 
6], [1, 0, 4, 6, 5, 4], [1, 3, 5, 7, 7, 6], [7, 6, 3, 2, 1, 3]], [[5, 7, 7, 6, 5, 1], [5, 1, 7, 6, 1, 0], [1, 0, 7, 6, 3, 2], [4, 6, 5, 4, 4, 0]], [ [7, 5, 6, 7, 2, 3], [2, 3, 1, 5, 7, 5], [2, 0, 1, 5, 2, 3], [2, 0, 4, 5, 1, 5], [2, 0, 6, 4, 4, 5], ], [[6, 2, 2, 0, 6, 7], [6, 7, 2, 0, 7, 5], [7, 5, 2, 0, 3, 1], [4, 0, 6, 4, 4, 5]], [ [4, 6, 5, 4, 1, 0], [1, 0, 2, 6, 4, 6], [1, 3, 2, 6, 1, 0], [1, 3, 7, 6, 2, 6], [1, 3, 5, 7, 7, 6], ], [ [1, 5, 0, 2, 1, 0], [1, 5, 2, 6, 0, 2], [1, 5, 5, 7, 2, 6], [5, 7, 7, 6, 2, 6], [4, 6, 5, 4, 0, 4], ], [[5, 1, 4, 6, 5, 4], [5, 1, 6, 2, 4, 6], [5, 1, 7, 6, 6, 2], [5, 7, 7, 6, 5, 1]], [[5, 4, 7, 6, 5, 1], [7, 6, 7, 3, 5, 1]], [[7, 3, 5, 1, 7, 6], [5, 1, 5, 4, 7, 6], [2, 0, 4, 0, 1, 0]], [[3, 1, 1, 0, 3, 7], [3, 7, 1, 0, 7, 6], [7, 6, 1, 0, 5, 4]], [[0, 2, 0, 4, 1, 3], [0, 4, 6, 7, 1, 3], [1, 3, 6, 7, 3, 7], [0, 4, 4, 5, 6, 7]], [[5, 4, 7, 6, 5, 1], [7, 6, 7, 3, 5, 1], [0, 2, 3, 2, 6, 2]], [[1, 5, 5, 4, 7, 6], [1, 5, 7, 6, 3, 7], [1, 0, 3, 2, 2, 6], [2, 6, 0, 4, 1, 0]], [[3, 1, 1, 0, 3, 7], [3, 7, 1, 0, 7, 6], [7, 6, 1, 0, 5, 4], [2, 0, 3, 2, 2, 6]], [ [2, 3, 6, 2, 4, 0], [4, 0, 1, 3, 2, 3], [4, 5, 1, 3, 4, 0], [4, 5, 7, 3, 1, 3], [4, 5, 6, 7, 7, 3], ], [[1, 5, 5, 4, 1, 3], [1, 3, 5, 4, 3, 2], [3, 2, 5, 4, 7, 6]], [[1, 5, 5, 4, 1, 3], [1, 3, 5, 4, 3, 2], [3, 2, 5, 4, 7, 6], [0, 4, 1, 0, 0, 2]], [[1, 0, 5, 4, 7, 6], [1, 0, 7, 6, 3, 2]], [[2, 3, 0, 2, 0, 4], [0, 4, 4, 5, 2, 3], [2, 3, 4, 5, 6, 7]], [[1, 3, 1, 5, 0, 2], [1, 5, 7, 6, 0, 2], [1, 5, 5, 4, 7, 6], [0, 2, 7, 6, 2, 6]], [ [5, 1, 4, 5, 6, 7], [6, 7, 3, 1, 5, 1], [6, 2, 3, 1, 6, 7], [6, 2, 0, 1, 3, 1], [6, 2, 4, 0, 0, 1], ], [[6, 7, 2, 6, 2, 0], [2, 0, 0, 1, 6, 7], [6, 7, 0, 1, 4, 5]], [[6, 2, 4, 0, 4, 5], [6, 7, 6, 2, 4, 5]], [[6, 7, 7, 3, 6, 4], [6, 4, 7, 3, 4, 0], [4, 0, 7, 3, 5, 1]], [[1, 5, 1, 0, 3, 7], [1, 0, 4, 6, 3, 7], [1, 0, 0, 2, 4, 6], [3, 7, 4, 6, 7, 6]], [[1, 0, 3, 7, 1, 3], [1, 0, 7, 6, 3, 7], [1, 0, 0, 4, 7, 6], [0, 4, 4, 6, 7, 6]], [[6, 4, 7, 6, 7, 3], [7, 3, 3, 1, 6, 4], [6, 4, 3, 1, 2, 0]], [[6, 7, 7, 3, 6, 4], [6, 4, 7, 3, 4, 0], [4, 0, 7, 3, 5, 1], [2, 3, 6, 2, 2, 0]], [ [7, 6, 3, 7, 1, 5], [1, 5, 4, 6, 7, 6], [1, 0, 4, 6, 1, 5], [1, 0, 2, 6, 4, 6], [1, 0, 3, 2, 2, 6], ], [ [1, 0, 3, 7, 1, 3], [1, 0, 7, 6, 3, 7], [1, 0, 0, 4, 7, 6], [0, 4, 4, 6, 7, 6], [2, 6, 0, 2, 3, 2], ], [[3, 1, 7, 6, 3, 7], [3, 1, 6, 4, 7, 6], [3, 1, 2, 6, 6, 4], [3, 2, 2, 6, 3, 1]], [[3, 2, 3, 1, 7, 6], [3, 1, 0, 4, 7, 6], [7, 6, 0, 4, 6, 4], [3, 1, 1, 5, 0, 4]], [ [0, 1, 2, 0, 6, 4], [6, 4, 5, 1, 0, 1], [6, 7, 5, 1, 6, 4], [6, 7, 3, 1, 5, 1], [6, 7, 2, 3, 3, 1], ], [[0, 1, 4, 0, 4, 6], [4, 6, 6, 7, 0, 1], [0, 1, 6, 7, 2, 3]], [[6, 7, 2, 3, 2, 0], [6, 4, 6, 7, 2, 0]], [ [2, 6, 0, 2, 1, 3], [1, 3, 7, 6, 2, 6], [1, 5, 7, 6, 1, 3], [1, 5, 4, 6, 7, 6], [1, 5, 0, 4, 4, 6], ], [[1, 5, 1, 0, 1, 3], [4, 6, 7, 6, 2, 6]], [[0, 1, 2, 6, 0, 2], [0, 1, 6, 7, 2, 6], [0, 1, 4, 6, 6, 7], [0, 4, 4, 6, 0, 1]], [[6, 7, 6, 2, 6, 4]], [[6, 2, 7, 3, 6, 4], [7, 3, 7, 5, 6, 4]], [[7, 5, 6, 4, 7, 3], [6, 4, 6, 2, 7, 3], [1, 0, 2, 0, 4, 0]], [[6, 2, 7, 3, 6, 4], [7, 3, 7, 5, 6, 4], [0, 1, 5, 1, 3, 1]], [[2, 0, 0, 4, 1, 5], [2, 0, 1, 5, 3, 1], [2, 6, 3, 7, 7, 5], [7, 5, 6, 4, 2, 6]], [[3, 7, 7, 5, 3, 2], [3, 2, 7, 5, 2, 0], [2, 0, 7, 5, 6, 4]], [[3, 2, 3, 7, 1, 0], [3, 7, 6, 4, 1, 0], [3, 7, 7, 5, 6, 4], [1, 0, 6, 4, 0, 4]], [[3, 7, 7, 5, 3, 2], [3, 2, 7, 5, 2, 0], [2, 0, 7, 5, 6, 4], [1, 5, 3, 1, 1, 0]], [ [7, 3, 5, 7, 4, 6], [4, 6, 2, 3, 7, 3], [4, 0, 2, 3, 4, 6], [4, 0, 1, 3, 2, 3], [4, 0, 5, 1, 1, 3], ], [[2, 3, 3, 1, 2, 6], [2, 6, 
3, 1, 6, 4], [6, 4, 3, 1, 7, 5]], [[2, 3, 3, 1, 2, 6], [2, 6, 3, 1, 6, 4], [6, 4, 3, 1, 7, 5], [0, 1, 2, 0, 0, 4]], [[1, 0, 1, 5, 3, 2], [1, 5, 4, 6, 3, 2], [3, 2, 4, 6, 2, 6], [1, 5, 5, 7, 4, 6]], [ [0, 2, 4, 0, 5, 1], [5, 1, 3, 2, 0, 2], [5, 7, 3, 2, 5, 1], [5, 7, 6, 2, 3, 2], [5, 7, 4, 6, 6, 2], ], [[2, 0, 3, 1, 7, 5], [2, 0, 7, 5, 6, 4]], [[4, 6, 0, 4, 0, 1], [0, 1, 1, 3, 4, 6], [4, 6, 1, 3, 5, 7]], [[0, 2, 1, 0, 1, 5], [1, 5, 5, 7, 0, 2], [0, 2, 5, 7, 4, 6]], [[5, 7, 4, 6, 4, 0], [5, 1, 5, 7, 4, 0]], [[5, 4, 4, 0, 5, 7], [5, 7, 4, 0, 7, 3], [7, 3, 4, 0, 6, 2]], [[0, 1, 0, 2, 4, 5], [0, 2, 3, 7, 4, 5], [4, 5, 3, 7, 5, 7], [0, 2, 2, 6, 3, 7]], [[5, 4, 4, 0, 5, 7], [5, 7, 4, 0, 7, 3], [7, 3, 4, 0, 6, 2], [1, 0, 5, 1, 1, 3]], [ [1, 5, 3, 1, 2, 0], [2, 0, 4, 5, 1, 5], [2, 6, 4, 5, 2, 0], [2, 6, 7, 5, 4, 5], [2, 6, 3, 7, 7, 5], ], [[2, 3, 0, 4, 2, 0], [2, 3, 4, 5, 0, 4], [2, 3, 3, 7, 4, 5], [3, 7, 7, 5, 4, 5]], [[3, 2, 7, 3, 7, 5], [7, 5, 5, 4, 3, 2], [3, 2, 5, 4, 1, 0]], [ [2, 3, 0, 4, 2, 0], [2, 3, 4, 5, 0, 4], [2, 3, 3, 7, 4, 5], [3, 7, 7, 5, 4, 5], [1, 5, 3, 1, 0, 1], ], [[3, 2, 1, 5, 3, 1], [3, 2, 5, 4, 1, 5], [3, 2, 7, 5, 5, 4], [3, 7, 7, 5, 3, 2]], [[2, 6, 2, 3, 0, 4], [2, 3, 7, 5, 0, 4], [2, 3, 3, 1, 7, 5], [0, 4, 7, 5, 4, 5]], [ [3, 2, 1, 3, 5, 7], [5, 7, 6, 2, 3, 2], [5, 4, 6, 2, 5, 7], [5, 4, 0, 2, 6, 2], [5, 4, 1, 0, 0, 2], ], [ [4, 5, 0, 4, 2, 6], [2, 6, 7, 5, 4, 5], [2, 3, 7, 5, 2, 6], [2, 3, 1, 5, 7, 5], [2, 3, 0, 1, 1, 5], ], [[2, 3, 2, 0, 2, 6], [1, 5, 7, 5, 4, 5]], [[5, 7, 4, 5, 4, 0], [4, 0, 0, 2, 5, 7], [5, 7, 0, 2, 1, 3]], [[5, 4, 1, 0, 1, 3], [5, 7, 5, 4, 1, 3]], [[0, 2, 4, 5, 0, 4], [0, 2, 5, 7, 4, 5], [0, 2, 1, 5, 5, 7], [0, 1, 1, 5, 0, 2]], [[5, 4, 5, 1, 5, 7]], [[4, 6, 6, 2, 4, 5], [4, 5, 6, 2, 5, 1], [5, 1, 6, 2, 7, 3]], [[4, 6, 6, 2, 4, 5], [4, 5, 6, 2, 5, 1], [5, 1, 6, 2, 7, 3], [0, 2, 4, 0, 0, 1]], [[3, 7, 3, 1, 2, 6], [3, 1, 5, 4, 2, 6], [3, 1, 1, 0, 5, 4], [2, 6, 5, 4, 6, 4]], [ [6, 4, 2, 6, 3, 7], [3, 7, 5, 4, 6, 4], [3, 1, 5, 4, 3, 7], [3, 1, 0, 4, 5, 4], [3, 1, 2, 0, 0, 4], ], [[2, 0, 2, 3, 6, 4], [2, 3, 1, 5, 6, 4], [6, 4, 1, 5, 4, 5], [2, 3, 3, 7, 1, 5]], [ [0, 4, 1, 0, 3, 2], [3, 2, 6, 4, 0, 4], [3, 7, 6, 4, 3, 2], [3, 7, 5, 4, 6, 4], [3, 7, 1, 5, 5, 4], ], [ [1, 3, 0, 1, 4, 5], [4, 5, 7, 3, 1, 3], [4, 6, 7, 3, 4, 5], [4, 6, 2, 3, 7, 3], [4, 6, 0, 2, 2, 3], ], [[3, 7, 3, 1, 3, 2], [5, 4, 6, 4, 0, 4]], [[3, 1, 2, 6, 3, 2], [3, 1, 6, 4, 2, 6], [3, 1, 1, 5, 6, 4], [1, 5, 5, 4, 6, 4]], [ [3, 1, 2, 6, 3, 2], [3, 1, 6, 4, 2, 6], [3, 1, 1, 5, 6, 4], [1, 5, 5, 4, 6, 4], [0, 4, 1, 0, 2, 0], ], [[4, 5, 6, 4, 6, 2], [6, 2, 2, 3, 4, 5], [4, 5, 2, 3, 0, 1]], [[2, 3, 6, 4, 2, 6], [2, 3, 4, 5, 6, 4], [2, 3, 0, 4, 4, 5], [2, 0, 0, 4, 2, 3]], [[1, 3, 5, 1, 5, 4], [5, 4, 4, 6, 1, 3], [1, 3, 4, 6, 0, 2]], [[1, 3, 0, 4, 1, 0], [1, 3, 4, 6, 0, 4], [1, 3, 5, 4, 4, 6], [1, 5, 5, 4, 1, 3]], [[4, 6, 0, 2, 0, 1], [4, 5, 4, 6, 0, 1]], [[4, 6, 4, 0, 4, 5]], [[4, 0, 6, 2, 7, 3], [4, 0, 7, 3, 5, 1]], [[1, 5, 0, 1, 0, 2], [0, 2, 2, 6, 1, 5], [1, 5, 2, 6, 3, 7]], [[3, 7, 1, 3, 1, 0], [1, 0, 0, 4, 3, 7], [3, 7, 0, 4, 2, 6]], [[3, 1, 2, 0, 2, 6], [3, 7, 3, 1, 2, 6]], [[0, 4, 2, 0, 2, 3], [2, 3, 3, 7, 0, 4], [0, 4, 3, 7, 1, 5]], [[3, 7, 1, 5, 1, 0], [3, 2, 3, 7, 1, 0]], [[0, 4, 1, 3, 0, 1], [0, 4, 3, 7, 1, 3], [0, 4, 2, 3, 3, 7], [0, 2, 2, 3, 0, 4]], [[3, 7, 3, 1, 3, 2]], [[2, 6, 3, 2, 3, 1], [3, 1, 1, 5, 2, 6], [2, 6, 1, 5, 0, 4]], [[1, 5, 3, 2, 1, 3], [1, 5, 2, 6, 3, 2], [1, 5, 0, 2, 2, 6], [1, 0, 0, 2, 1, 5]], [[2, 3, 0, 1, 0, 4], [2, 6, 2, 3, 0, 4]], [[2, 3, 2, 0, 2, 6]], [[1, 5, 0, 
4, 0, 2], [1, 3, 1, 5, 0, 2]], [[1, 5, 1, 0, 1, 3]], [[0, 2, 0, 1, 0, 4]], [], ] def create_mc_lookup_table(): cases = torch.zeros(256, 5, 3, dtype=torch.long) masks = torch.zeros(256, 5, dtype=torch.bool) edge_to_index = { (0, 1): 0, (2, 3): 1, (4, 5): 2, (6, 7): 3, (0, 2): 4, (1, 3): 5, (4, 6): 6, (5, 7): 7, (0, 4): 8, (1, 5): 9, (2, 6): 10, (3, 7): 11, } for i, case in enumerate(MC_TABLE): for j, tri in enumerate(case): for k, (c1, c2) in enumerate(zip(tri[::2], tri[1::2])): cases[i, j, k] = edge_to_index[(c1, c2) if c1 < c2 else (c2, c1)] masks[i, j] = True return cases, masks RENDERER_CONFIG = {} def renderer_model_from_original_config(): model = ShapERenderer(**RENDERER_CONFIG) return model RENDERER_MLP_ORIGINAL_PREFIX = "renderer.nerstf" RENDERER_PARAMS_PROJ_ORIGINAL_PREFIX = "encoder.params_proj" def renderer_model_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( {f"mlp.{k}": checkpoint[f"{RENDERER_MLP_ORIGINAL_PREFIX}.{k}"] for k in model.mlp.state_dict().keys()} ) diffusers_checkpoint.update( { f"params_proj.{k}": checkpoint[f"{RENDERER_PARAMS_PROJ_ORIGINAL_PREFIX}.{k}"] for k in model.params_proj.state_dict().keys() } ) diffusers_checkpoint.update({"void.background": model.state_dict()["void.background"]}) cases, masks = create_mc_lookup_table() diffusers_checkpoint.update({"mesh_decoder.cases": cases}) diffusers_checkpoint.update({"mesh_decoder.masks": masks}) return diffusers_checkpoint # done renderer # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?) def split_attentions(*, weight, bias, split, chunk_size): weights = [None] * split biases = [None] * split weights_biases_idx = 0 for starting_row_index in range(0, weight.shape[0], chunk_size): row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size) weight_rows = weight[row_indices, :] bias_rows = bias[row_indices] if weights[weights_biases_idx] is None: assert weights[weights_biases_idx] is None weights[weights_biases_idx] = weight_rows biases[weights_biases_idx] = bias_rows else: assert weights[weights_biases_idx] is not None weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows]) biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows]) weights_biases_idx = (weights_biases_idx + 1) % split return weights, biases # done unet utils # Driver functions def prior(*, args, checkpoint_map_location): print("loading prior") prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location) prior_model = prior_model_from_original_config() prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint(prior_model, prior_checkpoint) del prior_checkpoint load_prior_checkpoint_to_model(prior_diffusers_checkpoint, prior_model) print("done loading prior") return prior_model def prior_image(*, args, checkpoint_map_location): print("loading prior_image") print(f"load checkpoint from {args.prior_image_checkpoint_path}") prior_checkpoint = torch.load(args.prior_image_checkpoint_path, map_location=checkpoint_map_location) prior_model = prior_image_model_from_original_config() prior_diffusers_checkpoint = prior_image_original_checkpoint_to_diffusers_checkpoint(prior_model, prior_checkpoint) del prior_checkpoint load_prior_checkpoint_to_model(prior_diffusers_checkpoint, prior_model) print("done loading prior_image") return prior_model def renderer(*, args, checkpoint_map_location): print(" 
loading renderer") renderer_checkpoint = torch.load(args.transmitter_checkpoint_path, map_location=checkpoint_map_location) renderer_model = renderer_model_from_original_config() renderer_diffusers_checkpoint = renderer_model_original_checkpoint_to_diffusers_checkpoint( renderer_model, renderer_checkpoint ) del renderer_checkpoint load_checkpoint_to_model(renderer_diffusers_checkpoint, renderer_model, strict=True) print("done loading renderer") return renderer_model # prior model will expect clip_mean and clip_std, which are missing from the state_dict PRIOR_EXPECTED_MISSING_KEYS = ["clip_mean", "clip_std"] def load_prior_checkpoint_to_model(checkpoint, model): with tempfile.NamedTemporaryFile() as file: torch.save(checkpoint, file.name) del checkpoint missing_keys, unexpected_keys = model.load_state_dict(torch.load(file.name), strict=False) missing_keys = list(set(missing_keys) - set(PRIOR_EXPECTED_MISSING_KEYS)) if len(unexpected_keys) > 0: raise ValueError(f"Unexpected keys when loading prior model: {unexpected_keys}") if len(missing_keys) > 0: raise ValueError(f"Missing keys when loading prior model: {missing_keys}") def load_checkpoint_to_model(checkpoint, model, strict=False): with tempfile.NamedTemporaryFile() as file: torch.save(checkpoint, file.name) del checkpoint if strict: model.load_state_dict(torch.load(file.name), strict=True) else: load_checkpoint_and_dispatch(model, file.name, device_map="auto") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--prior_checkpoint_path", default=None, type=str, required=False, help="Path to the prior checkpoint to convert.", ) parser.add_argument( "--prior_image_checkpoint_path", default=None, type=str, required=False, help="Path to the prior_image checkpoint to convert.", ) parser.add_argument( "--transmitter_checkpoint_path", default=None, type=str, required=False, help="Path to the transmitter checkpoint to convert.", ) parser.add_argument( "--checkpoint_load_device", default="cpu", type=str, required=False, help="The device passed to `map_location` when loading checkpoints.", ) parser.add_argument( "--debug", default=None, type=str, required=False, help="Only run a specific stage of the convert script. Used for debugging", ) args = parser.parse_args() print(f"loading checkpoints to {args.checkpoint_load_device}") checkpoint_map_location = torch.device(args.checkpoint_load_device) if args.debug is not None: print(f"debug: only executing {args.debug}") if args.debug is None: print("YiYi TO-DO") elif args.debug == "prior": prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location) prior_model.save_pretrained(args.dump_path) elif args.debug == "prior_image": prior_model = prior_image(args=args, checkpoint_map_location=checkpoint_map_location) prior_model.save_pretrained(args.dump_path) elif args.debug == "renderer": renderer_model = renderer(args=args, checkpoint_map_location=checkpoint_map_location) renderer_model.save_pretrained(args.dump_path) else: raise ValueError(f"unknown debug value : {args.debug}")
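# Illustrative sketch (not part of the original script): `split_attentions` deals `chunk_size`-row
# blocks of a fused projection round-robin across `split` output tensors, which is how
# `prior_attention_to_diffusers` recovers separate q/k/v projections from `c_qkv`. The shapes
# below are hypothetical and only assume the prior's 1024-dim embedding with 64-dim heads:
#
#   w = torch.randn(3 * 1024, 1024)  # fused qkv weight
#   b = torch.randn(3 * 1024)        # fused qkv bias
#   [q_w, k_w, v_w], [q_b, k_b, v_b] = split_attentions(weight=w, bias=b, split=3, chunk_size=64)
#   assert q_w.shape == (1024, 1024) and q_b.shape == (1024,)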
diffusers/scripts/convert_shap_e_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_shap_e_to_diffusers.py", "repo_id": "diffusers", "token_count": 22931 }
155
import argparse import pathlib from typing import Any, Dict, Tuple import torch from accelerate import init_empty_weights from huggingface_hub import hf_hub_download, snapshot_download from safetensors.torch import load_file from transformers import AutoProcessor, AutoTokenizer, CLIPVisionModelWithProjection, UMT5EncoderModel from diffusers import ( AutoencoderKLWan, UniPCMultistepScheduler, WanImageToVideoPipeline, WanPipeline, WanTransformer3DModel, WanVACEPipeline, WanVACETransformer3DModel, ) TRANSFORMER_KEYS_RENAME_DICT = { "time_embedding.0": "condition_embedder.time_embedder.linear_1", "time_embedding.2": "condition_embedder.time_embedder.linear_2", "text_embedding.0": "condition_embedder.text_embedder.linear_1", "text_embedding.2": "condition_embedder.text_embedder.linear_2", "time_projection.1": "condition_embedder.time_proj", "head.modulation": "scale_shift_table", "head.head": "proj_out", "modulation": "scale_shift_table", "ffn.0": "ffn.net.0.proj", "ffn.2": "ffn.net.2", # Hack to swap the layer names # The original model calls the norms in following order: norm1, norm3, norm2 # We convert it to: norm1, norm2, norm3 "norm2": "norm__placeholder", "norm3": "norm2", "norm__placeholder": "norm3", # For the I2V model "img_emb.proj.0": "condition_embedder.image_embedder.norm1", "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj", "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2", "img_emb.proj.4": "condition_embedder.image_embedder.norm2", # for the FLF2V model "img_emb.emb_pos": "condition_embedder.image_embedder.pos_embed", # Add attention component mappings "self_attn.q": "attn1.to_q", "self_attn.k": "attn1.to_k", "self_attn.v": "attn1.to_v", "self_attn.o": "attn1.to_out.0", "self_attn.norm_q": "attn1.norm_q", "self_attn.norm_k": "attn1.norm_k", "cross_attn.q": "attn2.to_q", "cross_attn.k": "attn2.to_k", "cross_attn.v": "attn2.to_v", "cross_attn.o": "attn2.to_out.0", "cross_attn.norm_q": "attn2.norm_q", "cross_attn.norm_k": "attn2.norm_k", "attn2.to_k_img": "attn2.add_k_proj", "attn2.to_v_img": "attn2.add_v_proj", "attn2.norm_k_img": "attn2.norm_added_k", } VACE_TRANSFORMER_KEYS_RENAME_DICT = { "time_embedding.0": "condition_embedder.time_embedder.linear_1", "time_embedding.2": "condition_embedder.time_embedder.linear_2", "text_embedding.0": "condition_embedder.text_embedder.linear_1", "text_embedding.2": "condition_embedder.text_embedder.linear_2", "time_projection.1": "condition_embedder.time_proj", "head.modulation": "scale_shift_table", "head.head": "proj_out", "modulation": "scale_shift_table", "ffn.0": "ffn.net.0.proj", "ffn.2": "ffn.net.2", # Hack to swap the layer names # The original model calls the norms in following order: norm1, norm3, norm2 # We convert it to: norm1, norm2, norm3 "norm2": "norm__placeholder", "norm3": "norm2", "norm__placeholder": "norm3", # # For the I2V model # "img_emb.proj.0": "condition_embedder.image_embedder.norm1", # "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj", # "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2", # "img_emb.proj.4": "condition_embedder.image_embedder.norm2", # # for the FLF2V model # "img_emb.emb_pos": "condition_embedder.image_embedder.pos_embed", # Add attention component mappings "self_attn.q": "attn1.to_q", "self_attn.k": "attn1.to_k", "self_attn.v": "attn1.to_v", "self_attn.o": "attn1.to_out.0", "self_attn.norm_q": "attn1.norm_q", "self_attn.norm_k": "attn1.norm_k", "cross_attn.q": "attn2.to_q", "cross_attn.k": "attn2.to_k", "cross_attn.v": "attn2.to_v", 
"cross_attn.o": "attn2.to_out.0", "cross_attn.norm_q": "attn2.norm_q", "cross_attn.norm_k": "attn2.norm_k", "attn2.to_k_img": "attn2.add_k_proj", "attn2.to_v_img": "attn2.add_v_proj", "attn2.norm_k_img": "attn2.norm_added_k", "before_proj": "proj_in", "after_proj": "proj_out", } TRANSFORMER_SPECIAL_KEYS_REMAP = {} VACE_TRANSFORMER_SPECIAL_KEYS_REMAP = {} def update_state_dict_(state_dict: Dict[str, Any], old_key: str, new_key: str) -> Dict[str, Any]: state_dict[new_key] = state_dict.pop(old_key) def load_sharded_safetensors(dir: pathlib.Path): file_paths = list(dir.glob("diffusion_pytorch_model*.safetensors")) state_dict = {} for path in file_paths: state_dict.update(load_file(path)) return state_dict def get_transformer_config(model_type: str) -> Tuple[Dict[str, Any], ...]: if model_type == "Wan-T2V-1.3B": config = { "model_id": "StevenZhang/Wan2.1-T2V-1.3B-Diff", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 8960, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 12, "num_layers": 30, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-T2V-14B": config = { "model_id": "StevenZhang/Wan2.1-T2V-14B-Diff", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-I2V-14B-480p": config = { "model_id": "StevenZhang/Wan2.1-I2V-14B-480P-Diff", "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-I2V-14B-720p": config = { "model_id": "StevenZhang/Wan2.1-I2V-14B-720P-Diff", "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-FLF2V-14B-720P": config = { "model_id": "ypyp/Wan2.1-FLF2V-14B-720P", # This is just a placeholder "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "rope_max_seq_len": 1024, "pos_embed_seq_len": 257 * 2, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-VACE-1.3B": config = { "model_id": 
"Wan-AI/Wan2.1-VACE-1.3B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 8960, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 12, "num_layers": 30, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "vace_layers": [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28], "vace_in_channels": 96, }, } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-VACE-14B": config = { "model_id": "Wan-AI/Wan2.1-VACE-14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "vace_layers": [0, 5, 10, 15, 20, 25, 30, 35], "vace_in_channels": 96, }, } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-I2V-14B-720p": config = { "model_id": "Wan-AI/Wan2.2-I2V-A14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-T2V-A14B": config = { "model_id": "Wan-AI/Wan2.2-T2V-A14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-TI2V-5B": config = { "model_id": "Wan-AI/Wan2.2-TI2V-5B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 14336, "freq_dim": 256, "in_channels": 48, "num_attention_heads": 24, "num_layers": 30, "out_channels": 48, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP return config, RENAME_DICT, SPECIAL_KEYS_REMAP def convert_transformer(model_type: str, stage: str = None): config, RENAME_DICT, SPECIAL_KEYS_REMAP = get_transformer_config(model_type) diffusers_config = config["diffusers_config"] model_id = config["model_id"] model_dir = pathlib.Path(snapshot_download(model_id, repo_type="model")) if stage is not None: model_dir = model_dir / stage original_state_dict = load_sharded_safetensors(model_dir) with init_empty_weights(): if "VACE" not in model_type: transformer = WanTransformer3DModel.from_config(diffusers_config) else: transformer = WanVACETransformer3DModel.from_config(diffusers_config) for key in list(original_state_dict.keys()): new_key = key[:] for replace_key, rename_key in RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, 
handler_fn_inplace in SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) transformer.load_state_dict(original_state_dict, strict=True, assign=True) return transformer def convert_vae(): vae_ckpt_path = hf_hub_download("Wan-AI/Wan2.1-T2V-14B", "Wan2.1_VAE.pth") old_state_dict = torch.load(vae_ckpt_path, weights_only=True) new_state_dict = {} # Create mappings for specific components middle_key_mapping = { # Encoder middle block "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma", "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias", "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight", "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma", "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias", "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight", "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma", "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias", "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight", "encoder.middle.2.residual.3.gamma": "encoder.mid_block.resnets.1.norm2.gamma", "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias", "encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight", # Decoder middle block "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma", "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias", "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight", "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma", "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias", "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight", "decoder.middle.2.residual.0.gamma": "decoder.mid_block.resnets.1.norm1.gamma", "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias", "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight", "decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma", "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias", "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight", } # Create a mapping for attention blocks attention_mapping = { # Encoder middle attention "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma", "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight", "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias", "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight", "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias", # Decoder middle attention "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma", "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight", "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias", "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight", "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias", } # Create a mapping for the head components head_mapping = { # Encoder head "encoder.head.0.gamma": "encoder.norm_out.gamma", "encoder.head.2.bias": "encoder.conv_out.bias", "encoder.head.2.weight": "encoder.conv_out.weight", # 
Decoder head "decoder.head.0.gamma": "decoder.norm_out.gamma", "decoder.head.2.bias": "decoder.conv_out.bias", "decoder.head.2.weight": "decoder.conv_out.weight", } # Create a mapping for the quant components quant_mapping = { "conv1.weight": "quant_conv.weight", "conv1.bias": "quant_conv.bias", "conv2.weight": "post_quant_conv.weight", "conv2.bias": "post_quant_conv.bias", } # Process each key in the state dict for key, value in old_state_dict.items(): # Handle middle block keys using the mapping if key in middle_key_mapping: new_key = middle_key_mapping[key] new_state_dict[new_key] = value # Handle attention blocks using the mapping elif key in attention_mapping: new_key = attention_mapping[key] new_state_dict[new_key] = value # Handle head keys using the mapping elif key in head_mapping: new_key = head_mapping[key] new_state_dict[new_key] = value # Handle quant keys using the mapping elif key in quant_mapping: new_key = quant_mapping[key] new_state_dict[new_key] = value # Handle encoder conv1 elif key == "encoder.conv1.weight": new_state_dict["encoder.conv_in.weight"] = value elif key == "encoder.conv1.bias": new_state_dict["encoder.conv_in.bias"] = value # Handle decoder conv1 elif key == "decoder.conv1.weight": new_state_dict["decoder.conv_in.weight"] = value elif key == "decoder.conv1.bias": new_state_dict["decoder.conv_in.bias"] = value # Handle encoder downsamples elif key.startswith("encoder.downsamples."): # Convert to down_blocks new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.") # Convert residual block naming but keep the original structure if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") new_state_dict[new_key] = value # Handle decoder upsamples elif key.startswith("decoder.upsamples."): # Convert to up_blocks parts = key.split(".") block_idx = int(parts[2]) # Group residual blocks if "residual" in key: if block_idx in [0, 1, 2]: new_block_idx = 0 resnet_idx = block_idx elif block_idx in [4, 5, 6]: new_block_idx = 1 resnet_idx = block_idx - 4 elif block_idx in [8, 9, 10]: new_block_idx = 2 resnet_idx = block_idx - 8 elif block_idx in [12, 13, 14]: new_block_idx = 3 resnet_idx = block_idx - 12 else: # Keep as is for other blocks new_state_dict[key] = value continue # Convert residual block naming if ".residual.0.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm1.gamma" elif ".residual.2.bias" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.bias" elif ".residual.2.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.weight" elif ".residual.3.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm2.gamma" elif ".residual.6.bias" in key: new_key = 
f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.bias" elif ".residual.6.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.weight" else: new_key = key new_state_dict[new_key] = value # Handle shortcut connections elif ".shortcut." in key: if block_idx == 4: new_key = key.replace(".shortcut.", ".resnets.0.conv_shortcut.") new_key = new_key.replace("decoder.upsamples.4", "decoder.up_blocks.1") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_key = new_key.replace(".shortcut.", ".conv_shortcut.") new_state_dict[new_key] = value # Handle upsamplers elif ".resample." in key or ".time_conv." in key: if block_idx == 3: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.0.upsamplers.0") elif block_idx == 7: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.1.upsamplers.0") elif block_idx == 11: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.2.upsamplers.0") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_state_dict[new_key] = value else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_state_dict[new_key] = value else: # Keep other keys unchanged new_state_dict[key] = value with init_empty_weights(): vae = AutoencoderKLWan() vae.load_state_dict(new_state_dict, strict=True, assign=True) return vae vae22_diffusers_config = { "base_dim": 160, "z_dim": 48, "is_residual": True, "in_channels": 12, "out_channels": 12, "decoder_base_dim": 256, "scale_factor_temporal": 4, "scale_factor_spatial": 16, "patch_size": 2, "latents_mean": [ -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174, 0.1838, 0.1557, -0.1382, 0.0542, 0.2813, 0.0891, 0.1570, -0.0098, 0.0375, -0.1825, -0.2246, -0.1207, -0.0698, 0.5109, 0.2665, -0.2108, -0.2158, 0.2502, -0.2055, -0.0322, 0.1109, 0.1567, -0.0729, 0.0899, -0.2799, -0.1230, -0.0313, -0.1649, 0.0117, 0.0723, -0.2839, -0.2083, -0.0520, 0.3748, 0.0152, 0.1957, 0.1433, -0.2944, 0.3573, -0.0548, -0.1681, -0.0667, ], "latents_std": [ 0.4765, 1.0364, 0.4514, 1.1677, 0.5313, 0.4990, 0.4818, 0.5013, 0.8158, 1.0344, 0.5894, 1.0901, 0.6885, 0.6165, 0.8454, 0.4978, 0.5759, 0.3523, 0.7135, 0.6804, 0.5833, 1.4146, 0.8986, 0.5659, 0.7069, 0.5338, 0.4889, 0.4917, 0.4069, 0.4999, 0.6866, 0.4093, 0.5709, 0.6065, 0.6415, 0.4944, 0.5726, 1.2042, 0.5458, 1.6887, 0.3971, 1.0600, 0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744, ], "clip_output": False, } def convert_vae_22(): vae_ckpt_path = hf_hub_download("Wan-AI/Wan2.2-TI2V-5B", "Wan2.2_VAE.pth") old_state_dict = torch.load(vae_ckpt_path, weights_only=True) new_state_dict = {} # Create mappings for specific components middle_key_mapping = { # Encoder middle block "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma", "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias", "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight", "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma", "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias", "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight", "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma", "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias", "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight", "encoder.middle.2.residual.3.gamma": 
"encoder.mid_block.resnets.1.norm2.gamma", "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias", "encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight", # Decoder middle block "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma", "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias", "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight", "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma", "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias", "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight", "decoder.middle.2.residual.0.gamma": "decoder.mid_block.resnets.1.norm1.gamma", "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias", "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight", "decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma", "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias", "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight", } # Create a mapping for attention blocks attention_mapping = { # Encoder middle attention "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma", "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight", "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias", "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight", "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias", # Decoder middle attention "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma", "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight", "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias", "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight", "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias", } # Create a mapping for the head components head_mapping = { # Encoder head "encoder.head.0.gamma": "encoder.norm_out.gamma", "encoder.head.2.bias": "encoder.conv_out.bias", "encoder.head.2.weight": "encoder.conv_out.weight", # Decoder head "decoder.head.0.gamma": "decoder.norm_out.gamma", "decoder.head.2.bias": "decoder.conv_out.bias", "decoder.head.2.weight": "decoder.conv_out.weight", } # Create a mapping for the quant components quant_mapping = { "conv1.weight": "quant_conv.weight", "conv1.bias": "quant_conv.bias", "conv2.weight": "post_quant_conv.weight", "conv2.bias": "post_quant_conv.bias", } # Process each key in the state dict for key, value in old_state_dict.items(): # Handle middle block keys using the mapping if key in middle_key_mapping: new_key = middle_key_mapping[key] new_state_dict[new_key] = value # Handle attention blocks using the mapping elif key in attention_mapping: new_key = attention_mapping[key] new_state_dict[new_key] = value # Handle head keys using the mapping elif key in head_mapping: new_key = head_mapping[key] new_state_dict[new_key] = value # Handle quant keys using the mapping elif key in quant_mapping: new_key = quant_mapping[key] new_state_dict[new_key] = value # Handle encoder conv1 elif key == "encoder.conv1.weight": new_state_dict["encoder.conv_in.weight"] = value elif key == "encoder.conv1.bias": new_state_dict["encoder.conv_in.bias"] = value # Handle decoder conv1 elif key == "decoder.conv1.weight": 
new_state_dict["decoder.conv_in.weight"] = value elif key == "decoder.conv1.bias": new_state_dict["decoder.conv_in.bias"] = value # Handle encoder downsamples elif key.startswith("encoder.downsamples."): # Change encoder.downsamples to encoder.down_blocks new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.") # Handle residual blocks - change downsamples to resnets and rename components if "residual" in new_key or "shortcut" in new_key: # Change the second downsamples to resnets new_key = new_key.replace(".downsamples.", ".resnets.") # Rename residual components if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") # Handle resample blocks - change downsamples to downsampler and remove index elif "resample" in new_key or "time_conv" in new_key: # Change the second downsamples to downsampler and remove the index parts = new_key.split(".") # Find the pattern: encoder.down_blocks.X.downsamples.Y.resample... # We want to change it to: encoder.down_blocks.X.downsampler.resample... if len(parts) >= 4 and parts[3] == "downsamples": # Remove the index (parts[4]) and change downsamples to downsampler new_parts = parts[:3] + ["downsampler"] + parts[5:] new_key = ".".join(new_parts) new_state_dict[new_key] = value # Handle decoder upsamples elif key.startswith("decoder.upsamples."): # Change decoder.upsamples to decoder.up_blocks new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") # Handle residual blocks - change upsamples to resnets and rename components if "residual" in new_key or "shortcut" in new_key: # Change the second upsamples to resnets new_key = new_key.replace(".upsamples.", ".resnets.") # Rename residual components if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") # Handle resample blocks - change upsamples to upsampler and remove index elif "resample" in new_key or "time_conv" in new_key: # Change the second upsamples to upsampler and remove the index parts = new_key.split(".") # Find the pattern: encoder.down_blocks.X.downsamples.Y.resample... 
                # We want to change it to: encoder.down_blocks.X.downsampler.resample...
                if len(parts) >= 4 and parts[3] == "upsamples":
                    # Remove the index (parts[4]) and change upsamples to upsampler
                    new_parts = parts[:3] + ["upsampler"] + parts[5:]
                    new_key = ".".join(new_parts)

            new_state_dict[new_key] = value
        else:
            # Keep other keys unchanged
            new_state_dict[key] = value

    with init_empty_weights():
        vae = AutoencoderKLWan(**vae22_diffusers_config)

    vae.load_state_dict(new_state_dict, strict=True, assign=True)
    return vae


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", type=str, default=None)
    parser.add_argument("--output_path", type=str, required=True)
    parser.add_argument("--dtype", default="fp32", choices=["fp32", "fp16", "bf16", "none"])
    return parser.parse_args()


DTYPE_MAPPING = {
    "fp32": torch.float32,
    "fp16": torch.float16,
    "bf16": torch.bfloat16,
}

if __name__ == "__main__":
    args = get_args()

    # Wan 2.2 checkpoints (except TI2V) ship separate high-noise and low-noise transformers.
    if "Wan2.2" in args.model_type and "TI2V" not in args.model_type:
        transformer = convert_transformer(args.model_type, stage="high_noise_model")
        transformer_2 = convert_transformer(args.model_type, stage="low_noise_model")
    else:
        transformer = convert_transformer(args.model_type)
        transformer_2 = None

    if "Wan2.2" in args.model_type and "TI2V" in args.model_type:
        vae = convert_vae_22()
    else:
        vae = convert_vae()

    text_encoder = UMT5EncoderModel.from_pretrained("google/umt5-xxl", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained("google/umt5-xxl")

    if "FLF2V" in args.model_type:
        flow_shift = 16.0
    elif "TI2V" in args.model_type:
        flow_shift = 5.0
    else:
        flow_shift = 3.0
    scheduler = UniPCMultistepScheduler(
        prediction_type="flow_prediction", use_flow_sigmas=True, num_train_timesteps=1000, flow_shift=flow_shift
    )

    # If user has specified "none", we keep the original dtypes of the state dict without any conversion
    if args.dtype != "none":
        dtype = DTYPE_MAPPING[args.dtype]
        transformer.to(dtype)

    if "Wan2.2" in args.model_type and "I2V" in args.model_type and "TI2V" not in args.model_type:
        pipe = WanImageToVideoPipeline(
            transformer=transformer,
            transformer_2=transformer_2,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
            boundary_ratio=0.9,
        )
    elif "Wan2.2" in args.model_type and "T2V" in args.model_type:
        pipe = WanPipeline(
            transformer=transformer,
            transformer_2=transformer_2,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
            boundary_ratio=0.875,
        )
    elif "Wan2.2" in args.model_type and "TI2V" in args.model_type:
        pipe = WanPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
            expand_timesteps=True,
        )
    elif "I2V" in args.model_type or "FLF2V" in args.model_type:
        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
            "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", torch_dtype=torch.bfloat16
        )
        image_processor = AutoProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
        pipe = WanImageToVideoPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
            image_encoder=image_encoder,
            image_processor=image_processor,
        )
    elif "VACE" in args.model_type:
        pipe = WanVACEPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
        )
    else:
        pipe = WanPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
        )

    pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB")
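To make the checkpoint-key renaming pass in `convert_transformer` above concrete, here is a minimal, self-contained sketch of the same substring-replacement technique on a toy state dict. The tensor names and the small rename table are illustrative only and are not taken from the real Wan checkpoints; only the pop/re-insert pattern mirrors `update_state_dict_` and the rename loop above.

```python
import torch

# Illustrative rename table in the spirit of TRANSFORMER_KEYS_RENAME_DICT:
# each entry maps an original substring to its diffusers-style replacement.
_TOY_RENAME_DICT = {
    "self_attn.q": "attn1.to_q",
    "before_proj": "proj_in",
}


def _toy_update_state_dict(state_dict, old_key, new_key):
    # Same in-place pop/re-insert pattern as update_state_dict_ above.
    state_dict[new_key] = state_dict.pop(old_key)


toy_state_dict = {
    "blocks.0.self_attn.q.weight": torch.zeros(4, 4),
    "blocks.0.before_proj.weight": torch.zeros(4, 4),
}

for key in list(toy_state_dict.keys()):
    new_key = key[:]
    for old, new in _TOY_RENAME_DICT.items():
        new_key = new_key.replace(old, new)
    _toy_update_state_dict(toy_state_dict, key, new_key)

print(sorted(toy_state_dict))
# ['blocks.0.attn1.to_q.weight', 'blocks.0.proj_in.weight']
```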
diffusers/scripts/convert_wan_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_wan_to_diffusers.py", "repo_id": "diffusers", "token_count": 21823 }
156
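The decoder remapping in `convert_vae` above hinges on a small piece of index arithmetic: original `decoder.upsamples.{i}` residual blocks are grouped into `up_blocks` of three resnets each, and indices 3, 7 and 11 become the upsamplers between those groups. A minimal sketch of that mapping, written as a standalone helper purely for illustration (the function name and asserts are not part of the script):

```python
def map_decoder_upsample_index(block_idx: int) -> str:
    """Mirror the grouping used by convert_vae for decoder.upsamples.{i} keys."""
    # Indices 3, 7, 11 are the upsamplers between the residual groups.
    if block_idx in (3, 7, 11):
        return f"decoder.up_blocks.{(block_idx - 3) // 4}.upsamplers.0"
    # Residual blocks 0-2, 4-6, 8-10, 12-14 become up_blocks 0-3 with three resnets each.
    groups = {
        0: (0, 0), 1: (0, 1), 2: (0, 2),
        4: (1, 0), 5: (1, 1), 6: (1, 2),
        8: (2, 0), 9: (2, 1), 10: (2, 2),
        12: (3, 0), 13: (3, 1), 14: (3, 2),
    }
    new_block_idx, resnet_idx = groups[block_idx]
    return f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}"


assert map_decoder_upsample_index(5) == "decoder.up_blocks.1.resnets.1"
assert map_decoder_upsample_index(7) == "decoder.up_blocks.1.upsamplers.0"
```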
# 🧨 Diffusers Experimental

We are adding experimental code to support novel applications and usages of the Diffusers library. Currently, the following experiments are supported:

* Reinforcement learning via an implementation of the [Diffuser](https://huggingface.co/papers/2205.09991) model.
diffusers/src/diffusers/experimental/README.md/0
{ "file_path": "diffusers/src/diffusers/experimental/README.md", "repo_id": "diffusers", "token_count": 70 }
157
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional

import torch

from ..models.attention import AttentionModuleMixin, FeedForward, LuminaFeedForward
from ..models.attention_processor import Attention, MochiAttention


_ATTENTION_CLASSES = (Attention, MochiAttention, AttentionModuleMixin)
_FEEDFORWARD_CLASSES = (FeedForward, LuminaFeedForward)

_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "single_transformer_blocks", "layers")
_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("temporal_transformer_blocks",)
_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "layers")
_ALL_TRANSFORMER_BLOCK_IDENTIFIERS = tuple(
    {
        *_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS,
        *_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS,
        *_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS,
    }
)

# Layers supported for group offloading and layerwise casting
_GO_LC_SUPPORTED_PYTORCH_LAYERS = (
    torch.nn.Conv1d,
    torch.nn.Conv2d,
    torch.nn.Conv3d,
    torch.nn.ConvTranspose1d,
    torch.nn.ConvTranspose2d,
    torch.nn.ConvTranspose3d,
    torch.nn.Linear,
    # TODO(aryan): look into torch.nn.LayerNorm, torch.nn.GroupNorm later, seems to be causing some issues with CogVideoX
    # because of double invocation of the same norm layer in CogVideoXLayerNorm
)


def _get_submodule_from_fqn(module: torch.nn.Module, fqn: str) -> Optional[torch.nn.Module]:
    for submodule_name, submodule in module.named_modules():
        if submodule_name == fqn:
            return submodule
    return None
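As a rough illustration of how the supported-layer tuple above can be used, the sketch below walks a model and collects submodules whose types would qualify for group offloading or layerwise casting. The helper itself is hypothetical (only the idea of filtering by `_GO_LC_SUPPORTED_PYTORCH_LAYERS` comes from the module above), and only a subset of the layer types is repeated so the snippet runs standalone.

```python
import torch

# Subset of _GO_LC_SUPPORTED_PYTORCH_LAYERS, repeated here so the snippet is self-contained.
_SUPPORTED = (torch.nn.Conv2d, torch.nn.Linear)


def collect_supported_layers(model: torch.nn.Module):
    # Hypothetical helper: names of submodules eligible for offloading/casting.
    return [name for name, mod in model.named_modules() if isinstance(mod, _SUPPORTED)]


model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU(), torch.nn.Linear(8, 4))
print(collect_supported_layers(model))  # ['0', '2']
```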
diffusers/src/diffusers/hooks/_common.py/0
{ "file_path": "diffusers/src/diffusers/hooks/_common.py", "repo_id": "diffusers", "token_count": 748 }
158
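For completeness, a small usage sketch of the `_get_submodule_from_fqn` helper defined in the module above. The toy model and the fully-qualified names are made up for illustration; the helper body is copied verbatim so the snippet runs outside diffusers.

```python
import torch


def get_submodule_from_fqn(module, fqn):
    # Verbatim logic from _get_submodule_from_fqn above.
    for submodule_name, submodule in module.named_modules():
        if submodule_name == fqn:
            return submodule
    return None


model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Sequential(torch.nn.Linear(4, 2)))
print(get_submodule_from_fqn(model, "1.0"))             # Linear(in_features=4, out_features=2, bias=True)
print(get_submodule_from_fqn(model, "does.not.exist"))  # None
```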
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Callable, Dict, List, Optional, Union import torch from huggingface_hub.utils import validate_hf_hub_args from ..utils import ( USE_PEFT_BACKEND, deprecate, get_submodule_by_name, is_bitsandbytes_available, is_gguf_available, is_peft_available, is_peft_version, is_torch_version, is_transformers_available, is_transformers_version, logging, ) from .lora_base import ( # noqa LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE, LoraBaseMixin, _fetch_state_dict, _load_lora_into_text_encoder, _pack_dict_with_prefix, ) from .lora_conversion_utils import ( _convert_bfl_flux_control_lora_to_diffusers, _convert_fal_kontext_lora_to_diffusers, _convert_hunyuan_video_lora_to_diffusers, _convert_kohya_flux_lora_to_diffusers, _convert_musubi_wan_lora_to_diffusers, _convert_non_diffusers_hidream_lora_to_diffusers, _convert_non_diffusers_lora_to_diffusers, _convert_non_diffusers_ltxv_lora_to_diffusers, _convert_non_diffusers_lumina2_lora_to_diffusers, _convert_non_diffusers_qwen_lora_to_diffusers, _convert_non_diffusers_wan_lora_to_diffusers, _convert_xlabs_flux_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers, ) _LOW_CPU_MEM_USAGE_DEFAULT_LORA = False if is_torch_version(">=", "1.9.0"): if ( is_peft_available() and is_peft_version(">=", "0.13.1") and is_transformers_available() and is_transformers_version(">", "4.45.2") ): _LOW_CPU_MEM_USAGE_DEFAULT_LORA = True logger = logging.get_logger(__name__) TEXT_ENCODER_NAME = "text_encoder" UNET_NAME = "unet" TRANSFORMER_NAME = "transformer" _MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX = {"x_embedder": "in_channels"} def _maybe_dequantize_weight_for_expanded_lora(model, module): if is_bitsandbytes_available(): from ..quantizers.bitsandbytes import dequantize_bnb_weight if is_gguf_available(): from ..quantizers.gguf.utils import dequantize_gguf_tensor is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit" is_bnb_8bit_quantized = module.weight.__class__.__name__ == "Int8Params" is_gguf_quantized = module.weight.__class__.__name__ == "GGUFParameter" if is_bnb_4bit_quantized and not is_bitsandbytes_available(): raise ValueError( "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints." ) if is_bnb_8bit_quantized and not is_bitsandbytes_available(): raise ValueError( "The checkpoint seems to have been quantized with `bitsandbytes` (8bits). Install `bitsandbytes` to load quantized checkpoints." ) if is_gguf_quantized and not is_gguf_available(): raise ValueError( "The checkpoint seems to have been quantized with `gguf`. Install `gguf` to load quantized checkpoints." 
) weight_on_cpu = False if module.weight.device.type == "cpu": weight_on_cpu = True device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" if is_bnb_4bit_quantized or is_bnb_8bit_quantized: module_weight = dequantize_bnb_weight( module.weight.to(device) if weight_on_cpu else module.weight, state=module.weight.quant_state if is_bnb_4bit_quantized else module.state, dtype=model.dtype, ).data elif is_gguf_quantized: module_weight = dequantize_gguf_tensor( module.weight.to(device) if weight_on_cpu else module.weight, ) module_weight = module_weight.to(model.dtype) else: module_weight = module.weight.data if weight_on_cpu: module_weight = module_weight.cpu() return module_weight class StableDiffusionLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into Stable Diffusion [`UNet2DConditionModel`] and [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel). """ _lora_loadable_modules = ["unet", "text_encoder"] unet_name = UNET_NAME text_encoder_name = TEXT_ENCODER_NAME def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into `self.unet`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded into `self.text_encoder`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): Defaults to `False`. Whether to substitute an existing (LoRA) adapter with the newly loaded adapter in-place. This means that, instead of loading an additional adapter, this will take the existing adapter weights and replace them with the weights of the new adapter. This can be faster and more memory efficient. However, the main advantage of hotswapping is that when the model is compiled with torch.compile, loading the new adapter does not require recompilation of the model. When using hotswapping, the passed `adapter_name` should be the name of an already loaded adapter. If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling), you need to call an additional method before loading the adapter: ```py pipeline = ... # load diffusers pipeline max_rank = ... # the highest rank among all LoRAs that you want to load # call *before* compiling and loading the LoRA adapter pipeline.enable_lora_hotswap(target_rank=max_rank) pipeline.load_lora_weights(file_name) # optionally compile the model now ``` Note that hotswapping adapters of the text encoder is not yet supported. 
There are some further limitations to this technique, which are documented here: https://huggingface.co/docs/peft/main/en/package_reference/hotswap kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, network_alphas, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_unet( state_dict, network_alphas=network_alphas, unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) self.load_lora_into_text_encoder( state_dict, network_alphas=network_alphas, text_encoder=getattr(self, self.text_encoder_name) if not hasattr(self, "text_encoder") else self.text_encoder, lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self, metadata=metadata, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. 
If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. weight_name (`str`, *optional*, defaults to None): Name of the serialized state dict file. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # UNet and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) unet_config = kwargs.pop("unet_config", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} network_alphas = None # TODO: replace it with a method from `state_dict_utils` if all( ( k.startswith("lora_te_") or k.startswith("lora_unet_") or k.startswith("lora_te1_") or k.startswith("lora_te2_") ) for k in state_dict.keys() ): # Map SDXL blocks correctly. if unet_config is not None: # use unet config to remap block numbers state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) state_dict, network_alphas = _convert_non_diffusers_lora_to_diffusers(state_dict) out = (state_dict, network_alphas, metadata) if return_lora_metadata else (state_dict, network_alphas) return out @classmethod def load_lora_into_unet( cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `unet`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. 
network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). unet (`UNet2DConditionModel`): The UNet model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as # their prefixes. logger.info(f"Loading {cls.unet_name}.") unet.load_lora_adapter( state_dict, prefix=cls.unet_name, network_alphas=network_alphas, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def load_lora_into_text_encoder( cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `text_encoder` Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The key should be prefixed with an additional `text_encoder` to distinguish between unet lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). text_encoder (`CLIPTextModel`): The text encoder model to load the LoRA layers into. prefix (`str`): Expected prefix of the `text_encoder` in the `state_dict`. lora_scale (`float`): How much to scale the output of the lora linear layer before it is added with the output of the regular lora layer. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. 
""" _load_lora_into_text_encoder( state_dict=state_dict, network_alphas=network_alphas, lora_scale=lora_scale, text_encoder=text_encoder, prefix=prefix, text_encoder_name=cls.text_encoder_name, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def save_lora_weights( cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, unet_lora_adapter_metadata=None, text_encoder_lora_adapter_metadata=None, ): r""" Save the LoRA parameters corresponding to the UNet and text encoder. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `unet`. text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. unet_lora_adapter_metadata: LoRA adapter metadata associated with the unet to be serialized with the state dict. text_encoder_lora_adapter_metadata: LoRA adapter metadata associated with the text encoder to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not (unet_lora_layers or text_encoder_lora_layers): raise ValueError("You must pass at least one of `unet_lora_layers` and `text_encoder_lora_layers`.") if unet_lora_layers: state_dict.update(cls.pack_weights(unet_lora_layers, cls.unet_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) if unet_lora_adapter_metadata: lora_adapter_metadata.update(_pack_dict_with_prefix(unet_lora_adapter_metadata, cls.unet_name)) if text_encoder_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_lora_adapter_metadata, cls.text_encoder_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) def fuse_lora( self, components: List[str] = ["unet", "text_encoder"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. 
</Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) def unfuse_lora(self, components: List[str] = ["unet", "text_encoder"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. unfuse_text_encoder (`bool`, defaults to `True`): Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the LoRA parameters then it won't have any effect. """ super().unfuse_lora(components=components, **kwargs) class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into Stable Diffusion XL [`UNet2DConditionModel`], [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection). """ _lora_loadable_modules = ["unet", "text_encoder", "text_encoder_2"] unet_name = UNET_NAME text_encoder_name = TEXT_ENCODER_NAME def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into `self.unet`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded into `self.text_encoder`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. 
kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # We could have accessed the unet config from `lora_state_dict()` too. We pass # it here explicitly to be able to tell that it's coming from an SDXL # pipeline. # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, network_alphas, metadata = self.lora_state_dict( pretrained_model_name_or_path_or_dict, unet_config=self.unet.config, **kwargs, ) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_unet( state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) self.load_lora_into_text_encoder( state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix=self.text_encoder_name, lora_scale=self.lora_scale, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) self.load_lora_into_text_encoder( state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder_2, prefix=f"{self.text_encoder_name}_2", lora_scale=self.lora_scale, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. weight_name (`str`, *optional*, defaults to None): Name of the serialized state dict file. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # UNet and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) unet_config = kwargs.pop("unet_config", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} network_alphas = None # TODO: replace it with a method from `state_dict_utils` if all( ( k.startswith("lora_te_") or k.startswith("lora_unet_") or k.startswith("lora_te1_") or k.startswith("lora_te2_") ) for k in state_dict.keys() ): # Map SDXL blocks correctly. 
if unet_config is not None: # use unet config to remap block numbers state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) state_dict, network_alphas = _convert_non_diffusers_lora_to_diffusers(state_dict) out = (state_dict, network_alphas, metadata) if return_lora_metadata else (state_dict, network_alphas) return out @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_unet def load_lora_into_unet( cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `unet`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). unet (`UNet2DConditionModel`): The UNet model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as # their prefixes. logger.info(f"Loading {cls.unet_name}.") unet.load_lora_adapter( state_dict, prefix=cls.unet_name, network_alphas=network_alphas, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder def load_lora_into_text_encoder( cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `text_encoder` Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The key should be prefixed with an additional `text_encoder` to distinguish between unet lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. 
Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). text_encoder (`CLIPTextModel`): The text encoder model to load the LoRA layers into. prefix (`str`): Expected prefix of the `text_encoder` in the `state_dict`. lora_scale (`float`): How much to scale the output of the lora linear layer before it is added with the output of the regular lora layer. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ _load_lora_into_text_encoder( state_dict=state_dict, network_alphas=network_alphas, lora_scale=lora_scale, text_encoder=text_encoder, prefix=prefix, text_encoder_name=cls.text_encoder_name, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def save_lora_weights( cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, unet_lora_adapter_metadata=None, text_encoder_lora_adapter_metadata=None, text_encoder_2_lora_adapter_metadata=None, ): r""" Save the LoRA parameters corresponding to the UNet and text encoder. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `unet`. text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. unet_lora_adapter_metadata: LoRA adapter metadata associated with the unet to be serialized with the state dict. 
text_encoder_lora_adapter_metadata: LoRA adapter metadata associated with the text encoder to be serialized with the state dict. text_encoder_2_lora_adapter_metadata: LoRA adapter metadata associated with the second text encoder to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): raise ValueError( "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`." ) if unet_lora_layers: state_dict.update(cls.pack_weights(unet_lora_layers, cls.unet_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, "text_encoder")) if text_encoder_2_lora_layers: state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) if unet_lora_adapter_metadata is not None: lora_adapter_metadata.update(_pack_dict_with_prefix(unet_lora_adapter_metadata, cls.unet_name)) if text_encoder_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_lora_adapter_metadata, cls.text_encoder_name) ) if text_encoder_2_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_2_lora_adapter_metadata, "text_encoder_2") ) cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) def fuse_lora( self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) def unfuse_lora(self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. unfuse_text_encoder (`bool`, defaults to `True`): Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the LoRA parameters then it won't have any effect. 
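Example:

A minimal sketch of unfusing after a fuse; it reuses the same illustrative checkpoint names as the `fuse_lora` example above:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
# ... run inference with the fused parameters ...
pipeline.unfuse_lora()  # restores the original, unfused parameters
```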
""" super().unfuse_lora(components=components, **kwargs) class SD3LoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`SD3Transformer2DModel`], [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection). Specific to [`StableDiffusion3Pipeline`]. """ _lora_loadable_modules = ["transformer", "text_encoder", "text_encoder_2"] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. 
kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) self.load_lora_into_text_encoder( state_dict, network_alphas=None, text_encoder=self.text_encoder, prefix=self.text_encoder_name, lora_scale=self.lora_scale, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) self.load_lora_into_text_encoder( state_dict, network_alphas=None, text_encoder=self.text_encoder_2, prefix=f"{self.text_encoder_name}_2", lora_scale=self.lora_scale, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`SD3Transformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder def load_lora_into_text_encoder( cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `text_encoder` Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The key should be prefixed with an additional `text_encoder` to distinguish between unet lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. 
This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). text_encoder (`CLIPTextModel`): The text encoder model to load the LoRA layers into. prefix (`str`): Expected prefix of the `text_encoder` in the `state_dict`. lora_scale (`float`): How much to scale the output of the lora linear layer before it is added with the output of the regular lora layer. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ _load_lora_into_text_encoder( state_dict=state_dict, network_alphas=network_alphas, lora_scale=lora_scale, text_encoder=text_encoder, prefix=prefix, text_encoder_name=cls.text_encoder_name, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.save_lora_weights with unet->transformer def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata=None, text_encoder_lora_adapter_metadata=None, text_encoder_2_lora_adapter_metadata=None, ): r""" Save the LoRA parameters corresponding to the UNet and text encoder. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. 
safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. text_encoder_lora_adapter_metadata: LoRA adapter metadata associated with the text encoder to be serialized with the state dict. text_encoder_2_lora_adapter_metadata: LoRA adapter metadata associated with the second text encoder to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not (transformer_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): raise ValueError( "You must pass at least one of `transformer_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`." ) if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, "text_encoder")) if text_encoder_2_lora_layers: state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) if text_encoder_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_lora_adapter_metadata, cls.text_encoder_name) ) if text_encoder_2_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_2_lora_adapter_metadata, "text_encoder_2") ) cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.fuse_lora with unet->transformer def fuse_lora( self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. 
Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.unfuse_lora with unet->transformer def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. unfuse_text_encoder (`bool`, defaults to `True`): Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the LoRA parameters then it won't have any effect. """ super().unfuse_lora(components=components, **kwargs) class AuraFlowLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`AuraFlowTransformer2DModel`] Specific to [`AuraFlowPipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. 
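Example:

A minimal sketch of loading a LoRA into [`AuraFlowPipeline`]; the base checkpoint id is the public AuraFlow repository, while the LoRA repository id below is an illustrative placeholder for your own adapter:

```py
from diffusers import AuraFlowPipeline
import torch

pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16).to("cuda")
# Replace with your own LoRA repository id or a local `.safetensors` file.
pipe.load_lora_weights("your-username/your-auraflow-lora", adapter_name="my_lora")
image = pipe("a watercolor fox in a forest").images[0]
```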
""" if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->AuraFlowTransformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`AuraFlowTransformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. 
Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. 
</Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class FluxLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`FluxTransformer2DModel`], [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel). Specific to [`FluxPipeline`]. """ _lora_loadable_modules = ["transformer", "text_encoder"] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME _control_lora_supported_norm_keys = ["norm_q", "norm_k", "norm_added_q", "norm_added_k"] @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], return_alphas: bool = False, **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both.
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} # TODO (sayakpaul): to a follow-up to clean and try to unify the conditions. is_kohya = any(".lora_down.weight" in k for k in state_dict) if is_kohya: state_dict = _convert_kohya_flux_lora_to_diffusers(state_dict) # Kohya already takes care of scaling the LoRA parameters with alpha. return cls._prepare_outputs( state_dict, metadata=metadata, alphas=None, return_alphas=return_alphas, return_metadata=return_lora_metadata, ) is_xlabs = any("processor" in k for k in state_dict) if is_xlabs: state_dict = _convert_xlabs_flux_lora_to_diffusers(state_dict) # xlabs doesn't use `alpha`. return cls._prepare_outputs( state_dict, metadata=metadata, alphas=None, return_alphas=return_alphas, return_metadata=return_lora_metadata, ) is_bfl_control = any("query_norm.scale" in k for k in state_dict) if is_bfl_control: state_dict = _convert_bfl_flux_control_lora_to_diffusers(state_dict) return cls._prepare_outputs( state_dict, metadata=metadata, alphas=None, return_alphas=return_alphas, return_metadata=return_lora_metadata, ) is_fal_kontext = any("base_model" in k for k in state_dict) if is_fal_kontext: state_dict = _convert_fal_kontext_lora_to_diffusers(state_dict) return cls._prepare_outputs( state_dict, metadata=metadata, alphas=None, return_alphas=return_alphas, return_metadata=return_lora_metadata, ) # For state dicts like # https://huggingface.co/TheLastBen/Jon_Snow_Flux_LoRA keys = list(state_dict.keys()) network_alphas = {} for k in keys: if "alpha" in k: alpha_value = state_dict.get(k) if (torch.is_tensor(alpha_value) and torch.is_floating_point(alpha_value)) or isinstance( alpha_value, float ): network_alphas[k] = state_dict.pop(k) else: raise ValueError( f"The alpha key ({k}) seems to be incorrect. If you think this error is unexpected, please open as issue." 
) if return_alphas or return_lora_metadata: return cls._prepare_outputs( state_dict, metadata=metadata, alphas=network_alphas, return_alphas=return_alphas, return_metadata=return_lora_metadata, ) else: return state_dict def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): `Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, network_alphas, metadata = self.lora_state_dict( pretrained_model_name_or_path_or_dict, return_alphas=True, **kwargs ) has_lora_keys = any("lora" in key for key in state_dict.keys()) # Flux Control LoRAs also have norm keys has_norm_keys = any( norm_key in key for key in state_dict.keys() for norm_key in self._control_lora_supported_norm_keys ) if not (has_lora_keys or has_norm_keys): raise ValueError("Invalid LoRA checkpoint.") transformer_lora_state_dict = { k: state_dict.get(k) for k in list(state_dict.keys()) if k.startswith(f"{self.transformer_name}.") and "lora" in k } transformer_norm_state_dict = { k: state_dict.pop(k) for k in list(state_dict.keys()) if k.startswith(f"{self.transformer_name}.") and any(norm_key in k for norm_key in self._control_lora_supported_norm_keys) } transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer has_param_with_expanded_shape = False if len(transformer_lora_state_dict) > 0: has_param_with_expanded_shape = self._maybe_expand_transformer_param_shape_or_error_( transformer, transformer_lora_state_dict, transformer_norm_state_dict ) if has_param_with_expanded_shape: logger.info( "The LoRA weights contain parameters that have different shapes that expected by the transformer. 
" "As a result, the state_dict of the transformer has been expanded to match the LoRA parameter shapes. " "To get a comprehensive list of parameter names that were modified, enable debug logging." ) if len(transformer_lora_state_dict) > 0: transformer_lora_state_dict = self._maybe_expand_lora_state_dict( transformer=transformer, lora_state_dict=transformer_lora_state_dict ) for k in transformer_lora_state_dict: state_dict.update({k: transformer_lora_state_dict[k]}) self.load_lora_into_transformer( state_dict, network_alphas=network_alphas, transformer=transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) if len(transformer_norm_state_dict) > 0: transformer._transformer_norm_layers = self._load_norm_into_transformer( transformer_norm_state_dict, transformer=transformer, discard_original_layers=False, ) self.load_lora_into_text_encoder( state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix=self.text_encoder_name, lora_scale=self.lora_scale, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def load_lora_into_transformer( cls, state_dict, network_alphas, transformer, adapter_name=None, metadata=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). transformer (`FluxTransformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. 
logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=network_alphas, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def _load_norm_into_transformer( cls, state_dict, transformer, prefix=None, discard_original_layers=False, ) -> Dict[str, torch.Tensor]: # Remove prefix if present prefix = prefix or cls.transformer_name for key in list(state_dict.keys()): if key.split(".")[0] == prefix: state_dict[key.removeprefix(f"{prefix}.")] = state_dict.pop(key) # Find invalid keys transformer_state_dict = transformer.state_dict() transformer_keys = set(transformer_state_dict.keys()) state_dict_keys = set(state_dict.keys()) extra_keys = list(state_dict_keys - transformer_keys) if extra_keys: logger.warning( f"Unsupported keys found in state dict when trying to load normalization layers into the transformer. The following keys will be ignored:\n{extra_keys}." ) for key in extra_keys: state_dict.pop(key) # Save the layers that are going to be overwritten so that unload_lora_weights can work as expected overwritten_layers_state_dict = {} if not discard_original_layers: for key in state_dict.keys(): overwritten_layers_state_dict[key] = transformer_state_dict[key].clone() logger.info( "The provided state dict contains normalization layers in addition to LoRA layers. The normalization layers will directly update the state_dict of the transformer " 'as opposed to the LoRA layers that will co-exist separately until the "fuse_lora()" method is called. That is to say, the normalization layers will always be directly ' "fused into the transformer and can only be unfused if `discard_original_layers=True` is passed. This might also have implications when dealing with multiple LoRAs. " "If you notice something unexpected, please open an issue: https://github.com/huggingface/diffusers/issues." ) # We can't load with strict=True because the current state_dict does not contain all the transformer keys incompatible_keys = transformer.load_state_dict(state_dict, strict=False) unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) # We shouldn't expect to see the supported norm keys here being present in the unexpected keys. if unexpected_keys: if any(norm_key in k for k in unexpected_keys for norm_key in cls._control_lora_supported_norm_keys): raise ValueError( f"Found {unexpected_keys} as unexpected keys while trying to load norm layers into the transformer." ) return overwritten_layers_state_dict @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder def load_lora_into_text_encoder( cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `text_encoder` Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The key should be prefixed with an additional `text_encoder` to distinguish between unet lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). 
text_encoder (`CLIPTextModel`): The text encoder model to load the LoRA layers into. prefix (`str`): Expected prefix of the `text_encoder` in the `state_dict`. lora_scale (`float`): How much to scale the output of the lora linear layer before it is added with the output of the regular lora layer. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ _load_lora_into_text_encoder( state_dict=state_dict, network_alphas=network_alphas, lora_scale=lora_scale, text_encoder=text_encoder, prefix=prefix, text_encoder_name=cls.text_encoder_name, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.save_lora_weights with unet->transformer def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata=None, text_encoder_lora_adapter_metadata=None, ): r""" Save the LoRA parameters corresponding to the UNet and text encoder. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. text_encoder_lora_adapter_metadata: LoRA adapter metadata associated with the text encoder to be serialized with the state dict. 
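Example:

A minimal sketch of serializing transformer LoRA layers; it assumes `transformer_lora_layers` is the LoRA state dict produced by your training loop (for example via `peft.utils.get_peft_model_state_dict` on a PEFT-wrapped transformer), and the directory and file names are illustrative:

```py
from diffusers import FluxPipeline

# Assuming `transformer_lora_layers` holds the trained LoRA state dict.
FluxPipeline.save_lora_weights(
    save_directory="./flux-lora",
    transformer_lora_layers=transformer_lora_layers,
    weight_name="pytorch_lora_weights.safetensors",
)
```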
""" state_dict = {} lora_adapter_metadata = {} if not (transformer_lora_layers or text_encoder_lora_layers): raise ValueError("You must pass at least one of `transformer_lora_layers` and `text_encoder_lora_layers`.") if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) if transformer_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) if text_encoder_lora_adapter_metadata: lora_adapter_metadata.update( _pack_dict_with_prefix(text_encoder_lora_adapter_metadata, cls.text_encoder_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer if ( hasattr(transformer, "_transformer_norm_layers") and isinstance(transformer._transformer_norm_layers, dict) and len(transformer._transformer_norm_layers.keys()) > 0 ): logger.info( "The provided state dict contains normalization layers in addition to LoRA layers. The normalization layers will be directly updated the state_dict of the transformer " "as opposed to the LoRA layers that will co-exist separately until the 'fuse_lora()' method is called. That is to say, the normalization layers will always be directly " "fused into the transformer and can only be unfused if `discard_original_layers=True` is passed." ) super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. 
""" transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer if hasattr(transformer, "_transformer_norm_layers") and transformer._transformer_norm_layers: transformer.load_state_dict(transformer._transformer_norm_layers, strict=False) super().unfuse_lora(components=components, **kwargs) # We override this here account for `_transformer_norm_layers` and `_overwritten_params`. def unload_lora_weights(self, reset_to_overwritten_params=False): """ Unloads the LoRA parameters. Args: reset_to_overwritten_params (`bool`, defaults to `False`): Whether to reset the LoRA-loaded modules to their original params. Refer to the [Flux documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux) to learn more. Examples: ```python >>> # Assuming `pipeline` is already loaded with the LoRA parameters. >>> pipeline.unload_lora_weights() >>> ... ``` """ super().unload_lora_weights() transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer if hasattr(transformer, "_transformer_norm_layers") and transformer._transformer_norm_layers: transformer.load_state_dict(transformer._transformer_norm_layers, strict=False) transformer._transformer_norm_layers = None if reset_to_overwritten_params and getattr(transformer, "_overwritten_params", None) is not None: overwritten_params = transformer._overwritten_params module_names = set() for param_name in overwritten_params: if param_name.endswith(".weight"): module_names.add(param_name.replace(".weight", "")) for name, module in transformer.named_modules(): if isinstance(module, torch.nn.Linear) and name in module_names: module_weight = module.weight.data module_bias = module.bias.data if module.bias is not None else None bias = module_bias is not None parent_module_name, _, current_module_name = name.rpartition(".") parent_module = transformer.get_submodule(parent_module_name) current_param_weight = overwritten_params[f"{name}.weight"] in_features, out_features = current_param_weight.shape[1], current_param_weight.shape[0] with torch.device("meta"): original_module = torch.nn.Linear( in_features, out_features, bias=bias, dtype=module_weight.dtype, ) tmp_state_dict = {"weight": current_param_weight} if module_bias is not None: tmp_state_dict.update({"bias": overwritten_params[f"{name}.bias"]}) original_module.load_state_dict(tmp_state_dict, assign=True, strict=True) setattr(parent_module, current_module_name, original_module) del tmp_state_dict if current_module_name in _MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX: attribute_name = _MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX[current_module_name] new_value = int(current_param_weight.shape[1]) old_value = getattr(transformer.config, attribute_name) setattr(transformer.config, attribute_name, new_value) logger.info( f"Set the {attribute_name} attribute of the model to {new_value} from {old_value}." ) @classmethod def _maybe_expand_transformer_param_shape_or_error_( cls, transformer: torch.nn.Module, lora_state_dict=None, norm_state_dict=None, prefix=None, ) -> bool: """ Control LoRA expands the shape of the input layer from (3072, 64) to (3072, 128). This method handles that and generalizes things a bit so that any parameter that needs expansion receives appropriate treatment. 
""" state_dict = {} if lora_state_dict is not None: state_dict.update(lora_state_dict) if norm_state_dict is not None: state_dict.update(norm_state_dict) # Remove prefix if present prefix = prefix or cls.transformer_name for key in list(state_dict.keys()): if key.split(".")[0] == prefix: state_dict[key.removeprefix(f"{prefix}.")] = state_dict.pop(key) # Expand transformer parameter shapes if they don't match lora has_param_with_shape_update = False overwritten_params = {} is_peft_loaded = getattr(transformer, "peft_config", None) is not None is_quantized = hasattr(transformer, "hf_quantizer") for name, module in transformer.named_modules(): if isinstance(module, torch.nn.Linear): module_weight = module.weight.data module_bias = module.bias.data if module.bias is not None else None bias = module_bias is not None lora_base_name = name.replace(".base_layer", "") if is_peft_loaded else name lora_A_weight_name = f"{lora_base_name}.lora_A.weight" lora_B_weight_name = f"{lora_base_name}.lora_B.weight" if lora_A_weight_name not in state_dict: continue in_features = state_dict[lora_A_weight_name].shape[1] out_features = state_dict[lora_B_weight_name].shape[0] # Model maybe loaded with different quantization schemes which may flatten the params. # `bitsandbytes`, for example, flatten the weights when using 4bit. 8bit bnb models # preserve weight shape. module_weight_shape = cls._calculate_module_shape(model=transformer, base_module=module) # This means there's no need for an expansion in the params, so we simply skip. if tuple(module_weight_shape) == (out_features, in_features): continue module_out_features, module_in_features = module_weight_shape debug_message = "" if in_features > module_in_features: debug_message += ( f'Expanding the nn.Linear input/output features for module="{name}" because the provided LoRA ' f"checkpoint contains higher number of features than expected. The number of input_features will be " f"expanded from {module_in_features} to {in_features}" ) if out_features > module_out_features: debug_message += ( ", and the number of output features will be " f"expanded from {module_out_features} to {out_features}." ) else: debug_message += "." if debug_message: logger.debug(debug_message) if out_features > module_out_features or in_features > module_in_features: has_param_with_shape_update = True parent_module_name, _, current_module_name = name.rpartition(".") parent_module = transformer.get_submodule(parent_module_name) if is_quantized: module_weight = _maybe_dequantize_weight_for_expanded_lora(transformer, module) # TODO: consider if this layer needs to be a quantized layer as well if `is_quantized` is True. with torch.device("meta"): expanded_module = torch.nn.Linear( in_features, out_features, bias=bias, dtype=module_weight.dtype ) # Only weights are expanded and biases are not. This is because only the input dimensions # are changed while the output dimensions remain the same. The shape of the weight tensor # is (out_features, in_features), while the shape of bias tensor is (out_features,), which # explains the reason why only weights are expanded. 
new_weight = torch.zeros_like( expanded_module.weight.data, device=module_weight.device, dtype=module_weight.dtype ) slices = tuple(slice(0, dim) for dim in module_weight_shape) new_weight[slices] = module_weight tmp_state_dict = {"weight": new_weight} if module_bias is not None: tmp_state_dict["bias"] = module_bias expanded_module.load_state_dict(tmp_state_dict, strict=True, assign=True) setattr(parent_module, current_module_name, expanded_module) del tmp_state_dict if current_module_name in _MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX: attribute_name = _MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX[current_module_name] new_value = int(expanded_module.weight.data.shape[1]) old_value = getattr(transformer.config, attribute_name) setattr(transformer.config, attribute_name, new_value) logger.info( f"Set the {attribute_name} attribute of the model to {new_value} from {old_value}." ) # For `unload_lora_weights()`. # TODO: this could lead to more memory overhead if the number of overwritten params # are large. Should be revisited later and tackled through a `discard_original_layers` arg. overwritten_params[f"{current_module_name}.weight"] = module_weight if module_bias is not None: overwritten_params[f"{current_module_name}.bias"] = module_bias if len(overwritten_params) > 0: transformer._overwritten_params = overwritten_params return has_param_with_shape_update @classmethod def _maybe_expand_lora_state_dict(cls, transformer, lora_state_dict): expanded_module_names = set() transformer_state_dict = transformer.state_dict() prefix = f"{cls.transformer_name}." lora_module_names = [ key[: -len(".lora_A.weight")] for key in lora_state_dict if key.endswith(".lora_A.weight") ] lora_module_names = [name[len(prefix) :] for name in lora_module_names if name.startswith(prefix)] lora_module_names = sorted(set(lora_module_names)) transformer_module_names = sorted({name for name, _ in transformer.named_modules()}) unexpected_modules = set(lora_module_names) - set(transformer_module_names) if unexpected_modules: logger.debug(f"Found unexpected modules: {unexpected_modules}. These will be ignored.") for k in lora_module_names: if k in unexpected_modules: continue base_param_name = ( f"{k.replace(prefix, '')}.base_layer.weight" if f"{k.replace(prefix, '')}.base_layer.weight" in transformer_state_dict else f"{k.replace(prefix, '')}.weight" ) base_weight_param = transformer_state_dict[base_param_name] lora_A_param = lora_state_dict[f"{prefix}{k}.lora_A.weight"] # TODO (sayakpaul): Handle the cases when we actually need to expand when using quantization. base_module_shape = cls._calculate_module_shape(model=transformer, base_weight_param_name=base_param_name) if base_module_shape[1] > lora_A_param.shape[1]: shape = (lora_A_param.shape[0], base_weight_param.shape[1]) expanded_state_dict_weight = torch.zeros(shape, device=base_weight_param.device) expanded_state_dict_weight[:, : lora_A_param.shape[1]].copy_(lora_A_param) lora_state_dict[f"{prefix}{k}.lora_A.weight"] = expanded_state_dict_weight expanded_module_names.add(k) elif base_module_shape[1] < lora_A_param.shape[1]: raise NotImplementedError( f"This LoRA param ({k}.lora_A.weight) has an incompatible shape {lora_A_param.shape}. Please open an issue to file for a feature request - https://github.com/huggingface/diffusers/issues/new." ) if expanded_module_names: logger.info( f"The following LoRA modules were zero padded to match the state dict of {cls.transformer_name}: {expanded_module_names}. 
Please open an issue if you think this was unexpected - https://github.com/huggingface/diffusers/issues/new." ) return lora_state_dict @staticmethod def _calculate_module_shape( model: "torch.nn.Module", base_module: "torch.nn.Linear" = None, base_weight_param_name: str = None, ) -> "torch.Size": def _get_weight_shape(weight: torch.Tensor): if weight.__class__.__name__ == "Params4bit": return weight.quant_state.shape elif weight.__class__.__name__ == "GGUFParameter": return weight.quant_shape else: return weight.shape if base_module is not None: return _get_weight_shape(base_module.weight) elif base_weight_param_name is not None: if not base_weight_param_name.endswith(".weight"): raise ValueError( f"Invalid `base_weight_param_name` passed as it does not end with '.weight' {base_weight_param_name=}." ) module_path = base_weight_param_name.rsplit(".weight", 1)[0] submodule = get_submodule_by_name(model, module_path) return _get_weight_shape(submodule.weight) raise ValueError("Either `base_module` or `base_weight_param_name` must be provided.") @staticmethod def _prepare_outputs(state_dict, metadata, alphas=None, return_alphas=False, return_metadata=False): outputs = [state_dict] if return_alphas: outputs.append(alphas) if return_metadata: outputs.append(metadata) return tuple(outputs) if (return_alphas or return_metadata) else state_dict # The reason why we subclass from `StableDiffusionLoraLoaderMixin` here is because Amused initially # relied on `StableDiffusionLoraLoaderMixin` for its LoRA support. class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin): _lora_loadable_modules = ["transformer", "text_encoder"] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME @classmethod # Copied from diffusers.loaders.lora_pipeline.FluxLoraLoaderMixin.load_lora_into_transformer with FluxTransformer2DModel->UVit2DModel def load_lora_into_transformer( cls, state_dict, network_alphas, transformer, adapter_name=None, metadata=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). transformer (`UVit2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. 
Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=network_alphas, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder def load_lora_into_text_encoder( cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `text_encoder` Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The key should be prefixed with an additional `text_encoder` to distinguish between unet lora layers. network_alphas (`Dict[str, float]`): The value of the network alpha used for stable learning and preventing underflow. This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). text_encoder (`CLIPTextModel`): The text encoder model to load the LoRA layers into. prefix (`str`): Expected prefix of the `text_encoder` in the `state_dict`. lora_scale (`float`): How much to scale the output of the lora linear layer before it is added with the output of the regular lora layer. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ _load_lora_into_text_encoder( state_dict=state_dict, network_alphas=network_alphas, lora_scale=lora_scale, text_encoder=text_encoder, prefix=prefix, text_encoder_name=cls.text_encoder_name, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod def save_lora_weights( cls, save_directory: Union[str, os.PathLike], text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, transformer_lora_layers: Dict[str, torch.nn.Module] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, ): r""" Save the LoRA parameters corresponding to the UNet and text encoder. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `unet`. text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. 
Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. """ state_dict = {} if not (transformer_lora_layers or text_encoder_lora_layers): raise ValueError("You must pass at least one of `transformer_lora_layers` or `text_encoder_lora_layers`.") if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, ) class CogVideoXLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`CogVideoXTransformer3DModel`]. Specific to [`CogVideoXPipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. 
subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. 
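
        Example (an illustrative sketch; the base checkpoint id and the LoRA path below are placeholders for
        checkpoints of your choice):

        ```py
        import torch

        from diffusers import CogVideoXPipeline

        pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights(
            "path/to/lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="cogvideox-lora"
        )
        ```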
""" if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogVideoXTransformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`CogVideoXTransformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. 
logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Adapted from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.save_lora_weights without support for text encoder def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. 
Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class Mochi1LoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`MochiTransformer3DModel`]. Specific to [`MochiPipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. 
return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. 
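
        Example (an illustrative sketch; the base checkpoint id and the LoRA directory below are placeholders,
        not required values):

        ```py
        import torch

        from diffusers import MochiPipeline

        pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights(
            "path/to/lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="mochi-lora"
        )
        ```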
""" if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->MochiTransformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`MochiTransformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. 
Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. 
</Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class LTXVideoLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`LTXVideoTransformer3DModel`]. Specific to [`LTXPipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
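        # Pop the optional kwargs that control how the checkpoint is located and loaded; anything left unset
        # falls back to the defaults below.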
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} is_non_diffusers_format = any(k.startswith("diffusion_model.") for k in state_dict) if is_non_diffusers_format: state_dict = _convert_non_diffusers_ltxv_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." 
) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->LTXVideoTransformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`LTXVideoTransformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. 
Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class SanaLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`SanaTransformer2DModel`]. Specific to [`SanaPipeline`]. 
""" _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." 
) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->SanaTransformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`SanaTransformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. 
Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class HunyuanVideoLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`HunyuanVideoTransformer3DModel`]. Specific to [`HunyuanVideoPipeline`]. 
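Example (illustrative sketch; the base-model and LoRA repo ids below are placeholders; both diffusers-format and original-format HunyuanVideo LoRA checkpoints are accepted):

```py
import torch
from diffusers import HunyuanVideoPipeline

pipeline = HunyuanVideoPipeline.from_pretrained("<hunyuan-video-checkpoint>", torch_dtype=torch.bfloat16).to("cuda")
# Original-format checkpoints (keys containing "img_attn_qkv") are converted automatically.
pipeline.load_lora_weights("<lora-repo-or-local-path>", adapter_name="hv_lora")
```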
""" _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading original format HunyuanVideo LoRA checkpoints. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} is_original_hunyuan_video = any("img_attn_qkv" in k for k in state_dict) if is_original_hunyuan_video: state_dict = _convert_hunyuan_video_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." 
) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->HunyuanVideoTransformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`HunyuanVideoTransformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. 
is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class Lumina2LoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`Lumina2Transformer2DModel`]. 
Specific to [`Lumina2Text2ImgPipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
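# Note (descriptive comment): Lumina2 LoRAs exported in a non-diffusers layout (keys prefixed
# with "diffusion_model.") are detected below and converted with
# `_convert_non_diffusers_lumina2_lora_to_diffusers` before the state dict is returned.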
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} # conversion. non_diffusers = any(k.startswith("diffusion_model.") for k in state_dict) if non_diffusers: state_dict = _convert_non_diffusers_lumina2_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." 
) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->Lumina2Transformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`Lumina2Transformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. 
Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class WanLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`WanTransformer3DModel`]. Specific to [`WanPipeline`] and `[WanImageToVideoPipeline`]. 
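Example (illustrative sketch; the base-model and LoRA repo ids below are placeholders, not real checkpoints):

```py
import torch
from diffusers import WanPipeline

pipeline = WanPipeline.from_pretrained("<wan-checkpoint>", torch_dtype=torch.bfloat16).to("cuda")
pipeline.load_lora_weights("<lora-repo-or-local-path>", adapter_name="wan_lora")

# For pipelines that expose a second transformer (e.g. Wan 2.2 style models), the same call
# accepts `load_into_transformer_2=True` to load the adapter into `transformer_2` instead.
pipeline.load_lora_weights("<lora-repo-or-local-path>", adapter_name="wan_lora_2", load_into_transformer_2=True)
```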
""" _lora_loadable_modules = ["transformer", "transformer_2"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) if any(k.startswith("diffusion_model.") for k in state_dict): state_dict = _convert_non_diffusers_wan_lora_to_diffusers(state_dict) elif any(k.startswith("lora_unet_") for k in state_dict): state_dict = _convert_musubi_wan_lora_to_diffusers(state_dict) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out @classmethod def _maybe_expand_t2v_lora_for_i2v( cls, transformer: torch.nn.Module, state_dict, ): if transformer.config.image_dim is None: return state_dict target_device = transformer.device if any(k.startswith("transformer.blocks.") for k in state_dict): num_blocks = len({k.split("blocks.")[1].split(".")[0] for k in state_dict if "blocks." in k}) is_i2v_lora = any("add_k_proj" in k for k in state_dict) and any("add_v_proj" in k for k in state_dict) has_bias = any(".lora_B.bias" in k for k in state_dict) if is_i2v_lora: return state_dict for i in range(num_blocks): for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]): # These keys should exist if the block `i` was part of the T2V LoRA. ref_key_lora_A = f"transformer.blocks.{i}.attn2.to_k.lora_A.weight" ref_key_lora_B = f"transformer.blocks.{i}.attn2.to_k.lora_B.weight" if ref_key_lora_A not in state_dict or ref_key_lora_B not in state_dict: continue state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_A.weight"] = torch.zeros_like( state_dict[f"transformer.blocks.{i}.attn2.to_k.lora_A.weight"], device=target_device ) state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_B.weight"] = torch.zeros_like( state_dict[f"transformer.blocks.{i}.attn2.to_k.lora_B.weight"], device=target_device ) # If the original LoRA had biases (indicated by has_bias) # AND the specific reference bias key exists for this block. 
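# For example, for block 0 and the new projection "add_k_proj", this creates a zero tensor under
# "transformer.blocks.0.attn2.add_k_proj.lora_B.bias", shaped like the reference "to_k" bias.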
ref_key_lora_B_bias = f"transformer.blocks.{i}.attn2.to_k.lora_B.bias" if has_bias and ref_key_lora_B_bias in state_dict: ref_lora_B_bias_tensor = state_dict[ref_key_lora_B_bias] state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_B.bias"] = torch.zeros_like( ref_lora_B_bias_tensor, device=target_device, ) return state_dict def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) # convert T2V LoRA to I2V LoRA (when loaded to Wan I2V) by adding zeros for the additional (missing) _img layers state_dict = self._maybe_expand_t2v_lora_for_i2v( transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, state_dict=state_dict, ) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") load_into_transformer_2 = kwargs.pop("load_into_transformer_2", False) if load_into_transformer_2: if not hasattr(self, "transformer_2"): raise AttributeError( f"'{type(self).__name__}' object has no attribute transformer_2" "Note that Wan2.1 models do not have a transformer_2 component." "Ensure the model has a transformer_2 component before setting load_into_transformer_2=True." 
) self.load_lora_into_transformer( state_dict, transformer=self.transformer_2, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) else: self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->WanTransformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`WanTransformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. 
Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class SkyReelsV2LoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`SkyReelsV2Transformer3DModel`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.WanLoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. 
This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. 
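# Note (descriptive comment): this mirrors `WanLoraLoaderMixin.lora_state_dict` (see the
# "Copied from" marker above), so the same non-diffusers conversions apply below: keys prefixed
# with "diffusion_model." and keys prefixed with "lora_unet_" are converted to the diffusers layout.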
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) if any(k.startswith("diffusion_model.") for k in state_dict): state_dict = _convert_non_diffusers_wan_lora_to_diffusers(state_dict) elif any(k.startswith("lora_unet_") for k in state_dict): state_dict = _convert_musubi_wan_lora_to_diffusers(state_dict) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out @classmethod # Copied from diffusers.loaders.lora_pipeline.WanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v def _maybe_expand_t2v_lora_for_i2v( cls, transformer: torch.nn.Module, state_dict, ): if transformer.config.image_dim is None: return state_dict target_device = transformer.device if any(k.startswith("transformer.blocks.") for k in state_dict): num_blocks = len({k.split("blocks.")[1].split(".")[0] for k in state_dict if "blocks." in k}) is_i2v_lora = any("add_k_proj" in k for k in state_dict) and any("add_v_proj" in k for k in state_dict) has_bias = any(".lora_B.bias" in k for k in state_dict) if is_i2v_lora: return state_dict for i in range(num_blocks): for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]): # These keys should exist if the block `i` was part of the T2V LoRA. ref_key_lora_A = f"transformer.blocks.{i}.attn2.to_k.lora_A.weight" ref_key_lora_B = f"transformer.blocks.{i}.attn2.to_k.lora_B.weight" if ref_key_lora_A not in state_dict or ref_key_lora_B not in state_dict: continue state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_A.weight"] = torch.zeros_like( state_dict[f"transformer.blocks.{i}.attn2.to_k.lora_A.weight"], device=target_device ) state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_B.weight"] = torch.zeros_like( state_dict[f"transformer.blocks.{i}.attn2.to_k.lora_B.weight"], device=target_device ) # If the original LoRA had biases (indicated by has_bias) # AND the specific reference bias key exists for this block. 
ref_key_lora_B_bias = f"transformer.blocks.{i}.attn2.to_k.lora_B.bias" if has_bias and ref_key_lora_B_bias in state_dict: ref_lora_B_bias_tensor = state_dict[ref_key_lora_B_bias] state_dict[f"transformer.blocks.{i}.attn2.{c}.lora_B.bias"] = torch.zeros_like( ref_lora_B_bias_tensor, device=target_device, ) return state_dict # Copied from diffusers.loaders.lora_pipeline.WanLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) # convert T2V LoRA to I2V LoRA (when loaded to Wan I2V) by adding zeros for the additional (missing) _img layers state_dict = self._maybe_expand_t2v_lora_for_i2v( transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, state_dict=state_dict, ) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") load_into_transformer_2 = kwargs.pop("load_into_transformer_2", False) if load_into_transformer_2: if not hasattr(self, "transformer_2"): raise AttributeError( f"'{type(self).__name__}' object has no attribute transformer_2" "Note that Wan2.1 models do not have a transformer_2 component." "Ensure the model has a transformer_2 component before setting load_into_transformer_2=True." 
) self.load_lora_into_transformer( state_dict, transformer=self.transformer_2, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) else: self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->SkyReelsV2Transformer3DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`SkyReelsV2Transformer3DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. 
Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. """ state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class CogView4LoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`WanTransformer3DModel`]. Specific to [`CogView4Pipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.lora_state_dict def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. 
<Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. 
If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogView4Transformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. 
transformer (`CogView4Transformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. 
""" state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class HiDreamImageLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`HiDreamImageTransformer2DModel`]. Specific to [`HiDreamImagePipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. 
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." 
logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} is_non_diffusers_format = any("diffusion_model" in k for k in state_dict) if is_non_diffusers_format: state_dict = _convert_non_diffusers_hidream_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->HiDreamImageTransformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. 
The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`HiDreamImageTransformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. 
""" state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class QwenImageLoraLoaderMixin(LoraBaseMixin): r""" Load LoRA layers into [`QwenImageTransformer2DModel`]. Specific to [`QwenImagePipeline`]. """ _lora_loadable_modules = ["transformer"] transformer_name = TRANSFORMER_NAME @classmethod @validate_hf_hub_args def lora_state_dict( cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs, ): r""" Return state dict for lora weights and the network alphas. <Tip warning={true}> We support loading A1111 formatted LoRA checkpoints in a limited capacity. This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. 
- A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. return_lora_metadata (`bool`, *optional*, defaults to False): When enabled, additionally return the LoRA adapter metadata, typically found in the state dict. """ # Load the main state dict first which has the LoRA layers for either of # transformer and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) return_lora_metadata = kwargs.pop("return_lora_metadata", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"} state_dict, metadata = _fetch_state_dict( pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle, ) is_dora_scale_present = any("dora_scale" in k for k in state_dict) if is_dora_scale_present: warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new." 
logger.warning(warn_msg) state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k} has_alphas_in_sd = any(k.endswith(".alpha") for k in state_dict) has_lora_unet = any(k.startswith("lora_unet_") for k in state_dict) if has_alphas_in_sd or has_lora_unet: state_dict = _convert_non_diffusers_qwen_lora_to_diffusers(state_dict) out = (state_dict, metadata) if return_lora_metadata else state_dict return out # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights def load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str] = None, hotswap: bool = False, **kwargs, ): """ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded into `self.transformer`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA) if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # if a dict is passed, copy it instead of modifying it inplace if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() # First, ensure that the checkpoint is a compatible one and can be successfully loaded. kwargs["return_lora_metadata"] = True state_dict, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all("lora" in key for key in state_dict.keys()) if not is_correct_format: raise ValueError("Invalid LoRA checkpoint.") self.load_lora_into_transformer( state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, adapter_name=adapter_name, metadata=metadata, _pipeline=self, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->QwenImageTransformer2DModel def load_lora_into_transformer( cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): """ This will load the LoRA layers specified in `state_dict` into `transformer`. Parameters: state_dict (`dict`): A standard state dict containing the lora layer parameters. 
The keys can either be indexed directly into the unet or prefixed with an additional `unet` which can be used to distinguish between text encoder lora layers. transformer (`QwenImageTransformer2DModel`): The Transformer model to load the LoRA layers into. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. hotswap (`bool`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]. metadata (`dict`): Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived from the state dict. """ if low_cpu_mem_usage and is_peft_version("<", "0.13.0"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) # Load the layers corresponding to transformer. logger.info(f"Loading {cls.transformer_name}.") transformer.load_lora_adapter( state_dict, network_alphas=None, adapter_name=adapter_name, metadata=metadata, _pipeline=_pipeline, low_cpu_mem_usage=low_cpu_mem_usage, hotswap=hotswap, ) @classmethod # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights def save_lora_weights( cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = True, transformer_lora_adapter_metadata: Optional[dict] = None, ): r""" Save the LoRA parameters corresponding to the transformer. Arguments: save_directory (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist. transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `transformer`. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. transformer_lora_adapter_metadata: LoRA adapter metadata associated with the transformer to be serialized with the state dict. 
""" state_dict = {} lora_adapter_metadata = {} if not transformer_lora_layers: raise ValueError("You must pass `transformer_lora_layers`.") state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if transformer_lora_adapter_metadata is not None: lora_adapter_metadata.update( _pack_dict_with_prefix(transformer_lora_adapter_metadata, cls.transformer_name) ) # Save the model cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, lora_adapter_metadata=lora_adapter_metadata, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora def fuse_lora( self, components: List[str] = ["transformer"], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ super().fuse_lora( components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names, **kwargs, ) # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. """ super().unfuse_lora(components=components, **kwargs) class LoraLoaderMixin(StableDiffusionLoraLoaderMixin): def __init__(self, *args, **kwargs): deprecation_message = "LoraLoaderMixin is deprecated and this will be removed in a future version. Please use `StableDiffusionLoraLoaderMixin`, instead." deprecate("LoraLoaderMixin", "1.0.0", deprecation_message) super().__init__(*args, **kwargs)
diffusers/src/diffusers/loaders/lora_pipeline.py/0
{ "file_path": "diffusers/src/diffusers/loaders/lora_pipeline.py", "repo_id": "diffusers", "token_count": 144944 }
159
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import functools import inspect import math from enum import Enum from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import torch from ..utils import ( get_logger, is_flash_attn_3_available, is_flash_attn_available, is_flash_attn_version, is_sageattention_available, is_sageattention_version, is_torch_npu_available, is_torch_version, is_torch_xla_available, is_torch_xla_version, is_xformers_available, is_xformers_version, ) from ..utils.constants import DIFFUSERS_ATTN_BACKEND, DIFFUSERS_ATTN_CHECKS _REQUIRED_FLASH_VERSION = "2.6.3" _REQUIRED_SAGE_VERSION = "2.1.1" _REQUIRED_FLEX_VERSION = "2.5.0" _REQUIRED_XLA_VERSION = "2.2" _REQUIRED_XFORMERS_VERSION = "0.0.29" _CAN_USE_FLASH_ATTN = is_flash_attn_available() and is_flash_attn_version(">=", _REQUIRED_FLASH_VERSION) _CAN_USE_FLASH_ATTN_3 = is_flash_attn_3_available() _CAN_USE_SAGE_ATTN = is_sageattention_available() and is_sageattention_version(">=", _REQUIRED_SAGE_VERSION) _CAN_USE_FLEX_ATTN = is_torch_version(">=", _REQUIRED_FLEX_VERSION) _CAN_USE_NPU_ATTN = is_torch_npu_available() _CAN_USE_XLA_ATTN = is_torch_xla_available() and is_torch_xla_version(">=", _REQUIRED_XLA_VERSION) _CAN_USE_XFORMERS_ATTN = is_xformers_available() and is_xformers_version(">=", _REQUIRED_XFORMERS_VERSION) if _CAN_USE_FLASH_ATTN: from flash_attn import flash_attn_func, flash_attn_varlen_func else: flash_attn_func = None flash_attn_varlen_func = None if _CAN_USE_FLASH_ATTN_3: from flash_attn_interface import flash_attn_func as flash_attn_3_func from flash_attn_interface import flash_attn_varlen_func as flash_attn_3_varlen_func else: flash_attn_3_func = None flash_attn_3_varlen_func = None if _CAN_USE_SAGE_ATTN: from sageattention import ( sageattn, sageattn_qk_int8_pv_fp8_cuda, sageattn_qk_int8_pv_fp8_cuda_sm90, sageattn_qk_int8_pv_fp16_cuda, sageattn_qk_int8_pv_fp16_triton, sageattn_varlen, ) else: sageattn = None sageattn_qk_int8_pv_fp16_cuda = None sageattn_qk_int8_pv_fp16_triton = None sageattn_qk_int8_pv_fp8_cuda = None sageattn_qk_int8_pv_fp8_cuda_sm90 = None sageattn_varlen = None if _CAN_USE_FLEX_ATTN: # We cannot import the flex_attention function from the package directly because it is expected (from the # pytorch documentation) that the user may compile it. If we import directly, we will not have access to the # compiled function. 
import torch.nn.attention.flex_attention as flex_attention if _CAN_USE_NPU_ATTN: from torch_npu import npu_fusion_attention else: npu_fusion_attention = None if _CAN_USE_XLA_ATTN: from torch_xla.experimental.custom_kernel import flash_attention as xla_flash_attention else: xla_flash_attention = None if _CAN_USE_XFORMERS_ATTN: import xformers.ops as xops else: xops = None logger = get_logger(__name__) # pylint: disable=invalid-name # TODO(aryan): Add support for the following: # - Sage Attention++ # - block sparse, radial and other attention methods # - CP with sage attention, flex, xformers, other missing backends # - Add support for normal and CP training with backends that don't support it yet _SAGE_ATTENTION_PV_ACCUM_DTYPE = Literal["fp32", "fp32+fp32"] _SAGE_ATTENTION_QK_QUANT_GRAN = Literal["per_thread", "per_warp"] _SAGE_ATTENTION_QUANTIZATION_BACKEND = Literal["cuda", "triton"] class AttentionBackendName(str, Enum): # EAGER = "eager" # `flash-attn` FLASH = "flash" FLASH_VARLEN = "flash_varlen" _FLASH_3 = "_flash_3" _FLASH_VARLEN_3 = "_flash_varlen_3" # PyTorch native FLEX = "flex" NATIVE = "native" _NATIVE_CUDNN = "_native_cudnn" _NATIVE_EFFICIENT = "_native_efficient" _NATIVE_FLASH = "_native_flash" _NATIVE_MATH = "_native_math" _NATIVE_NPU = "_native_npu" _NATIVE_XLA = "_native_xla" # `sageattention` SAGE = "sage" SAGE_VARLEN = "sage_varlen" _SAGE_QK_INT8_PV_FP8_CUDA = "_sage_qk_int8_pv_fp8_cuda" _SAGE_QK_INT8_PV_FP8_CUDA_SM90 = "_sage_qk_int8_pv_fp8_cuda_sm90" _SAGE_QK_INT8_PV_FP16_CUDA = "_sage_qk_int8_pv_fp16_cuda" _SAGE_QK_INT8_PV_FP16_TRITON = "_sage_qk_int8_pv_fp16_triton" # TODO: let's not add support for Sparge Attention now because it requires tuning per model # We can look into supporting something "autotune"-ing in the future # SPARGE = "sparge" # `xformers` XFORMERS = "xformers" class _AttentionBackendRegistry: _backends = {} _constraints = {} _supported_arg_names = {} _active_backend = AttentionBackendName(DIFFUSERS_ATTN_BACKEND) _checks_enabled = DIFFUSERS_ATTN_CHECKS @classmethod def register(cls, backend: AttentionBackendName, constraints: Optional[List[Callable]] = None): logger.debug(f"Registering attention backend: {backend} with constraints: {constraints}") def decorator(func): cls._backends[backend] = func cls._constraints[backend] = constraints or [] cls._supported_arg_names[backend] = set(inspect.signature(func).parameters.keys()) return func return decorator @classmethod def get_active_backend(cls): return cls._active_backend, cls._backends[cls._active_backend] @classmethod def list_backends(cls): return list(cls._backends.keys()) @contextlib.contextmanager def attention_backend(backend: Union[str, AttentionBackendName] = AttentionBackendName.NATIVE): """ Context manager to set the active attention backend. 
""" if backend not in _AttentionBackendRegistry._backends: raise ValueError(f"Backend {backend} is not registered.") backend = AttentionBackendName(backend) _check_attention_backend_requirements(backend) old_backend = _AttentionBackendRegistry._active_backend _AttentionBackendRegistry._active_backend = backend try: yield finally: _AttentionBackendRegistry._active_backend = old_backend def dispatch_attention_fn( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, attention_kwargs: Optional[Dict[str, Any]] = None, *, backend: Optional[AttentionBackendName] = None, ) -> torch.Tensor: attention_kwargs = attention_kwargs or {} if backend is None: # If no backend is specified, we either use the default backend (set via the DIFFUSERS_ATTN_BACKEND environment # variable), or we use a custom backend based on whether user is using the `attention_backend` context manager backend_name, backend_fn = _AttentionBackendRegistry.get_active_backend() else: backend_name = AttentionBackendName(backend) backend_fn = _AttentionBackendRegistry._backends.get(backend_name) kwargs = { "query": query, "key": key, "value": value, "attn_mask": attn_mask, "dropout_p": dropout_p, "is_causal": is_causal, "scale": scale, **attention_kwargs, } if is_torch_version(">=", "2.5.0"): kwargs["enable_gqa"] = enable_gqa if _AttentionBackendRegistry._checks_enabled: removed_kwargs = set(kwargs) - set(_AttentionBackendRegistry._supported_arg_names[backend_name]) if removed_kwargs: logger.warning(f"Removing unsupported arguments for attention backend {backend_name}: {removed_kwargs}.") for check in _AttentionBackendRegistry._constraints.get(backend_name): check(**kwargs) kwargs = {k: v for k, v in kwargs.items() if k in _AttentionBackendRegistry._supported_arg_names[backend_name]} return backend_fn(**kwargs) # ===== Checks ===== # A list of very simple functions to catch common errors quickly when debugging. def _check_attn_mask_or_causal(attn_mask: Optional[torch.Tensor], is_causal: bool, **kwargs) -> None: if attn_mask is not None and is_causal: raise ValueError("`is_causal` cannot be True when `attn_mask` is not None.") def _check_device(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: if query.device != key.device or query.device != value.device: raise ValueError("Query, key, and value must be on the same device.") if query.dtype != key.dtype or query.dtype != value.dtype: raise ValueError("Query, key, and value must have the same dtype.") def _check_device_cuda(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: _check_device(query, key, value) if query.device.type != "cuda": raise ValueError("Query, key, and value must be on a CUDA device.") def _check_device_cuda_atleast_smXY(major: int, minor: int) -> Callable: def check_device_cuda(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: _check_device_cuda(query, key, value) if torch.cuda.get_device_capability(query.device) < (major, minor): raise ValueError( f"Query, key, and value must be on a CUDA device with compute capability >= {major}.{minor}." 
) return check_device_cuda def _check_qkv_dtype_match(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: if query.dtype != key.dtype: raise ValueError("Query and key must have the same dtype.") if query.dtype != value.dtype: raise ValueError("Query and value must have the same dtype.") def _check_qkv_dtype_bf16_or_fp16(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: _check_qkv_dtype_match(query, key, value) if query.dtype not in (torch.bfloat16, torch.float16): raise ValueError("Query, key, and value must be either bfloat16 or float16.") def _check_shape( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, **kwargs, ) -> None: if query.shape[-1] != key.shape[-1]: raise ValueError("Query and key must have the same last dimension.") if query.shape[-2] != value.shape[-2]: raise ValueError("Query and value must have the same second to last dimension.") if attn_mask is not None and attn_mask.shape[-1] != key.shape[-2]: raise ValueError("Attention mask must match the key's second to last dimension.") # ===== Helper functions ===== def _check_attention_backend_requirements(backend: AttentionBackendName) -> None: if backend in [AttentionBackendName.FLASH, AttentionBackendName.FLASH_VARLEN]: if not _CAN_USE_FLASH_ATTN: raise RuntimeError( f"Flash Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `flash-attn>={_REQUIRED_FLASH_VERSION}`." ) elif backend in [AttentionBackendName._FLASH_3, AttentionBackendName._FLASH_VARLEN_3]: if not _CAN_USE_FLASH_ATTN_3: raise RuntimeError( f"Flash Attention 3 backend '{backend.value}' is not usable because of missing package or the version is too old. Please build FA3 beta release from source." ) elif backend in [ AttentionBackendName.SAGE, AttentionBackendName.SAGE_VARLEN, AttentionBackendName._SAGE_QK_INT8_PV_FP8_CUDA, AttentionBackendName._SAGE_QK_INT8_PV_FP8_CUDA_SM90, AttentionBackendName._SAGE_QK_INT8_PV_FP16_CUDA, AttentionBackendName._SAGE_QK_INT8_PV_FP16_TRITON, ]: if not _CAN_USE_SAGE_ATTN: raise RuntimeError( f"Sage Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `sageattention>={_REQUIRED_SAGE_VERSION}`." ) elif backend == AttentionBackendName.FLEX: if not _CAN_USE_FLEX_ATTN: raise RuntimeError( f"Flex Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `torch>=2.5.0`." ) elif backend == AttentionBackendName._NATIVE_NPU: if not _CAN_USE_NPU_ATTN: raise RuntimeError( f"NPU Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `torch_npu`." ) elif backend == AttentionBackendName._NATIVE_XLA: if not _CAN_USE_XLA_ATTN: raise RuntimeError( f"XLA Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `torch_xla>={_REQUIRED_XLA_VERSION}`." ) elif backend == AttentionBackendName.XFORMERS: if not _CAN_USE_XFORMERS_ATTN: raise RuntimeError( f"Xformers Attention backend '{backend.value}' is not usable because of missing package or the version is too old. Please install `xformers>={_REQUIRED_XFORMERS_VERSION}`." 
) @functools.lru_cache(maxsize=128) def _prepare_for_flash_attn_or_sage_varlen_without_mask( batch_size: int, seq_len_q: int, seq_len_kv: int, device: Optional[torch.device] = None, ): seqlens_q = torch.full((batch_size,), seq_len_q, dtype=torch.int32, device=device) seqlens_k = torch.full((batch_size,), seq_len_kv, dtype=torch.int32, device=device) cu_seqlens_q = torch.zeros(batch_size + 1, dtype=torch.int32, device=device) cu_seqlens_k = torch.zeros(batch_size + 1, dtype=torch.int32, device=device) cu_seqlens_q[1:] = torch.cumsum(seqlens_q, dim=0) cu_seqlens_k[1:] = torch.cumsum(seqlens_k, dim=0) max_seqlen_q = seqlens_q.max().item() max_seqlen_k = seqlens_k.max().item() return (seqlens_q, seqlens_k), (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) def _prepare_for_flash_attn_or_sage_varlen_with_mask( batch_size: int, seq_len_q: int, attn_mask: torch.Tensor, device: Optional[torch.device] = None, ): seqlens_q = torch.full((batch_size,), seq_len_q, dtype=torch.int32, device=device) seqlens_k = attn_mask.sum(dim=1, dtype=torch.int32) cu_seqlens_q = torch.zeros(batch_size + 1, dtype=torch.int32, device=device) cu_seqlens_k = torch.zeros(batch_size + 1, dtype=torch.int32, device=device) cu_seqlens_q[1:] = torch.cumsum(seqlens_q, dim=0) cu_seqlens_k[1:] = torch.cumsum(seqlens_k, dim=0) max_seqlen_q = seqlens_q.max().item() max_seqlen_k = seqlens_k.max().item() return (seqlens_q, seqlens_k), (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) def _prepare_for_flash_attn_or_sage_varlen( batch_size: int, seq_len_q: int, seq_len_kv: int, attn_mask: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, ) -> None: if attn_mask is None: return _prepare_for_flash_attn_or_sage_varlen_without_mask(batch_size, seq_len_q, seq_len_kv, device) return _prepare_for_flash_attn_or_sage_varlen_with_mask(batch_size, seq_len_q, attn_mask, device) def _normalize_attn_mask(attn_mask: torch.Tensor, batch_size: int, seq_len_k: int) -> torch.Tensor: """ Normalize an attention mask to shape [batch_size, seq_len_k] (bool) suitable for inferring seqlens_[q|k] in FlashAttention/Sage varlen. Supports 1D to 4D shapes and common broadcasting patterns. """ if attn_mask.dtype != torch.bool: raise ValueError(f"Attention mask must be of type bool, got {attn_mask.dtype}.") if attn_mask.ndim == 1: # [seq_len_k] -> broadcast across batch attn_mask = attn_mask.unsqueeze(0).expand(batch_size, seq_len_k) elif attn_mask.ndim == 2: # [batch_size, seq_len_k]. Maybe broadcast across batch if attn_mask.size(0) not in [1, batch_size]: raise ValueError( f"attn_mask.shape[0] ({attn_mask.shape[0]}) must be 1 or {batch_size} for 2D attention mask." ) attn_mask = attn_mask.expand(batch_size, seq_len_k) elif attn_mask.ndim == 3: # [batch_size, seq_len_q, seq_len_k] -> reduce over query dimension # We do this reduction because we know that arbitrary QK masks is not supported in Flash/Sage varlen. if attn_mask.size(0) not in [1, batch_size]: raise ValueError( f"attn_mask.shape[0] ({attn_mask.shape[0]}) must be 1 or {batch_size} for 3D attention mask." ) attn_mask = attn_mask.any(dim=1) attn_mask = attn_mask.expand(batch_size, seq_len_k) elif attn_mask.ndim == 4: # [batch_size, num_heads, seq_len_q, seq_len_k] or broadcastable versions if attn_mask.size(0) not in [1, batch_size]: raise ValueError( f"attn_mask.shape[0] ({attn_mask.shape[0]}) must be 1 or {batch_size} for 4D attention mask." 
) attn_mask = attn_mask.expand(batch_size, -1, -1, seq_len_k) # [B, H, Q, K] attn_mask = attn_mask.any(dim=(1, 2)) # [B, K] else: raise ValueError(f"Unsupported attention mask shape: {attn_mask.shape}") if attn_mask.shape != (batch_size, seq_len_k): raise ValueError( f"Normalized attention mask shape mismatch: got {attn_mask.shape}, expected ({batch_size}, {seq_len_k})" ) return attn_mask def _flex_attention_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx): return q_idx >= kv_idx # ===== torch op registrations ===== # Registrations are required for fullgraph tracing compatibility # TODO: library.custom_op and register_fake probably need version guards? # TODO: this is only required because the beta release FA3 does not have it. There is a PR adding # this but it was never merged: https://github.com/Dao-AILab/flash-attention/pull/1590 @torch.library.custom_op("flash_attn_3::_flash_attn_forward", mutates_args=(), device_types="cuda") def _wrapped_flash_attn_3_original( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: out, lse = flash_attn_3_func(query, key, value) lse = lse.permute(0, 2, 1) return out, lse @torch.library.register_fake("flash_attn_3::_flash_attn_forward") def _(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, seq_len, num_heads, head_dim = query.shape lse_shape = (batch_size, seq_len, num_heads) return torch.empty_like(query), query.new_empty(lse_shape) # ===== Attention backends ===== @_AttentionBackendRegistry.register( AttentionBackendName.FLASH, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _flash_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dropout_p: float = 0.0, scale: Optional[float] = None, is_causal: bool = False, window_size: Tuple[int, int] = (-1, -1), softcap: float = 0.0, alibi_slopes: Optional[torch.Tensor] = None, deterministic: bool = False, return_attn_probs: bool = False, ) -> torch.Tensor: out = flash_attn_func( q=query, k=key, v=value, dropout_p=dropout_p, softmax_scale=scale, causal=is_causal, window_size=window_size, softcap=softcap, alibi_slopes=alibi_slopes, deterministic=deterministic, return_attn_probs=return_attn_probs, ) return out @_AttentionBackendRegistry.register( AttentionBackendName.FLASH_VARLEN, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _flash_varlen_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None, max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None, dropout_p: float = 0.0, scale: Optional[float] = None, is_causal: bool = False, window_size: Tuple[int, int] = (-1, -1), softcap: float = 0.0, alibi_slopes: Optional[torch.Tensor] = None, deterministic: bool = False, return_attn_probs: bool = False, attn_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, seq_len_q, _, _ = query.shape _, seq_len_kv, _, _ = key.shape if attn_mask is not None: attn_mask = _normalize_attn_mask(attn_mask, batch_size, seq_len_kv) if any(x is None for x in (cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k)): (_, seqlens_k), (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = ( _prepare_for_flash_attn_or_sage_varlen( batch_size, seq_len_q, seq_len_kv, attn_mask=attn_mask, device=query.device ) ) else: seqlens_k = torch.full((batch_size,), max_seqlen_k, dtype=torch.int32, device=query.device) 
cu_seqlens_q = cu_seqlens_q.to(dtype=torch.int32, device=query.device) cu_seqlens_k = cu_seqlens_k.to(dtype=torch.int32, device=query.device) key_valid, value_valid = [], [] for b in range(batch_size): valid_len = seqlens_k[b] key_valid.append(key[b, :valid_len]) value_valid.append(value[b, :valid_len]) query_packed = query.flatten(0, 1) key_packed = torch.cat(key_valid, dim=0) value_packed = torch.cat(value_valid, dim=0) out = flash_attn_varlen_func( q=query_packed, k=key_packed, v=value_packed, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=dropout_p, softmax_scale=scale, causal=is_causal, window_size=window_size, softcap=softcap, alibi_slopes=alibi_slopes, deterministic=deterministic, return_attn_probs=return_attn_probs, ) out = out.unflatten(0, (batch_size, -1)) return out @_AttentionBackendRegistry.register( AttentionBackendName._FLASH_3, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _flash_attention_3( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, scale: Optional[float] = None, is_causal: bool = False, window_size: Tuple[int, int] = (-1, -1), softcap: float = 0.0, deterministic: bool = False, return_attn_probs: bool = False, ) -> torch.Tensor: out, lse, *_ = flash_attn_3_func( q=query, k=key, v=value, softmax_scale=scale, causal=is_causal, qv=None, q_descale=None, k_descale=None, v_descale=None, window_size=window_size, attention_chunk=0, softcap=softcap, num_splits=1, pack_gqa=None, deterministic=deterministic, sm_margin=0, ) return (out, lse) if return_attn_probs else out @_AttentionBackendRegistry.register( AttentionBackendName._FLASH_VARLEN_3, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _flash_varlen_attention_3( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None, max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None, scale: Optional[float] = None, is_causal: bool = False, window_size: Tuple[int, int] = (-1, -1), softcap: float = 0.0, deterministic: bool = False, return_attn_probs: bool = False, attn_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, seq_len_q, _, _ = query.shape _, seq_len_kv, _, _ = key.shape if attn_mask is not None: attn_mask = _normalize_attn_mask(attn_mask, batch_size, seq_len_kv) if any(x is None for x in (cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k)): (_, seqlens_k), (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = ( _prepare_for_flash_attn_or_sage_varlen( batch_size, seq_len_q, seq_len_kv, attn_mask=attn_mask, device=query.device ) ) else: seqlens_k = torch.full((batch_size,), max_seqlen_k, dtype=torch.int32, device=query.device) cu_seqlens_q = cu_seqlens_q.to(dtype=torch.int32, device=query.device) cu_seqlens_k = cu_seqlens_k.to(dtype=torch.int32, device=query.device) key_valid, value_valid = [], [] for b in range(batch_size): valid_len = seqlens_k[b] key_valid.append(key[b, :valid_len]) value_valid.append(value[b, :valid_len]) query_packed = query.flatten(0, 1) key_packed = torch.cat(key_valid, dim=0) value_packed = torch.cat(value_valid, dim=0) out, lse, *_ = flash_attn_3_varlen_func( q=query_packed, k=key_packed, v=value_packed, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, seqused_q=None, seqused_k=None, softmax_scale=scale, causal=is_causal, qv=None, q_descale=None, k_descale=None, 
v_descale=None, window_size=window_size, softcap=softcap, num_splits=1, pack_gqa=None, deterministic=deterministic, sm_margin=0, ) out = out.unflatten(0, (batch_size, -1)) return (out, lse) if return_attn_probs else out @_AttentionBackendRegistry.register( AttentionBackendName.FLEX, constraints=[_check_attn_mask_or_causal, _check_device, _check_shape], ) def _native_flex_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[Union[torch.Tensor, "flex_attention.BlockMask"]] = None, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, return_lse: bool = False, kernel_options: Optional[Dict[str, Any]] = None, ) -> torch.Tensor: # TODO: should we LRU cache the block mask creation? score_mod = None block_mask = None batch_size, seq_len_q, num_heads, _ = query.shape _, seq_len_kv, _, _ = key.shape if attn_mask is None or isinstance(attn_mask, flex_attention.BlockMask): block_mask = attn_mask elif is_causal: block_mask = flex_attention.create_block_mask( _flex_attention_causal_mask_mod, batch_size, num_heads, seq_len_q, seq_len_kv, query.device ) elif torch.is_tensor(attn_mask): if attn_mask.ndim == 2: attn_mask = attn_mask.view(attn_mask.size(0), 1, attn_mask.size(1), 1) attn_mask = attn_mask.expand(batch_size, num_heads, seq_len_q, seq_len_kv) if attn_mask.dtype == torch.bool: # TODO: this probably does not work but verify! def mask_mod(batch_idx, head_idx, q_idx, kv_idx): return attn_mask[batch_idx, head_idx, q_idx, kv_idx] block_mask = flex_attention.create_block_mask( mask_mod, batch_size, None, seq_len_q, seq_len_kv, query.device ) else: def score_mod(score, batch_idx, head_idx, q_idx, kv_idx): return score + attn_mask[batch_idx, head_idx, q_idx, kv_idx] else: raise ValueError("Attention mask must be either None, a BlockMask, or a 2D/4D tensor.") query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) out = flex_attention.flex_attention( query=query, key=key, value=value, score_mod=score_mod, block_mask=block_mask, scale=scale, enable_gqa=enable_gqa, return_lse=return_lse, kernel_options=kernel_options, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName.NATIVE, constraints=[_check_device, _check_shape], ) def _native_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) out = torch.nn.functional.scaled_dot_product_attention( query=query, key=key, value=value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_CUDNN, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _native_cudnn_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION): out = torch.nn.functional.scaled_dot_product_attention( query=query, key=key, value=value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, 
scale=scale, enable_gqa=enable_gqa, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_EFFICIENT, constraints=[_check_device, _check_shape], ) def _native_efficient_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION): out = torch.nn.functional.scaled_dot_product_attention( query=query, key=key, value=value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_FLASH, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _native_flash_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.FLASH_ATTENTION): out = torch.nn.functional.scaled_dot_product_attention( query=query, key=key, value=value, attn_mask=None, # not supported dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_MATH, constraints=[_check_device, _check_shape], ) def _native_math_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): out = torch.nn.functional.scaled_dot_product_attention( query=query, key=key, value=value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa, ) out = out.permute(0, 2, 1, 3) return out @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_NPU, constraints=[_check_device, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _native_npu_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dropout_p: float = 0.0, scale: Optional[float] = None, ) -> torch.Tensor: return npu_fusion_attention( query, key, value, query.size(2), # num_heads input_layout="BSND", pse=None, scale=1.0 / math.sqrt(query.shape[-1]) if scale is None else scale, pre_tockens=65536, next_tockens=65536, keep_prob=1.0 - dropout_p, sync=False, inner_precise=0, )[0] # Reference: https://github.com/pytorch/xla/blob/06c5533de6588f6b90aa1655d9850bcf733b90b4/torch_xla/experimental/custom_kernel.py#L853 @_AttentionBackendRegistry.register( AttentionBackendName._NATIVE_XLA, constraints=[_check_device, _check_shape], ) def _native_xla_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, ) -> torch.Tensor: query, key, value = (x.permute(0, 2, 1, 3) for x in (query, key, value)) query = query / math.sqrt(query.shape[-1]) out = xla_flash_attention( q=query, k=key, v=value, causal=is_causal, ) out = out.permute(0, 2, 1, 
3) return out @_AttentionBackendRegistry.register( AttentionBackendName.SAGE, constraints=[_check_device_cuda, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _sage_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, scale: Optional[float] = None, return_lse: bool = False, ) -> torch.Tensor: return sageattn( q=query, k=key, v=value, tensor_layout="NHD", is_causal=is_causal, sm_scale=scale, return_lse=return_lse, ) @_AttentionBackendRegistry.register( AttentionBackendName.SAGE_VARLEN, constraints=[_check_device_cuda, _check_qkv_dtype_bf16_or_fp16, _check_shape], ) def _sage_varlen_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None, max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None, is_causal: bool = False, scale: Optional[float] = None, smooth_k: bool = True, attn_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, seq_len_q, _, _ = query.shape _, seq_len_kv, _, _ = key.shape if attn_mask is not None: attn_mask = _normalize_attn_mask(attn_mask, batch_size, seq_len_kv) if any(x is None for x in (cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k)): (_, seqlens_k), (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = ( _prepare_for_flash_attn_or_sage_varlen( batch_size, seq_len_q, seq_len_kv, attn_mask=attn_mask, device=query.device ) ) else: seqlens_k = torch.full((batch_size,), max_seqlen_k, dtype=torch.int32, device=query.device) cu_seqlens_q = cu_seqlens_q.to(dtype=torch.int32, device=query.device) cu_seqlens_k = cu_seqlens_k.to(dtype=torch.int32, device=query.device) key_valid, value_valid = [], [] for b in range(batch_size): valid_len = seqlens_k[b] key_valid.append(key[b, :valid_len]) value_valid.append(value[b, :valid_len]) query_packed = query.flatten(0, 1) key_packed = torch.cat(key_valid, dim=0) value_packed = torch.cat(value_valid, dim=0) out = sageattn_varlen( q=query_packed, k=key_packed, v=value_packed, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, is_causal=is_causal, sm_scale=scale, smooth_k=smooth_k, ) out = out.unflatten(0, (batch_size, -1)) return out @_AttentionBackendRegistry.register( AttentionBackendName._SAGE_QK_INT8_PV_FP8_CUDA, constraints=[_check_device_cuda_atleast_smXY(9, 0), _check_shape], ) def _sage_qk_int8_pv_fp8_cuda_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, scale: Optional[float] = None, qk_quant_gran: _SAGE_ATTENTION_QK_QUANT_GRAN = "per_thread", pv_accum_dtype: _SAGE_ATTENTION_PV_ACCUM_DTYPE = "fp32+fp32", smooth_k: bool = True, smooth_v: bool = False, return_lse: bool = False, ) -> torch.Tensor: return sageattn_qk_int8_pv_fp8_cuda( q=query, k=key, v=value, tensor_layout="NHD", is_causal=is_causal, qk_quant_gran=qk_quant_gran, sm_scale=scale, pv_accum_dtype=pv_accum_dtype, smooth_k=smooth_k, smooth_v=smooth_v, return_lse=return_lse, ) @_AttentionBackendRegistry.register( AttentionBackendName._SAGE_QK_INT8_PV_FP8_CUDA_SM90, constraints=[_check_device_cuda_atleast_smXY(9, 0), _check_shape], ) def _sage_qk_int8_pv_fp8_cuda_sm90_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, scale: Optional[float] = None, qk_quant_gran: _SAGE_ATTENTION_QK_QUANT_GRAN = "per_thread", pv_accum_dtype: _SAGE_ATTENTION_PV_ACCUM_DTYPE = "fp32+fp32", smooth_k: bool = True, return_lse: bool = False, ) -> 
torch.Tensor: return sageattn_qk_int8_pv_fp8_cuda_sm90( q=query, k=key, v=value, tensor_layout="NHD", is_causal=is_causal, qk_quant_gran=qk_quant_gran, sm_scale=scale, pv_accum_dtype=pv_accum_dtype, smooth_k=smooth_k, return_lse=return_lse, ) @_AttentionBackendRegistry.register( AttentionBackendName._SAGE_QK_INT8_PV_FP16_CUDA, constraints=[_check_device_cuda_atleast_smXY(8, 0), _check_shape], ) def _sage_qk_int8_pv_fp16_cuda_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, scale: Optional[float] = None, qk_quant_gran: _SAGE_ATTENTION_QK_QUANT_GRAN = "per_thread", pv_accum_dtype: _SAGE_ATTENTION_PV_ACCUM_DTYPE = "fp32", smooth_k: bool = True, smooth_v: bool = False, return_lse: bool = False, ) -> torch.Tensor: return sageattn_qk_int8_pv_fp16_cuda( q=query, k=key, v=value, tensor_layout="NHD", is_causal=is_causal, qk_quant_gran=qk_quant_gran, sm_scale=scale, pv_accum_dtype=pv_accum_dtype, smooth_k=smooth_k, smooth_v=smooth_v, return_lse=return_lse, ) @_AttentionBackendRegistry.register( AttentionBackendName._SAGE_QK_INT8_PV_FP16_TRITON, constraints=[_check_device_cuda_atleast_smXY(8, 0), _check_shape], ) def _sage_qk_int8_pv_fp16_triton_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, is_causal: bool = False, scale: Optional[float] = None, quantization_backend: _SAGE_ATTENTION_QUANTIZATION_BACKEND = "triton", smooth_k: bool = True, return_lse: bool = False, ) -> torch.Tensor: return sageattn_qk_int8_pv_fp16_triton( q=query, k=key, v=value, tensor_layout="NHD", quantization_backend=quantization_backend, is_causal=is_causal, sm_scale=scale, smooth_k=smooth_k, return_lse=return_lse, ) @_AttentionBackendRegistry.register( AttentionBackendName.XFORMERS, constraints=[_check_attn_mask_or_causal, _check_device, _check_shape], ) def _xformers_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False, ) -> torch.Tensor: batch_size, seq_len_q, num_heads_q, _ = query.shape _, seq_len_kv, num_heads_kv, _ = key.shape if is_causal: attn_mask = xops.LowerTriangularMask() elif attn_mask is not None: if attn_mask.ndim == 2: attn_mask = attn_mask.view(attn_mask.size(0), 1, attn_mask.size(1), 1) elif attn_mask.ndim != 4: raise ValueError("Only 2D and 4D attention masks are supported for xformers attention.") attn_mask = attn_mask.expand(batch_size, num_heads_q, seq_len_q, seq_len_kv).type_as(query) if enable_gqa: if num_heads_q % num_heads_kv != 0: raise ValueError("Number of heads in query must be divisible by number of heads in key/value.") num_heads_per_group = num_heads_q // num_heads_kv query = query.unflatten(2, (num_heads_kv, -1)) key = key.unflatten(2, (num_heads_kv, -1)).expand(-1, -1, -1, num_heads_per_group, -1) value = value.unflatten(2, (num_heads_kv, -1)).expand(-1, -1, -1, num_heads_per_group, -1) out = xops.memory_efficient_attention(query, key, value, attn_mask, dropout_p, scale) if enable_gqa: out = out.flatten(2, 3) return out
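# Minimal usage sketch, not part of the upstream module: it assumes the default native
# SDPA backend is available and uses the (batch, seq_len, num_heads, head_dim) layout
# expected by the backends registered above; all shapes are arbitrary illustration values.
if __name__ == "__main__":
    q = torch.randn(2, 64, 8, 64)
    k = torch.randn(2, 64, 8, 64)
    v = torch.randn(2, 64, 8, 64)

    # Backend selected via DIFFUSERS_ATTN_BACKEND ("native" unless overridden).
    out_default = dispatch_attention_fn(q, k, v)

    # Temporarily switch to the pure-math SDPA backend registered above.
    with attention_backend(AttentionBackendName._NATIVE_MATH):
        out_math = dispatch_attention_fn(q, k, v)

    print(out_default.shape, torch.allclose(out_default, out_math, atol=1e-4))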
diffusers/src/diffusers/models/attention_dispatch.py/0
{ "file_path": "diffusers/src/diffusers/models/attention_dispatch.py", "repo_id": "diffusers", "token_count": 19449 }
160
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder class TemporalDecoder(nn.Module): def __init__( self, in_channels: int = 4, out_channels: int = 3, block_out_channels: Tuple[int] = (128, 256, 512, 512), layers_per_block: int = 2, ): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.mid_block = MidBlockTemporalDecoder( num_layers=self.layers_per_block, in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], attention_head_dim=block_out_channels[-1], ) # up self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i in range(len(block_out_channels)): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = UpBlockTemporalDecoder( num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6) self.conv_act = nn.SiLU() self.conv_out = torch.nn.Conv2d( in_channels=block_out_channels[0], out_channels=out_channels, kernel_size=3, padding=1, ) conv_out_kernel_size = (3, 1, 1) padding = [int(k // 2) for k in conv_out_kernel_size] self.time_conv_out = torch.nn.Conv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=conv_out_kernel_size, padding=padding, ) self.gradient_checkpointing = False def forward( self, sample: torch.Tensor, image_only_indicator: torch.Tensor, num_frames: int = 1, ) -> torch.Tensor: r"""The forward method of the `Decoder` class.""" sample = self.conv_in(sample) upscale_dtype = next(itertools.chain(self.up_blocks.parameters(), self.up_blocks.buffers())).dtype if torch.is_grad_enabled() and self.gradient_checkpointing: # middle sample = self._gradient_checkpointing_func( self.mid_block, sample, image_only_indicator, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = self._gradient_checkpointing_func( up_block, sample, image_only_indicator, ) else: # middle sample = self.mid_block(sample, image_only_indicator=image_only_indicator) sample = sample.to(upscale_dtype) # up for up_block in 
self.up_blocks: sample = up_block(sample, image_only_indicator=image_only_indicator) # post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) batch_frames, channels, height, width = sample.shape batch_size = batch_frames // num_frames sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) sample = self.time_conv_out(sample) sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return sample class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without losing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, latent_channels: int = 4, sample_size: int = 32, scaling_factor: float = 0.18215, force_upcast: float = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, double_z=True, ) # pass init params to Decoder self.decoder = TemporalDecoder( in_channels=latent_channels, out_channels=out_channels, block_out_channels=block_out_channels, layers_per_block=layers_per_block, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. 
""" if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoders.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) @apply_forward_hook def decode( self, z: torch.Tensor, num_frames: int, return_dict: bool = True, ) -> Union[DecoderOutput, torch.Tensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size = z.shape[0] // num_frames image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, num_frames: int = 1, ) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, num_frames=num_frames).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py", "repo_id": "diffusers", "token_count": 6362 }
161
# Copyright 2025 HunyuanDiT Authors, Qixun Wang and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput, logging from ..attention_processor import AttentionProcessor from ..embeddings import ( HunyuanCombinedTimestepTextSizeStyleEmbedding, PatchEmbed, PixArtAlphaTextProjection, ) from ..modeling_utils import ModelMixin from ..transformers.hunyuan_transformer_2d import HunyuanDiTBlock from .controlnet import Tuple, zero_module logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class HunyuanControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] class HunyuanDiT2DControlNetModel(ModelMixin, ConfigMixin): @register_to_config def __init__( self, conditioning_channels: int = 3, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "gelu-approximate", sample_size=32, hidden_size=1152, transformer_num_layers: int = 40, mlp_ratio: float = 4.0, cross_attention_dim: int = 1024, cross_attention_dim_t5: int = 2048, pooled_projection_dim: int = 1024, text_len: int = 77, text_len_t5: int = 256, use_style_cond_and_image_meta_size: bool = True, ): super().__init__() self.num_heads = num_attention_heads self.inner_dim = num_attention_heads * attention_head_dim self.text_embedder = PixArtAlphaTextProjection( in_features=cross_attention_dim_t5, hidden_size=cross_attention_dim_t5 * 4, out_features=cross_attention_dim, act_fn="silu_fp32", ) self.text_embedding_padding = nn.Parameter( torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32) ) self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, in_channels=in_channels, embed_dim=hidden_size, patch_size=patch_size, pos_embed_type=None, ) self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding( hidden_size, pooled_projection_dim=pooled_projection_dim, seq_len=text_len_t5, cross_attention_dim=cross_attention_dim_t5, use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size, ) # controlnet_blocks self.controlnet_blocks = nn.ModuleList([]) # HunyuanDiT Blocks self.blocks = nn.ModuleList( [ HunyuanDiTBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, activation_fn=activation_fn, ff_inner_dim=int(self.inner_dim * mlp_ratio), cross_attention_dim=cross_attention_dim, qk_norm=True, # See https://huggingface.co/papers/2302.05442 for details. 
skip=False, # always False as it is the first half of the model ) for layer in range(transformer_num_layers // 2 - 1) ] ) self.input_block = zero_module(nn.Linear(hidden_size, hidden_size)) for _ in range(len(self.blocks)): controlnet_block = nn.Linear(hidden_size, hidden_size) controlnet_block = zero_module(controlnet_block) self.controlnet_blocks.append(controlnet_block) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) @classmethod def from_transformer( cls, transformer, conditioning_channels=3, transformer_num_layers=None, load_weights_from_transformer=True ): config = transformer.config activation_fn = config.activation_fn attention_head_dim = config.attention_head_dim cross_attention_dim = config.cross_attention_dim cross_attention_dim_t5 = config.cross_attention_dim_t5 hidden_size = config.hidden_size in_channels = config.in_channels mlp_ratio = config.mlp_ratio num_attention_heads = config.num_attention_heads patch_size = config.patch_size sample_size = config.sample_size text_len = config.text_len text_len_t5 = config.text_len_t5 conditioning_channels = conditioning_channels transformer_num_layers = transformer_num_layers or config.transformer_num_layers controlnet = cls( conditioning_channels=conditioning_channels, transformer_num_layers=transformer_num_layers, activation_fn=activation_fn, attention_head_dim=attention_head_dim, cross_attention_dim=cross_attention_dim, cross_attention_dim_t5=cross_attention_dim_t5, hidden_size=hidden_size, in_channels=in_channels, mlp_ratio=mlp_ratio, num_attention_heads=num_attention_heads, patch_size=patch_size, sample_size=sample_size, text_len=text_len, text_len_t5=text_len_t5, ) if load_weights_from_transformer: key = controlnet.load_state_dict(transformer.state_dict(), strict=False) logger.warning(f"controlnet load from Hunyuan-DiT. missing_keys: {key[0]}") return controlnet def forward( self, hidden_states, timestep, controlnet_cond: torch.Tensor, conditioning_scale: float = 1.0, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, return_dict=True, ): """ The [`HunyuanDiT2DControlNetModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`): The input tensor. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. controlnet_cond ( `torch.Tensor` ): The conditioning input to ControlNet. conditioning_scale ( `float` ): Indicate the conditioning scale. encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of `BertModel`. text_embedding_mask: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of `BertModel`. encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder. text_embedding_mask_t5: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of T5 Text Encoder. image_meta_size (torch.Tensor): Conditional embedding indicate the image sizes style: torch.Tensor: Conditional embedding indicate the style image_rotary_emb (`torch.Tensor`): The image rotary embeddings to apply on query and key tensors during attention calculation. 
return_dict: bool Whether to return a dictionary. """ height, width = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) # b,c,H,W -> b, N, C # 2. pre-process hidden_states = hidden_states + self.input_block(self.pos_embed(controlnet_cond)) temb = self.time_extra_emb( timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype ) # [B, D] # text projection batch_size, sequence_length, _ = encoder_hidden_states_t5.shape encoder_hidden_states_t5 = self.text_embedder( encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1]) ) encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1) encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1) text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1) text_embedding_mask = text_embedding_mask.unsqueeze(2).bool() encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding) block_res_samples = () for layer, block in enumerate(self.blocks): hidden_states = block( hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) # (N, L, D) block_res_samples = block_res_samples + (hidden_states,) controlnet_block_res_samples = () for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks): block_res_sample = controlnet_block(block_res_sample) controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,) # 6. scaling controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples] if not return_dict: return (controlnet_block_res_samples,) return HunyuanControlNetOutput(controlnet_block_samples=controlnet_block_res_samples) class HunyuanDiT2DMultiControlNetModel(ModelMixin): r""" `HunyuanDiT2DMultiControlNetModel` wrapper class for Multi-HunyuanDiT2DControlNetModel This module is a wrapper for multiple instances of the `HunyuanDiT2DControlNetModel`. The `forward()` API is designed to be compatible with `HunyuanDiT2DControlNetModel`. Args: controlnets (`List[HunyuanDiT2DControlNetModel]`): Provides additional conditioning to the unet during the denoising process. You must set multiple `HunyuanDiT2DControlNetModel` as a list. """ def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets) def forward( self, hidden_states, timestep, controlnet_cond: torch.Tensor, conditioning_scale: float = 1.0, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, return_dict=True, ): """ The [`HunyuanDiT2DControlNetModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`): The input tensor. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. controlnet_cond ( `torch.Tensor` ): The conditioning input to ControlNet. conditioning_scale ( `float` ): Indicate the conditioning scale. encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of `BertModel`. text_embedding_mask: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of `BertModel`. 
encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder. text_embedding_mask_t5: torch.Tensor An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output of T5 Text Encoder. image_meta_size (torch.Tensor): Conditional embedding indicate the image sizes style: torch.Tensor: Conditional embedding indicate the style image_rotary_emb (`torch.Tensor`): The image rotary embeddings to apply on query and key tensors during attention calculation. return_dict: bool Whether to return a dictionary. """ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): block_samples = controlnet( hidden_states=hidden_states, timestep=timestep, controlnet_cond=image, conditioning_scale=scale, encoder_hidden_states=encoder_hidden_states, text_embedding_mask=text_embedding_mask, encoder_hidden_states_t5=encoder_hidden_states_t5, text_embedding_mask_t5=text_embedding_mask_t5, image_meta_size=image_meta_size, style=style, image_rotary_emb=image_rotary_emb, return_dict=return_dict, ) # merge samples if i == 0: control_block_samples = block_samples else: control_block_samples = [ control_block_sample + block_sample for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0]) ] control_block_samples = (control_block_samples,) return control_block_samples
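# Minimal construction sketch, not part of the upstream module. The checkpoint id is a
# placeholder, and `HunyuanDiT2DModel` is assumed to be importable from `diffusers`:
#
#   from diffusers import HunyuanDiT2DModel
#
#   transformer = HunyuanDiT2DModel.from_pretrained("<hunyuan-dit-repo>", subfolder="transformer")
#   # Derive ControlNets from the first half of the transformer, copying matching weights.
#   controlnet_a = HunyuanDiT2DControlNetModel.from_transformer(transformer)
#   controlnet_b = HunyuanDiT2DControlNetModel.from_transformer(transformer, transformer_num_layers=20)
#
#   # The wrapper expects one conditioning image and one scale per ControlNet in `forward`.
#   multi_controlnet = HunyuanDiT2DMultiControlNetModel([controlnet_a, controlnet_b])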
diffusers/src/diffusers/models/controlnets/controlnet_hunyuan.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/controlnet_hunyuan.py", "repo_id": "diffusers", "token_count": 7436 }
162
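A minimal sketch of what `HunyuanDiT2DMultiControlNetModel.forward` above does with its per-net outputs: each controlnet returns a tuple of per-block residuals, and the wrapper sums them elementwise after each net has applied its own conditioning scale. The helper name `merge_controlnet_residuals` and the random stand-in tensors below are illustrative assumptions only; the real class delegates to `HunyuanDiT2DControlNetModel` instances.

import torch

def merge_controlnet_residuals(per_net_block_samples, scales):
    """per_net_block_samples: list over controlnets, each a tuple of (N, L, D) residual tensors."""
    merged = None
    for block_samples, scale in zip(per_net_block_samples, scales):
        scaled = [sample * scale for sample in block_samples]  # per-net conditioning scale
        if merged is None:
            merged = scaled
        else:
            # elementwise sum of the residual for every transformer block
            merged = [m + s for m, s in zip(merged, scaled)]
    return tuple(merged)

# two fake controlnets, three blocks each, residuals of shape (batch=1, tokens=4, dim=8)
net_a = tuple(torch.randn(1, 4, 8) for _ in range(3))
net_b = tuple(torch.randn(1, 4, 8) for _ in range(3))
merged = merge_controlnet_residuals([net_a, net_b], scales=[1.0, 0.5])
print(len(merged), merged[0].shape)  # 3 torch.Size([1, 4, 8])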
from dataclasses import dataclass

from ..utils import BaseOutput


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    latent_dist: "DiagonalGaussianDistribution"  # noqa: F821


@dataclass
class Transformer2DModelOutput(BaseOutput):
    """
    The output of [`Transformer2DModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
            The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
            distributions for the unnoised latent pixels.
    """

    sample: "torch.Tensor"  # noqa: F821
diffusers/src/diffusers/models/modeling_outputs.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_outputs.py", "repo_id": "diffusers", "token_count": 377 }
163
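The two dataclasses above inherit from diffusers' `BaseOutput`, so callers can treat a model result either as a named output or as a plain tuple (the `if not return_dict: return (sample,)` branch seen throughout these files). A small usage sketch, assuming the standard `BaseOutput` helpers (attribute access, key access, `to_tuple`) behave as in current diffusers:

import torch
from diffusers.models.modeling_outputs import Transformer2DModelOutput

sample = torch.randn(1, 4, 32, 32)
out = Transformer2DModelOutput(sample=sample)

# BaseOutput subclasses act as both a dataclass and a read-only mapping (assumed BaseOutput behaviour)
assert out.sample is sample         # attribute access
assert out["sample"] is sample      # key access
assert out.to_tuple()[0] is sample  # tuple form, equivalent to passing return_dict=False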
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...utils import BaseOutput from ..attention import BasicTransformerBlock from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin @dataclass class PriorTransformerOutput(BaseOutput): """ The output of [`PriorTransformer`]. Args: predicted_image_embedding (`torch.Tensor` of shape `(batch_size, embedding_dim)`): The predicted CLIP image embedding conditioned on the CLIP text embedding input. """ predicted_image_embedding: torch.Tensor class PriorTransformer(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): """ A Prior Transformer model. Parameters: num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head. num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use. embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states` num_embeddings (`int`, *optional*, defaults to 77): The number of embeddings of the model input `hidden_states` additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings + additional_embeddings`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. time_embed_act_fn (`str`, *optional*, defaults to 'silu'): The activation function to use to create timestep embeddings. norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before passing to Transformer blocks. Set it to `None` if normalization is not needed. embedding_proj_norm_type (`str`, *optional*, defaults to None): The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not needed. encoder_hid_proj_type (`str`, *optional*, defaults to `linear`): The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if `encoder_hidden_states` is `None`. added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model. Choose from `prd` or `None`. if choose `prd`, it will prepend a token indicating the (quantized) dot product between the text embedding and image embedding as proposed in the unclip paper https://huggingface.co/papers/2204.06125 If it is `None`, no additional embeddings will be prepended. time_embed_dim (`int, *optional*, defaults to None): The dimension of timestep embeddings. If None, will be set to `num_attention_heads * attention_head_dim` embedding_proj_dim (`int`, *optional*, default to None): The dimension of `proj_embedding`. If None, will be set to `embedding_dim`. clip_embed_dim (`int`, *optional*, default to None): The dimension of the output. If None, will be set to `embedding_dim`. 
""" @register_to_config def __init__( self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, # layer embedding_proj_norm_type: Optional[str] = None, # layer encoder_hid_proj_type: Optional[str] = "linear", # linear added_emb_type: Optional[str] = "prd", # prd time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.additional_embeddings = additional_embeddings time_embed_dim = time_embed_dim or inner_dim embedding_proj_dim = embedding_proj_dim or embedding_dim clip_embed_dim = clip_embed_dim or embedding_dim self.time_proj = Timesteps(inner_dim, True, 0) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn) self.proj_in = nn.Linear(embedding_dim, inner_dim) if embedding_proj_norm_type is None: self.embedding_proj_norm = None elif embedding_proj_norm_type == "layer": self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim) else: raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}") self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim) if encoder_hid_proj_type is None: self.encoder_hidden_states_proj = None elif encoder_hid_proj_type == "linear": self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim) else: raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}") self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim)) if added_emb_type == "prd": self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim)) elif added_emb_type is None: self.prd_embedding = None else: raise ValueError( f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, ) for d in range(num_layers) ] ) if norm_in_type == "layer": self.norm_in = nn.LayerNorm(inner_dim) elif norm_in_type is None: self.norm_in = None else: raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.") self.norm_out = nn.LayerNorm(inner_dim) self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim) causal_attention_mask = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0 ) causal_attention_mask.triu_(1) causal_attention_mask = causal_attention_mask[None, ...] self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False) self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim)) self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim)) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def forward( self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True, ): """ The [`PriorTransformer`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch_size, embedding_dim)`): The currently predicted image embeddings. timestep (`torch.LongTensor`): Current denoising step. proj_embedding (`torch.Tensor` of shape `(batch_size, embedding_dim)`): Projected embedding vector the denoising process is conditioned on. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, num_embeddings, embedding_dim)`): Hidden states of the text embeddings the denoising process is conditioned on. attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`): Text mask for the text embeddings. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformers.prior_transformer.PriorTransformerOutput`] instead of a plain tuple. Returns: [`~models.transformers.prior_transformer.PriorTransformerOutput`] or `tuple`: If return_dict is True, a [`~models.transformers.prior_transformer.PriorTransformerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ batch_size = hidden_states.shape[0] timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device) timesteps_projected = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. timesteps_projected = timesteps_projected.to(dtype=self.dtype) time_embeddings = self.time_embedding(timesteps_projected) if self.embedding_proj_norm is not None: proj_embedding = self.embedding_proj_norm(proj_embedding) proj_embeddings = self.embedding_proj(proj_embedding) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set") hidden_states = self.proj_in(hidden_states) positional_embeddings = self.positional_embedding.to(hidden_states.dtype) additional_embeds = [] additional_embeddings_len = 0 if encoder_hidden_states is not None: additional_embeds.append(encoder_hidden_states) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: proj_embeddings = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: hidden_states = hidden_states[:, None, :] additional_embeds = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1) additional_embeds.append(prd_embedding) hidden_states = torch.cat( additional_embeds, dim=1, ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: positional_embeddings = F.pad( positional_embeddings, ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ), value=0.0, ) hidden_states = hidden_states + positional_embeddings if attention_mask is not None: attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0) attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) attention_mask = attention_mask.repeat_interleave( self.config.num_attention_heads, dim=0, output_size=attention_mask.shape[0] * self.config.num_attention_heads, ) if self.norm_in is not None: hidden_states = self.norm_in(hidden_states) for block in 
self.transformer_blocks: hidden_states = block(hidden_states, attention_mask=attention_mask) hidden_states = self.norm_out(hidden_states) if self.prd_embedding is not None: hidden_states = hidden_states[:, -1] else: hidden_states = hidden_states[:, additional_embeddings_len:] predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding) def post_process_latents(self, prior_latents): prior_latents = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
diffusers/src/diffusers/models/transformers/prior_transformer.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/prior_transformer.py", "repo_id": "diffusers", "token_count": 7467 }
164
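A shape-only smoke test for the `PriorTransformer` defined above, using a deliberately tiny configuration (the real unCLIP prior uses `embedding_dim=768`, `num_embeddings=77`, `num_layers=20`); the sizes below are illustrative assumptions, not recommended settings.

import torch
from diffusers.models.transformers.prior_transformer import PriorTransformer

# tiny config: inner_dim = 2 heads * 8 dims = 16 (assumption for illustration only)
prior = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=8,
    num_layers=1,
    embedding_dim=16,
    num_embeddings=4,
    additional_embeddings=4,
)

batch = 2
hidden_states = torch.randn(batch, 16)             # current image-embedding estimate
proj_embedding = torch.randn(batch, 16)            # pooled text embedding the prior is conditioned on
encoder_hidden_states = torch.randn(batch, 4, 16)  # per-token text hidden states
attention_mask = torch.ones(batch, 4, dtype=torch.bool)

out = prior(
    hidden_states,
    timestep=10,
    proj_embedding=proj_embedding,
    encoder_hidden_states=encoder_hidden_states,
    attention_mask=attention_mask,
)
print(out.predicted_image_embedding.shape)  # torch.Size([2, 16])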
# Copyright 2025 The Lightricks team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, deprecate, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..cache_utils import CacheMixin from ..embeddings import PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LTXVideoAttentionProcessor2_0: def __new__(cls, *args, **kwargs): deprecation_message = "`LTXVideoAttentionProcessor2_0` is deprecated and this will be removed in a future version. Please use `LTXVideoAttnProcessor`" deprecate("LTXVideoAttentionProcessor2_0", "1.0.0", deprecation_message) return LTXVideoAttnProcessor(*args, **kwargs) class LTXVideoAttnProcessor: r""" Processor for implementing attention (SDPA is used by default if you're using PyTorch 2.0). This is used in the LTX model. It applies a normalization layer and rotary embedding on the query and key vector. """ _attention_backend = None def __init__(self): if is_torch_version("<", "2.0"): raise ValueError( "LTX attention processors require a minimum PyTorch version of 2.0. Please upgrade your PyTorch installation." 
) def __call__( self, attn: "LTXAttention", hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if encoder_hidden_states is None: encoder_hidden_states = hidden_states query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.norm_q(query) key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) query = query.unflatten(2, (attn.heads, -1)) key = key.unflatten(2, (attn.heads, -1)) value = value.unflatten(2, (attn.heads, -1)) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class LTXAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = LTXVideoAttnProcessor _available_processors = [LTXVideoAttnProcessor] def __init__( self, query_dim: int, heads: int = 8, kv_heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = True, cross_attention_dim: Optional[int] = None, out_bias: bool = True, qk_norm: str = "rms_norm_across_heads", processor=None, ): super().__init__() if qk_norm != "rms_norm_across_heads": raise NotImplementedError("Only 'rms_norm_across_heads' is supported as a valid value for `qk_norm`.") self.head_dim = dim_head self.inner_dim = dim_head * heads self.inner_kv_dim = self.inner_dim if kv_heads is None else dim_head * kv_heads self.query_dim = query_dim self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim self.use_bias = bias self.dropout = dropout self.out_dim = query_dim self.heads = heads norm_eps = 1e-5 norm_elementwise_affine = True self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm_k = torch.nn.RMSNorm(dim_head * kv_heads, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) self.to_v = torch.nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"attention_kwargs 
{unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) class LTXVideoRotaryPosEmbed(nn.Module): def __init__( self, dim: int, base_num_frames: int = 20, base_height: int = 2048, base_width: int = 2048, patch_size: int = 1, patch_size_t: int = 1, theta: float = 10000.0, ) -> None: super().__init__() self.dim = dim self.base_num_frames = base_num_frames self.base_height = base_height self.base_width = base_width self.patch_size = patch_size self.patch_size_t = patch_size_t self.theta = theta def _prepare_video_coords( self, batch_size: int, num_frames: int, height: int, width: int, rope_interpolation_scale: Tuple[torch.Tensor, float, float], device: torch.device, ) -> torch.Tensor: # Always compute rope in fp32 grid_h = torch.arange(height, dtype=torch.float32, device=device) grid_w = torch.arange(width, dtype=torch.float32, device=device) grid_f = torch.arange(num_frames, dtype=torch.float32, device=device) grid = torch.meshgrid(grid_f, grid_h, grid_w, indexing="ij") grid = torch.stack(grid, dim=0) grid = grid.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1) if rope_interpolation_scale is not None: grid[:, 0:1] = grid[:, 0:1] * rope_interpolation_scale[0] * self.patch_size_t / self.base_num_frames grid[:, 1:2] = grid[:, 1:2] * rope_interpolation_scale[1] * self.patch_size / self.base_height grid[:, 2:3] = grid[:, 2:3] * rope_interpolation_scale[2] * self.patch_size / self.base_width grid = grid.flatten(2, 4).transpose(1, 2) return grid def forward( self, hidden_states: torch.Tensor, num_frames: Optional[int] = None, height: Optional[int] = None, width: Optional[int] = None, rope_interpolation_scale: Optional[Tuple[torch.Tensor, float, float]] = None, video_coords: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size = hidden_states.size(0) if video_coords is None: grid = self._prepare_video_coords( batch_size, num_frames, height, width, rope_interpolation_scale=rope_interpolation_scale, device=hidden_states.device, ) else: grid = torch.stack( [ video_coords[:, 0] / self.base_num_frames, video_coords[:, 1] / self.base_height, video_coords[:, 2] / self.base_width, ], dim=-1, ) start = 1.0 end = self.theta freqs = self.theta ** torch.linspace( math.log(start, self.theta), math.log(end, self.theta), self.dim // 6, device=hidden_states.device, dtype=torch.float32, ) freqs = freqs * math.pi / 2.0 freqs = freqs * (grid.unsqueeze(-1) * 2 - 1) freqs = freqs.transpose(-1, -2).flatten(2) cos_freqs = freqs.cos().repeat_interleave(2, dim=-1) sin_freqs = freqs.sin().repeat_interleave(2, dim=-1) if self.dim % 6 != 0: cos_padding = torch.ones_like(cos_freqs[:, :, : self.dim % 6]) sin_padding = torch.zeros_like(cos_freqs[:, :, : self.dim % 6]) cos_freqs = torch.cat([cos_padding, cos_freqs], dim=-1) sin_freqs = torch.cat([sin_padding, sin_freqs], dim=-1) return cos_freqs, sin_freqs @maybe_allow_in_graph class LTXVideoTransformerBlock(nn.Module): r""" Transformer block used in [LTX](https://huggingface.co/Lightricks/LTX-Video). Args: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. qk_norm (`str`, defaults to `"rms_norm"`): The normalization layer to use. 
activation_fn (`str`, defaults to `"gelu-approximate"`): Activation function to use in feed-forward. eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: int, qk_norm: str = "rms_norm_across_heads", activation_fn: str = "gelu-approximate", attention_bias: bool = True, attention_out_bias: bool = True, eps: float = 1e-6, elementwise_affine: bool = False, ): super().__init__() self.norm1 = RMSNorm(dim, eps=eps, elementwise_affine=elementwise_affine) self.attn1 = LTXAttention( query_dim=dim, heads=num_attention_heads, kv_heads=num_attention_heads, dim_head=attention_head_dim, bias=attention_bias, cross_attention_dim=None, out_bias=attention_out_bias, qk_norm=qk_norm, ) self.norm2 = RMSNorm(dim, eps=eps, elementwise_affine=elementwise_affine) self.attn2 = LTXAttention( query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, kv_heads=num_attention_heads, dim_head=attention_head_dim, bias=attention_bias, out_bias=attention_out_bias, qk_norm=qk_norm, ) self.ff = FeedForward(dim, activation_fn=activation_fn) self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: batch_size = hidden_states.size(0) norm_hidden_states = self.norm1(hidden_states) num_ada_params = self.scale_shift_table.shape[0] ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa attn_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=None, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + attn_hidden_states * gate_msa attn_hidden_states = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=None, attention_mask=encoder_attention_mask, ) hidden_states = hidden_states + attn_hidden_states norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp) + shift_mlp ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + ff_output * gate_mlp return hidden_states @maybe_allow_in_graph class LTXVideoTransformer3DModel( ModelMixin, ConfigMixin, AttentionMixin, FromOriginalModelMixin, PeftAdapterMixin, CacheMixin ): r""" A Transformer model for video-like data used in [LTX](https://huggingface.co/Lightricks/LTX-Video). Args: in_channels (`int`, defaults to `128`): The number of channels in the input. out_channels (`int`, defaults to `128`): The number of channels in the output. patch_size (`int`, defaults to `1`): The size of the spatial patches to use in the patch embedding layer. patch_size_t (`int`, defaults to `1`): The size of the tmeporal patches to use in the patch embedding layer. num_attention_heads (`int`, defaults to `32`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `64`): The number of channels in each head. cross_attention_dim (`int`, defaults to `2048 `): The number of channels for cross attention heads. num_layers (`int`, defaults to `28`): The number of layers of Transformer blocks to use. 
activation_fn (`str`, defaults to `"gelu-approximate"`): Activation function to use in feed-forward. qk_norm (`str`, defaults to `"rms_norm_across_heads"`): The normalization layer to use. """ _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["norm"] _repeated_blocks = ["LTXVideoTransformerBlock"] @register_to_config def __init__( self, in_channels: int = 128, out_channels: int = 128, patch_size: int = 1, patch_size_t: int = 1, num_attention_heads: int = 32, attention_head_dim: int = 64, cross_attention_dim: int = 2048, num_layers: int = 28, activation_fn: str = "gelu-approximate", qk_norm: str = "rms_norm_across_heads", norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, caption_channels: int = 4096, attention_bias: bool = True, attention_out_bias: bool = True, ) -> None: super().__init__() out_channels = out_channels or in_channels inner_dim = num_attention_heads * attention_head_dim self.proj_in = nn.Linear(in_channels, inner_dim) self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5) self.time_embed = AdaLayerNormSingle(inner_dim, use_additional_conditions=False) self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) self.rope = LTXVideoRotaryPosEmbed( dim=inner_dim, base_num_frames=20, base_height=2048, base_width=2048, patch_size=patch_size, patch_size_t=patch_size_t, theta=10000.0, ) self.transformer_blocks = nn.ModuleList( [ LTXVideoTransformerBlock( dim=inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, cross_attention_dim=cross_attention_dim, qk_norm=qk_norm, activation_fn=activation_fn, attention_bias=attention_bias, attention_out_bias=attention_out_bias, eps=norm_eps, elementwise_affine=norm_elementwise_affine, ) for _ in range(num_layers) ] ) self.norm_out = nn.LayerNorm(inner_dim, eps=1e-6, elementwise_affine=False) self.proj_out = nn.Linear(inner_dim, out_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_attention_mask: torch.Tensor, num_frames: Optional[int] = None, height: Optional[int] = None, width: Optional[int] = None, rope_interpolation_scale: Optional[Union[Tuple[float, float, float], torch.Tensor]] = None, video_coords: Optional[torch.Tensor] = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> torch.Tensor: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." 
) image_rotary_emb = self.rope(hidden_states, num_frames, height, width, rope_interpolation_scale, video_coords) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) batch_size = hidden_states.size(0) hidden_states = self.proj_in(hidden_states) temb, embedded_timestep = self.time_embed( timestep.flatten(), batch_size=batch_size, hidden_dtype=hidden_states.dtype, ) temb = temb.view(batch_size, -1, temb.size(-1)) embedded_timestep = embedded_timestep.view(batch_size, -1, embedded_timestep.size(-1)) encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.size(-1)) for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, encoder_attention_mask, ) else: hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, encoder_attention_mask=encoder_attention_mask, ) scale_shift_values = self.scale_shift_table[None, None] + embedded_timestep[:, :, None] shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1] hidden_states = self.norm_out(hidden_states) hidden_states = hidden_states * (1 + scale) + shift output = self.proj_out(hidden_states) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) def apply_rotary_emb(x, freqs): cos, sin = freqs x_real, x_imag = x.unflatten(2, (-1, 2)).unbind(-1) # [B, S, C // 2] x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(2) out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) return out
diffusers/src/diffusers/models/transformers/transformer_ltx.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_ltx.py", "repo_id": "diffusers", "token_count": 10322 }
165
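The module-level `apply_rotary_emb` at the bottom of `transformer_ltx.py` rotates consecutive channel pairs by the angles encoded in `cos`/`sin`, which `LTXVideoRotaryPosEmbed` repeat-interleaves so both members of a pair share one angle. The self-contained check below re-declares the helper rather than importing it and verifies a single pair against the explicit 2-D rotation; the toy shapes are assumptions for illustration.

import torch

def apply_rotary_emb(x, freqs):
    # same pairing scheme as the module above: (x0, x1) pairs along the channel dim
    cos, sin = freqs
    x_real, x_imag = x.unflatten(2, (-1, 2)).unbind(-1)
    x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(2)
    return (x.float() * cos + x_rotated.float() * sin).to(x.dtype)

theta = torch.tensor(0.3)
x = torch.randn(1, 1, 2)                   # batch=1, one token, one channel pair (x0, x1)
cos = theta.cos().repeat(2).view(1, 1, 2)  # cos/sin repeated across the pair, as in the RoPE module
sin = theta.sin().repeat(2).view(1, 1, 2)

out = apply_rotary_emb(x, (cos, sin))
x0, x1 = x[0, 0]
expected = torch.stack(
    [x0 * theta.cos() - x1 * theta.sin(),   # standard 2-D rotation by theta
     x1 * theta.cos() + x0 * theta.sin()]
)
assert torch.allclose(out[0, 0], expected, atol=1e-6)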