Dataset columns: text (string, lengths 5 to 631k), id (string, lengths 14 to 178), metadata (dict), __index_level_0__ (int64, values 0 to 647)
[package] name = "candle-wasm-example-bert" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" serde-wasm-bindgen = "0.6.0"
candle/candle-wasm-examples/bert/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/bert/Cargo.toml", "repo_id": "candle", "token_count": 304 }
65
[package] name = "candle-wasm-example-llama2" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.70" features = [ 'Blob', 'Document', 'Element', 'HtmlElement', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', ]
candle/candle-wasm-examples/llama2-c/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/Cargo.toml", "repo_id": "candle", "token_count": 434 }
66
import snarkdown from "https://cdn.skypack.dev/snarkdown"; import hljs from "https://cdn.skypack.dev/highlight.js"; // models base url const MODELS = { moondream2_q4k: { base_url: "https://huggingface.co/santiagomed/candle-moondream/resolve/main/", model: "model-q4_0.gguf", tokenizer: "tokenizer.json", quantized: true, size: "1.51 GB", }, }; const moodreamWorker = new Worker("./moondreamWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = model.model instanceof Array ? model.model.map((m) => model.base_url + m) : model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const prompt = getValue("prompt").trim(); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); if (prompt?.value?.trim() === "") { return; } function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = snarkdown(prompt + sentence); outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; hljs.highlightAll(); break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { moodreamWorker.postMessage({ weightsURL, modelID, tokenizerURL, quantized: model.quantized, imageURL: currentImageURL, prompt, temp: temperature, top_p: topP, repeatPenalty, seed: seed, maxSeqLen, verbose_prompt: false, command: "start", }); const handleAbort = () => { moodreamWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { moodreamWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { moodreamWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { moodreamWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); moodreamWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); const dropArea = document.querySelector("#drop-area"); const canvas = document.querySelector("#canvas"); const ctxCanvas = canvas.getContext("2d"); const fileUpload = document.querySelector("#file-upload"); const clearImgBtn = document.querySelector("#clear-img-btn"); const imagesExamples = document.querySelector("#image-select"); let currentImageURL = null; let runController = new AbortController(); let isRunning = false; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = 
document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelect.appendChild(option); } const query = new URLSearchParams(window.location.search); const modelID = query.get("model"); if (modelID) { modelSelect.value = modelID; } else { modelSelect.value = "moondream2_q4k"; } }); imagesExamples.addEventListener("click", (e) => { // if (isEmbedding || isSegmenting) { // return; // } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); currentImageURL = href; drawImageCanvas(href); } }); modelSelect.addEventListener("change", (e) => { const query = new URLSearchParams(window.location.search); query.set("model", e.target.value); window.history.replaceState({}, "", `${window.location.pathname}?${query}`); window.parent.postMessage({ queryString: "?" + query }, "*"); const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = 200; }); clearImgBtn.addEventListener("click", () => { clearImageCanvas(); }); //add event listener to file input fileUpload.addEventListener("input", async (e) => { const target = e.target; if (target.files.length > 0 && !target.files[0].type.includes("svg")) { const href = URL.createObjectURL(target.files[0]); clearImageCanvas(); await drawImageCanvas(href); } }); // add event listener to drop-area dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", async (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); clearImageCanvas(); await drawImageCanvas(href); } else if (url) { clearImageCanvas(); await drawImageCanvas(url); } }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); async function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } return new Promise((resolve, reject) => { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); clearImgBtn.disabled = false; resolve(img); }; img.src = imgURL; currentImageURL = imgURL; }); } function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); clearImgBtn.disabled = true; canvas.parentElement.style.height = "auto"; currentImageURL = null; canvas.width = 0; canvas.height = 0; } function startRunning() { isRunning = true; runBtn.textContent = "Stop"; prompt.disabled = true; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; prompt.disabled = false; } prompt.addEventListener("input", (e) => { runBtn.disabled = false; });
candle/candle-wasm-examples/moondream/code.js/0
{ "file_path": "candle/candle-wasm-examples/moondream/code.js", "repo_id": "candle", "token_count": 2873 }
67
//load the candle SAM Model wasm module import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheModel = true) { if (!cacheModel) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "sam-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class SAMModel { static instance = {}; // keep current image embeddings state static imageArrayHash = {}; // Add a new property to hold the current modelID static currentModelID = null; static async getInstance(modelURL, modelID) { if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: `Loading Model ${modelID}`, }); const weightsArrayU8 = await fetchArrayBuffer(modelURL); this.instance[modelID] = new Model( weightsArrayU8, /tiny|mobile/.test(modelID) ); } else { self.postMessage({ status: "loading", message: "Model Already Loaded" }); } // Set the current modelID to the modelID that was passed in this.currentModelID = modelID; return this.instance[modelID]; } // Remove the modelID parameter from setImageEmbeddings static setImageEmbeddings(imageArrayU8) { // check if image embeddings are already set for this image and model const imageArrayHash = this.getSimpleHash(imageArrayU8); if ( this.imageArrayHash[this.currentModelID] === imageArrayHash && this.instance[this.currentModelID] ) { self.postMessage({ status: "embedding", message: "Embeddings Already Set", }); return; } this.imageArrayHash[this.currentModelID] = imageArrayHash; this.instance[this.currentModelID].set_image_embeddings(imageArrayU8); self.postMessage({ status: "embedding", message: "Embeddings Set" }); } static getSimpleHash(imageArrayU8) { // get simple hash of imageArrayU8 let imageArrayHash = 0; for (let i = 0; i < imageArrayU8.length; i += 100) { imageArrayHash ^= imageArrayU8[i]; } return imageArrayHash.toString(16); } } async function createImageCanvas( { mask_shape, mask_data }, // mask { original_width, original_height, width, height } // original image ) { const [_, __, shape_width, shape_height] = mask_shape; const maskCanvas = new OffscreenCanvas(shape_width, shape_height); // canvas for mask const maskCtx = maskCanvas.getContext("2d"); const canvas = new OffscreenCanvas(original_width, original_height); // canvas for creating mask with original image size const ctx = canvas.getContext("2d"); const imageData = maskCtx.createImageData( maskCanvas.width, maskCanvas.height ); const data = imageData.data; for (let p = 0; p < data.length; p += 4) { data[p] = 0; data[p + 1] = 0; data[p + 2] = 0; data[p + 3] = mask_data[p / 4] * 255; } maskCtx.putImageData(imageData, 0, 0); let sx, sy; if (original_height < original_width) { sy = original_height / original_width; sx = 1; } else { sy = 1; sx = original_width / original_height; } ctx.drawImage( maskCanvas, 0, 0, maskCanvas.width * sx, maskCanvas.height * sy, 0, 0, original_width, original_height ); const blob = await canvas.convertToBlob(); return URL.createObjectURL(blob); } self.addEventListener("message", async (event) => { const { modelURL, modelID, imageURL, points } = event.data; try { self.postMessage({ status: "loading", message: "Starting SAM" }); const sam = await SAMModel.getInstance(modelURL, modelID); self.postMessage({ status: "loading", 
message: "Loading Image" }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); self.postMessage({ status: "embedding", message: "Creating Embeddings" }); SAMModel.setImageEmbeddings(imageArrayU8); if (!points) { // no points only do the embeddings self.postMessage({ status: "complete-embedding", message: "Embeddings Complete", }); return; } self.postMessage({ status: "segmenting", message: "Segmenting" }); const { mask, image } = sam.mask_for_point({ points }); const maskDataURL = await createImageCanvas(mask, image); // Send the segment back to the main thread as JSON self.postMessage({ status: "complete", message: "Segmentation Complete", output: { maskURL: maskDataURL }, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/segment-anything/samWorker.js/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/samWorker.js", "repo_id": "candle", "token_count": 1747 }
68
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle YOLOv8 Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <script src="https://cdn.tailwindcss.com"></script> <script src="https://cdn.jsdelivr.net/gh/huggingface/hub-js-utils/share-canvas.js" type="module" ></script> <script type="module"> const MODEL_BASEURL = "https://huggingface.co/lmz/candle-yolo-v8/resolve/main/"; const MODELS = { yolov8n: { model_size: "n", url: "yolov8n.safetensors", }, yolov8s: { model_size: "s", url: "yolov8s.safetensors", }, yolov8m: { model_size: "m", url: "yolov8m.safetensors", }, yolov8l: { model_size: "l", url: "yolov8l.safetensors", }, yolov8x: { model_size: "x", url: "yolov8x.safetensors", }, yolov8n_pose: { model_size: "n", url: "yolov8n-pose.safetensors", }, yolov8s_pose: { model_size: "s", url: "yolov8s-pose.safetensors", }, yolov8m_pose: { model_size: "m", url: "yolov8m-pose.safetensors", }, yolov8l_pose: { model_size: "l", url: "yolov8l-pose.safetensors", }, yolov8x_pose: { model_size: "x", url: "yolov8x-pose.safetensors", }, }; const COCO_PERSON_SKELETON = [ [4, 0], // head [3, 0], [16, 14], // left lower leg [14, 12], // left upper leg [6, 12], // left torso [6, 5], // top torso [6, 8], // upper arm [8, 10], // lower arm [1, 2], // head [1, 3], // right head [2, 4], // left head [3, 5], // right neck [4, 6], // left neck [5, 7], // right upper arm [7, 9], // right lower arm [5, 11], // right torso [11, 12], // bottom torso [11, 13], // right upper leg [13, 15], // right lower leg ]; // init web worker const yoloWorker = new Worker("./yoloWorker.js", { type: "module" }); let hasImage = false; //add event listener to image examples document.querySelector("#image-select").addEventListener("click", (e) => { const target = e.target; if (target.nodeName === "IMG") { const href = target.src; drawImageCanvas(href); } }); //add event listener to file input document.querySelector("#file-upload").addEventListener("change", (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); drawImageCanvas(href); } }); // add event listener to drop-area const dropArea = document.querySelector("#drop-area"); dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); drawImageCanvas(href); } else if (url) { drawImageCanvas(url); } }); document.querySelector("#clear-btn").addEventListener("click", () => { drawImageCanvas(); }); function drawImageCanvas(imgURL) { const canvas = document.querySelector("#canvas"); const canvasResult = document.querySelector("#canvas-result"); canvasResult 
.getContext("2d") .clearRect(0, 0, canvas.width, canvas.height); const ctx = canvas.getContext("2d"); ctx.clearRect(0, 0, canvas.width, canvas.height); document.querySelector("#share-btn").classList.add("invisible"); document.querySelector("#clear-btn").classList.add("invisible"); document.querySelector("#detect").disabled = true; hasImage = false; canvas.parentElement.style.height = "auto"; if (imgURL && imgURL !== "") { const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctx.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; hasImage = true; document.querySelector("#detect").disabled = false; document.querySelector("#clear-btn").classList.remove("invisible"); }; img.src = imgURL; } } async function classifyImage( imageURL, // URL of image to classify modelID, // ID of model to use modelURL, // URL to model file modelSize, // size of model confidence, // confidence threshold iou_threshold, // IoU threshold updateStatus // function receives status updates ) { return new Promise((resolve, reject) => { yoloWorker.postMessage({ imageURL, modelID, modelURL, modelSize, confidence, iou_threshold, }); function handleMessage(event) { console.log("message", event.data); if ("status" in event.data) { updateStatus(event.data.status); } if ("error" in event.data) { yoloWorker.removeEventListener("message", handleMessage); reject(new Error(event.data.error)); } if (event.data.status === "complete") { yoloWorker.removeEventListener("message", handleMessage); resolve(event.data); } } yoloWorker.addEventListener("message", handleMessage); }); } // add event listener to detect button document.querySelector("#detect").addEventListener("click", async () => { if (!hasImage) { return; } const modelID = document.querySelector("#model").value; const modelURL = MODEL_BASEURL + MODELS[modelID].url; const modelSize = MODELS[modelID].model_size; const confidence = parseFloat( document.querySelector("#confidence").value ); const iou_threshold = parseFloat( document.querySelector("#iou_threshold").value ); const canvasInput = document.querySelector("#canvas"); const canvas = document.querySelector("#canvas-result"); canvas.width = canvasInput.width; canvas.height = canvasInput.height; const scale = canvas.width / canvas.offsetWidth; const ctx = canvas.getContext("2d"); ctx.drawImage(canvasInput, 0, 0); const imageURL = canvas.toDataURL(); const results = await await classifyImage( imageURL, modelID, modelURL, modelSize, confidence, iou_threshold, updateStatus ); const { output } = results; ctx.lineWidth = 1 + 2 * scale; ctx.strokeStyle = "#3c8566"; ctx.fillStyle = "#0dff9a"; const fontSize = 14 * scale; ctx.font = `${fontSize}px sans-serif`; for (const detection of output) { // check keypoint for pose model data let xmin, xmax, ymin, ymax, label, confidence, keypoints; if ("keypoints" in detection) { xmin = detection.xmin; xmax = detection.xmax; ymin = detection.ymin; ymax = detection.ymax; confidence = detection.confidence; keypoints = detection.keypoints; } else { const [_label, bbox] = detection; label = _label; xmin = bbox.xmin; xmax = bbox.xmax; ymin = bbox.ymin; ymax = bbox.ymax; confidence = bbox.confidence; } const [x, y, w, h] = [xmin, ymin, xmax - xmin, ymax - ymin]; const text = `${label ? 
label + " " : ""}${confidence.toFixed(2)}`; const width = ctx.measureText(text).width; ctx.fillStyle = "#3c8566"; ctx.fillRect(x - 2, y - fontSize, width + 4, fontSize); ctx.fillStyle = "#e3fff3"; ctx.strokeRect(x, y, w, h); ctx.fillText(text, x, y - 2); if (keypoints) { ctx.save(); ctx.fillStyle = "magenta"; ctx.strokeStyle = "yellow"; for (const keypoint of keypoints) { const { x, y } = keypoint; ctx.beginPath(); ctx.arc(x, y, 3, 0, 2 * Math.PI); ctx.fill(); } ctx.beginPath(); for (const [xid, yid] of COCO_PERSON_SKELETON) { //draw line between skeleton keypoitns if (keypoints[xid] && keypoints[yid]) { ctx.moveTo(keypoints[xid].x, keypoints[xid].y); ctx.lineTo(keypoints[yid].x, keypoints[yid].y); } } ctx.stroke(); ctx.restore(); } } }); function updateStatus(statusMessage) { const button = document.querySelector("#detect"); if (statusMessage === "detecting") { button.disabled = true; button.classList.add("bg-blue-700"); button.classList.remove("bg-blue-950"); button.textContent = "Predicting..."; } else if (statusMessage === "complete") { button.disabled = false; button.classList.add("bg-blue-950"); button.classList.remove("bg-blue-700"); button.textContent = "Predict"; document.querySelector("#share-btn").classList.remove("invisible"); } } document.querySelector("#share-btn").addEventListener("click", () => { shareToCommunity( "lmz/candle-yolo", "Candle + YOLOv8", "YOLOv8 with [Candle](https://github.com/huggingface/candle)", "canvas-result", "share-btn" ); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle YOLOv8</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcases object detection and pose estimation models in your browser using Rust/WASM. It utilizes <a href="https://huggingface.co/lmz/candle-yolo-v8" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > safetensor's YOLOv8 models </a> and a WASM runtime built with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a >. 
</p> <p> To run pose estimation, select a yolo pose model from the dropdown </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light" > <option value="yolov8n" selected>yolov8n (6.37 MB)</option> <option value="yolov8s">yolov8s (22.4 MB)</option> <option value="yolov8m">yolov8m (51.9 MB)</option> <option value="yolov8l">yolov8l (87.5 MB)</option> <option value="yolov8x">yolov8x (137 MB)</option> <!-- Pose models --> <option value="yolov8n_pose">yolov8n_pose (6.65 MB)</option> <option value="yolov8s_pose">yolov8s_pose (23.3 MB)</option> <option value="yolov8m_pose">yolov8m_pose (53 MB)</option> <option value="yolov8l_pose">yolov8l_pose (89.1 MB)</option> <option value="yolov8x_pose">yolov8x_pose (139 MB)</option> </select> </div> <div> <button id="detect" disabled class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 px-4 rounded disabled:bg-gray-300 disabled:cursor-not-allowed" > Predict </button> </div> <!-- drag and drop area --> <div class="relative max-w-lg"> <div class="py-1"> <button id="clear-btn" class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center ml-auto invisible" > <svg class="" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 13 12" height="1em" > <path d="M1.6.7 12 11.1M12 .7 1.6 11.1" stroke="#2E3036" stroke-width="2" /> </svg> Clear image </button> </div> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden" > <div class="flex flex-col items-center justify-center space-y-1 text-center" > <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg" > <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700" > <span>Drag and drop your image here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" class="sr-only" /> </div> <canvas id="canvas" class="absolute pointer-events-none w-full" ></canvas> <canvas id="canvas-result" class="absolute pointer-events-none w-full" ></canvas> </div> <div class="text-right py-2"> <button id="share-btn" class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible" > <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg" /> </button> </div> </div> <div> <div class="flex gap-3 items-center overflow-x-scroll" id="image-select" > <h3 class="font-medium">Examples:</h3> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg" 
class="cursor-pointer w-24 h-24 object-cover" /> </div> </div> <div> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="confidence" >Confidence Threshold</label > <input type="range" id="confidence" name="confidence" min="0" max="1" step="0.01" value="0.25" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs font-light px-1 py-1 border border-gray-700 rounded-md w-min" >0.25</output > <label class="text-sm font-medium" for="iou_threshold" >IoU Threshold</label > <input type="range" id="iou_threshold" name="iou_threshold" min="0" max="1" step="0.01" value="0.45" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="font-extralight text-xs px-1 py-1 border border-gray-700 rounded-md w-min" >0.45</output > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/yolo/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/yolo/lib-example.html", "repo_id": "candle", "token_count": 9649 }
69
use candle::quantized::{gguf_file, GgmlDType, QTensor}; use candle::{Device, Result}; use clap::{Parser, Subcommand, ValueEnum}; use rayon::prelude::*; #[derive(ValueEnum, Debug, Clone)] enum QuantizationMode { /// The default quantization includes all 2d tensors, except the output tensor which always /// uses Q6_K. Llama, } impl QuantizationMode { fn quantize(&self, name: &str, tensor: QTensor, dtype: GgmlDType) -> Result<QTensor> { match self { Self::Llama => { // Same behavior as the llama.cpp quantization. let should_quantize = name.ends_with(".weight") && tensor.rank() == 2; if should_quantize { let tensor = tensor.dequantize(&Device::Cpu)?; if name == "output.weight" { QTensor::quantize(&tensor, GgmlDType::Q6K) } else { QTensor::quantize(&tensor, dtype) } } else { Ok(tensor) } } } } } #[derive(ValueEnum, Debug, Clone)] enum Quantization { #[value(name = "q4_0")] Q4_0, #[value(name = "q4_1")] Q4_1, #[value(name = "q5_0")] Q5_0, #[value(name = "q5_1")] Q5_1, #[value(name = "q8_0")] Q8_0, #[value(name = "q8_1")] Q8_1, Q2k, Q3k, Q4k, Q5k, Q6k, Q8k, F16, F32, } impl Quantization { fn dtype(&self) -> GgmlDType { match self { Quantization::Q4_0 => GgmlDType::Q4_0, Quantization::Q4_1 => GgmlDType::Q4_1, Quantization::Q5_0 => GgmlDType::Q5_0, Quantization::Q5_1 => GgmlDType::Q5_1, Quantization::Q8_0 => GgmlDType::Q8_0, Quantization::Q8_1 => GgmlDType::Q8_1, Quantization::Q2k => GgmlDType::Q2K, Quantization::Q3k => GgmlDType::Q3K, Quantization::Q4k => GgmlDType::Q4K, Quantization::Q5k => GgmlDType::Q5K, Quantization::Q6k => GgmlDType::Q6K, Quantization::Q8k => GgmlDType::Q8K, Quantization::F16 => GgmlDType::F16, Quantization::F32 => GgmlDType::F32, } } } #[derive(ValueEnum, Debug, Clone)] enum Format { Safetensors, Npz, Ggml, Gguf, Pth, Pickle, } impl Format { fn infer<P: AsRef<std::path::Path>>(p: P) -> Option<Self> { p.as_ref() .extension() .and_then(|e| e.to_str()) .and_then(|e| match e { // We don't infer any format for .bin as it can be used for ggml/gguf or pytorch. "safetensors" | "safetensor" => Some(Self::Safetensors), "npz" => Some(Self::Npz), "pth" | "pt" => Some(Self::Pth), "ggml" => Some(Self::Ggml), "gguf" => Some(Self::Gguf), _ => None, }) } } #[derive(Subcommand, Debug, Clone)] enum Command { Ls { files: Vec<std::path::PathBuf>, /// The file format to use, if unspecified infer from the file extension. #[arg(long, value_enum)] format: Option<Format>, /// Enable verbose mode. #[arg(short, long)] verbose: bool, }, Print { file: std::path::PathBuf, names: Vec<String>, /// The file format to use, if unspecified infer from the file extension. #[arg(long, value_enum)] format: Option<Format>, /// Print the whole content of each tensor. #[arg(long)] full: bool, /// Line width for printing the tensors. #[arg(long)] line_width: Option<usize>, }, Quantize { /// The input file(s), in safetensors format. in_file: Vec<std::path::PathBuf>, /// The output file, in gguf format. #[arg(long)] out_file: std::path::PathBuf, /// The quantization schema to apply. #[arg(long, value_enum)] quantization: Quantization, /// Which tensor to quantize. #[arg(long, value_enum, default_value_t = QuantizationMode::Llama)] mode: QuantizationMode, }, Dequantize { /// The input file, in gguf format. in_file: std::path::PathBuf, /// The output file, in safetensors format. 
#[arg(long)] out_file: std::path::PathBuf, }, } #[derive(Parser, Debug, Clone)] struct Args { #[command(subcommand)] command: Command, } fn run_print( file: &std::path::PathBuf, names: Vec<String>, format: Option<Format>, full: bool, line_width: Option<usize>, device: &Device, ) -> Result<()> { if full { candle::display::set_print_options_full(); } if let Some(line_width) = line_width { candle::display::set_line_width(line_width) } let format = match format { Some(format) => format, None => match Format::infer(file) { Some(format) => format, None => { println!( "{file:?}: cannot infer format from file extension, use the --format flag" ); return Ok(()); } }, }; match format { Format::Npz => { let tensors = candle::npy::NpzTensors::new(file)?; let names = if names.is_empty() { tensors.names().into_iter().map(|v| v.to_string()).collect() } else { names }; for name in names.iter() { println!("==== {name} ===="); match tensors.get(name)? { Some(tensor) => println!("{tensor}"), None => println!("not found"), } } } Format::Safetensors => { use candle::safetensors::Load; let tensors = unsafe { candle::safetensors::MmapedSafetensors::new(file)? }; let tensors: std::collections::HashMap<_, _> = tensors.tensors().into_iter().collect(); let names = if names.is_empty() { tensors.keys().map(|v| v.to_string()).collect() } else { names }; for name in names.iter() { println!("==== {name} ===="); match tensors.get(name) { Some(tensor_view) => { let tensor = tensor_view.load(device)?; println!("{tensor}") } None => println!("not found"), } } } Format::Pth => { let pth_file = candle::pickle::PthTensors::new(file, None)?; let names = if names.is_empty() { pth_file .tensor_infos() .keys() .map(|v| v.to_string()) .collect() } else { names }; for name in names.iter() { println!("==== {name} ===="); match pth_file.get(name)? 
{ Some(tensor) => { println!("{tensor}") } None => println!("not found"), } } } Format::Pickle => { candle::bail!("pickle format is not supported for print") } Format::Ggml => { let mut file = std::fs::File::open(file)?; let content = candle::quantized::ggml_file::Content::read(&mut file, device)?; let names = if names.is_empty() { content.tensors.keys().map(|v| v.to_string()).collect() } else { names }; for name in names.iter() { println!("==== {name} ===="); match content.tensors.get(name) { Some(tensor) => { let tensor = tensor.dequantize(device)?; println!("{tensor}") } None => println!("not found"), } } } Format::Gguf => { let mut file = std::fs::File::open(file)?; let content = gguf_file::Content::read(&mut file)?; let names = if names.is_empty() { content.tensor_infos.keys().map(|v| v.to_string()).collect() } else { names }; for name in names.iter() { println!("==== {name} ===="); match content.tensor(&mut file, name, device) { Ok(tensor) => { let tensor = tensor.dequantize(device)?; println!("{tensor}") } Err(_) => println!("not found"), } } } } Ok(()) } fn run_ls( file: &std::path::PathBuf, format: Option<Format>, verbose: bool, device: &Device, ) -> Result<()> { let format = match format { Some(format) => format, None => match Format::infer(file) { Some(format) => format, None => { println!( "{file:?}: cannot infer format from file extension, use the --format flag" ); return Ok(()); } }, }; match format { Format::Npz => { let tensors = candle::npy::NpzTensors::new(file)?; let mut names = tensors.names(); names.sort(); for name in names { let shape_dtype = match tensors.get_shape_and_dtype(name) { Ok((shape, dtype)) => format!("[{shape:?}; {dtype:?}]"), Err(err) => err.to_string(), }; println!("{name}: {shape_dtype}") } } Format::Safetensors => { let tensors = unsafe { candle::safetensors::MmapedSafetensors::new(file)? 
}; let mut tensors = tensors.tensors(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, view) in tensors.iter() { let dtype = view.dtype(); let dtype = match candle::DType::try_from(dtype) { Ok(dtype) => format!("{dtype:?}"), Err(_) => format!("{dtype:?}"), }; let shape = view.shape(); println!("{name}: [{shape:?}; {dtype}]") } } Format::Pth => { let mut tensors = candle::pickle::read_pth_tensor_info(file, verbose, None)?; tensors.sort_by(|a, b| a.name.cmp(&b.name)); for tensor_info in tensors.iter() { println!( "{}: [{:?}; {:?}]", tensor_info.name, tensor_info.layout.shape(), tensor_info.dtype, ); if verbose { println!(" {tensor_info:?}"); } } } Format::Pickle => { let file = std::fs::File::open(file)?; let mut reader = std::io::BufReader::new(file); let mut stack = candle::pickle::Stack::empty(); stack.read_loop(&mut reader)?; for (i, obj) in stack.stack().iter().enumerate() { println!("{i} {obj:?}"); } } Format::Ggml => { let mut file = std::fs::File::open(file)?; let content = candle::quantized::ggml_file::Content::read(&mut file, device)?; let mut tensors = content.tensors.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, qtensor) in tensors.iter() { println!("{name}: [{:?}; {:?}]", qtensor.shape(), qtensor.dtype()); } } Format::Gguf => { let mut file = std::fs::File::open(file)?; let content = gguf_file::Content::read(&mut file)?; if verbose { let mut metadata = content.metadata.into_iter().collect::<Vec<_>>(); metadata.sort_by(|a, b| a.0.cmp(&b.0)); println!("metadata entries ({})", metadata.len()); for (key, value) in metadata.iter() { println!(" {key}: {value:?}"); } } let mut tensors = content.tensor_infos.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, info) in tensors.iter() { println!("{name}: [{:?}; {:?}]", info.shape, info.ggml_dtype); } } } Ok(()) } fn run_quantize_safetensors( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, ) -> Result<()> { let mut out_file = std::fs::File::create(out_file)?; let mut tensors = std::collections::HashMap::new(); for in_file in in_files.iter() { let in_tensors = candle::safetensors::load(in_file, &Device::Cpu)?; tensors.extend(in_tensors) } println!("tensors: {}", tensors.len()); let dtype = q.dtype(); let block_size = dtype.block_size(); let qtensors = tensors .into_par_iter() .map(|(name, tensor)| { let should_quantize = tensor.rank() == 2 && tensor.dim(1)? % block_size == 0; println!(" quantizing {name} {tensor:?} {should_quantize}"); let tensor = if should_quantize { QTensor::quantize(&tensor, dtype)? } else { QTensor::quantize(&tensor, GgmlDType::F32)? 
}; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, &[], &qtensors)?; Ok(()) } fn run_dequantize( in_file: std::path::PathBuf, out_file: std::path::PathBuf, device: &Device, ) -> Result<()> { let mut in_file = std::fs::File::open(in_file)?; let content = gguf_file::Content::read(&mut in_file)?; let mut tensors = std::collections::HashMap::new(); for (tensor_name, _) in content.tensor_infos.iter() { let tensor = content.tensor(&mut in_file, tensor_name, device)?; let tensor = tensor.dequantize(device)?; tensors.insert(tensor_name.to_string(), tensor); } candle::safetensors::save(&tensors, out_file)?; Ok(()) } fn run_quantize( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, qmode: QuantizationMode, device: &Device, ) -> Result<()> { if in_files.is_empty() { candle::bail!("no specified input files") } if let Some(extension) = out_file.extension() { if extension == "safetensors" { candle::bail!("the generated file cannot use the safetensors extension") } } if let Some(extension) = in_files[0].extension() { if extension == "safetensors" { return run_quantize_safetensors(in_files, out_file, q); } } if in_files.len() != 1 { candle::bail!("only a single in-file can be used when quantizing gguf files") } // Open the out file early so as to fail directly on missing directories etc. let mut out_file = std::fs::File::create(out_file)?; let mut in_ = std::fs::File::open(&in_files[0])?; let content = gguf_file::Content::read(&mut in_)?; println!("tensors: {}", content.tensor_infos.len()); let dtype = q.dtype(); let qtensors = content .tensor_infos .par_iter() .map(|(name, _)| { println!(" quantizing {name}"); let mut in_file = std::fs::File::open(&in_files[0])?; let tensor = content.tensor(&mut in_file, name, device)?; let tensor = qmode.quantize(name, tensor, dtype)?; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); let metadata = content .metadata .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, metadata.as_slice(), &qtensors)?; Ok(()) } fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = Device::Cpu; match args.command { Command::Ls { files, format, verbose, } => { let multiple_files = files.len() > 1; for file in files.iter() { if multiple_files { println!("--- {file:?} ---"); } run_ls(file, format.clone(), verbose, &device)? } } Command::Print { file, names, format, full, line_width, } => run_print(&file, names, format, full, line_width, &device)?, Command::Quantize { in_file, out_file, quantization, mode, } => run_quantize(&in_file, out_file, quantization, mode, &device)?, Command::Dequantize { in_file, out_file } => run_dequantize(in_file, out_file, &device)?, } Ok(()) }
candle/tensor-tools/src/main.rs/0
{ "file_path": "candle/tensor-tools/src/main.rs", "repo_id": "candle", "token_count": 9444 }
70
export default {
  "*.{js,jsx,ts,tsx}": ["prettier --write", "eslint --fix", "eslint"],
  "*.json": ["prettier --write"],
};
chat-ui/.husky/lint-stage-config.js/0
{ "file_path": "chat-ui/.husky/lint-stage-config.js", "repo_id": "chat-ui", "token_count": 54 }
71
apiVersion: apps/v1 kind: Deployment metadata: labels: {{ include "labels.standard" . | nindent 4 }} name: {{ include "name" . }} namespace: {{ .Release.Namespace }} {{- if .Values.infisical.enabled }} annotations: secrets.infisical.com/auto-reload: "true" {{- end }} spec: progressDeadlineSeconds: 600 {{- if not $.Values.autoscaling.enabled }} replicas: {{ .Values.replicas }} {{- end }} revisionHistoryLimit: 10 selector: matchLabels: {{ include "labels.standard" . | nindent 6 }} strategy: rollingUpdate: maxSurge: 25% maxUnavailable: 25% type: RollingUpdate template: metadata: labels: {{ include "labels.standard" . | nindent 8 }} annotations: checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} {{- if $.Values.envVars.NODE_LOG_STRUCTURED_DATA }} co.elastic.logs/json.expand_keys: "true" {{- end }} spec: {{- if .Values.serviceAccount.enabled }} serviceAccountName: "{{ .Values.serviceAccount.name | default (include "name" .) }}" {{- end }} containers: - name: chat-ui image: "{{ .Values.image.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} readinessProbe: failureThreshold: 30 periodSeconds: 10 httpGet: path: {{ $.Values.envVars.APP_BASE | default "" }}/healthcheck port: {{ $.Values.envVars.APP_PORT | default 3000 | int }} livenessProbe: failureThreshold: 30 periodSeconds: 10 httpGet: path: {{ $.Values.envVars.APP_BASE | default "" }}/healthcheck port: {{ $.Values.envVars.APP_PORT | default 3000 | int }} ports: - containerPort: {{ $.Values.envVars.APP_PORT | default 3000 | int }} name: http protocol: TCP {{- if $.Values.monitoring.enabled }} - containerPort: {{ $.Values.envVars.METRICS_PORT | default 5565 | int }} name: metrics protocol: TCP {{- end }} resources: {{ toYaml .Values.resources | nindent 12 }} {{- with $.Values.extraEnv }} env: {{- toYaml . | nindent 14 }} {{- end }} envFrom: - configMapRef: name: {{ include "name" . }} {{- if $.Values.infisical.enabled }} - secretRef: name: {{ include "name" $ }}-secs {{- end }} {{- with $.Values.extraEnvFrom }} {{- toYaml . | nindent 14 }} {{- end }} nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }} tolerations: {{ toYaml .Values.tolerations | nindent 8 }} volumes: - name: config configMap: name: {{ include "name" . }}
chat-ui/chart/templates/deployment.yaml/0
{ "file_path": "chat-ui/chart/templates/deployment.yaml", "repo_id": "chat-ui", "token_count": 1374 }
72
# Anthropic

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | Yes       |

We also support Anthropic models (including multimodal ones via `multimodal: true`) through the official SDK. You may provide your API key via the `ANTHROPIC_API_KEY` env variable, or alternatively through `endpoints.apiKey`, as per the following example.

```ini
MODELS=`[
  {
    "name": "claude-3-haiku-20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest and most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-sonnet-20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-opus-20240229",
    "displayName": "Claude 3 Opus",
    "description": "Most powerful model for highly complex tasks",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  }
]`
```

## VertexAI

We also support using Anthropic models running on Vertex AI. Authentication is done using Google Application Default Credentials. The project ID can be provided through `endpoints.projectId`, as per the following example:

```ini
MODELS=`[
  {
    "name": "claude-3-haiku@20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest, most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-sonnet@20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
]`
```
chat-ui/docs/source/configuration/models/providers/anthropic.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/anthropic.md", "repo_id": "chat-ui", "token_count": 1541 }
73
# Copy HuggingChat

The config file for HuggingChat is stored in the `chart/env/prod.yaml` file. It is the source of truth for the environment variables used in our CI/CD pipeline.

For HuggingChat, since we need to customize the app color as well as the base path, we build a custom docker image. You can find the workflow here.

<Tip>

If you want to make changes to the model config used in production for HuggingChat, you should do so against `chart/env/prod.yaml`.

</Tip>

### Running a copy of HuggingChat locally

If you want to run an exact copy of HuggingChat locally, you will need to do the following first:

1. Create an [OAuth App on the hub](https://huggingface.co/settings/applications/new) with `openid profile email` permissions. Make sure to set the callback URL to something like `http://localhost:5173/chat/login/callback`, which matches the right path for your local instance.
2. Create a [HF Token](https://huggingface.co/settings/tokens) with your Hugging Face account. You will need a Pro account to be able to access some of the larger models available through HuggingChat.
3. Create a free account with [serper.dev](https://serper.dev/) (you will get 2500 free search queries).
4. Run an instance of MongoDB, however you want (local or remote).

You can then create a new `.env.SECRET_CONFIG` file with the following content:

```ini
MONGODB_URL=<link to your mongo DB from step 4>
HF_TOKEN=<your HF token from step 2>
OPENID_CONFIG=`{
  PROVIDER_URL: "https://huggingface.co",
  CLIENT_ID: "<your client ID from step 1>",
  CLIENT_SECRET: "<your client secret from step 1>",
}`
SERPER_API_KEY=<your serper API key from step 3>
MESSAGES_BEFORE_LOGIN=<can be any numerical value, or set to 0 to require login>
```

You can then run `npm run updateLocalEnv` in the root of chat-ui. This will create a `.env.local` file which combines the `chart/env/prod.yaml` and the `.env.SECRET_CONFIG` file. You can then run `npm run dev` to start your local instance of HuggingChat.

### Populate database

<Tip warning={true}>

The `MONGODB_URL` used for this script will be fetched from `.env.local`. Make sure it's correct! The command runs directly on the database.

</Tip>

You can populate the database with faker data using the `populate` script:

```bash
npm run populate <flags here>
```

At least one flag must be specified; the following flags are available:

- `reset` - resets the database
- `all` - populates all tables
- `users` - populates the users table
- `settings` - populates the settings table for existing users
- `assistants` - populates the assistants table for existing users
- `conversations` - populates the conversations table for existing users

For example, you could use it like so:

```bash
npm run populate reset
```

to clear out the database. Then log in to the app to create your user and run the following command:

```bash
npm run populate users settings assistants conversations
```

to populate the database with fake data, including fake conversations and assistants for your user.
chat-ui/docs/source/developing/copy-huggingchat.md/0
{ "file_path": "chat-ui/docs/source/developing/copy-huggingchat.md", "repo_id": "chat-ui", "token_count": 870 }
74
import fs from "fs"; import yaml from "js-yaml"; const file = fs.readFileSync("chart/env/prod.yaml", "utf8"); // have to do a weird stringify/parse because of some node error const prod = JSON.parse(JSON.stringify(yaml.load(file))); const vars = prod.envVars as Record<string, string>; let PUBLIC_CONFIG = ""; Object.entries(vars) // filter keys used in prod with the proxy .filter( ([key]) => ![ "XFF_DEPTH", "ADDRESS_HEADER", "APP_BASE", "PUBLIC_ORIGIN", "PUBLIC_SHARE_PREFIX", "ADMIN_CLI_LOGIN", ].includes(key) ) .forEach(([key, value]) => { PUBLIC_CONFIG += `${key}=\`${value}\`\n`; }); const SECRET_CONFIG = (fs.existsSync(".env.SECRET_CONFIG") ? fs.readFileSync(".env.SECRET_CONFIG", "utf8") : process.env.SECRET_CONFIG) ?? ""; // Prepend the content of the env variable SECRET_CONFIG let full_config = `${PUBLIC_CONFIG}\n${SECRET_CONFIG}`; // replace the internal proxy url with the public endpoint full_config = full_config.replaceAll( "https://internal.api-inference.huggingface.co", "https://router.huggingface.co/hf-inference" ); full_config = full_config.replaceAll("COOKIE_SECURE=`true`", "COOKIE_SECURE=`false`"); full_config = full_config.replaceAll("LOG_LEVEL=`debug`", "LOG_LEVEL=`info`"); full_config = full_config.replaceAll("NODE_ENV=`prod`", "NODE_ENV=`development`"); // Write full_config to .env.local fs.writeFileSync(".env.local", full_config);
chat-ui/scripts/updateLocalEnv.ts/0
{ "file_path": "chat-ui/scripts/updateLocalEnv.ts", "repo_id": "chat-ui", "token_count": 581 }
75
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/state"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); const settings = useSettingsStore(); </script> <Modal on:close width="!max-w-[400px] !m-4"> <div class="from-primary-500/40 via-primary-500/10 to-primary-500/0 flex w-full flex-col items-center gap-6 bg-gradient-to-b px-5 pb-8 pt-9 text-center sm:px-6" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {publicConfig.PUBLIC_APP_NAME} </h2> <p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;"> {publicConfig.PUBLIC_APP_DESCRIPTION} </p> <p class="text-sm text-gray-500"> {publicConfig.PUBLIC_APP_DISCLAIMER_MESSAGE} </p> <div class="flex w-full flex-col items-center gap-2"> <button class="w-full justify-center rounded-full border-2 border-gray-300 bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" class:bg-white={page.data.loginEnabled} class:text-gray-800={page.data.loginEnabled} class:hover:bg-slate-100={page.data.loginEnabled} onclick={(e) => { e.preventDefault(); e.stopPropagation(); if (!cookiesAreEnabled()) { window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > {#if page.data.loginEnabled} {#if page.data.guestMode} Continue as guest {:else} Explore the app {/if} {:else} Start chatting {/if} </button> {#if page.data.loginEnabled} <a href="{base}/login" class="flex w-full flex-wrap items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if publicConfig.isHuggingChat} <span class="flex items-center"> &nbsp;with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5 flex-none" /> Hugging Face </span> {/if} </a> {/if} </div> </div> </Modal>
chat-ui/src/lib/components/DisclaimerModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/DisclaimerModal.svelte", "repo_id": "chat-ui", "token_count": 1060 }
76
<script lang="ts"> import { fade } from "svelte/transition"; import IconChevron from "./icons/IconChevron.svelte"; interface Props { scrollNode: HTMLElement; class?: string; } let { scrollNode, class: className = "" }: Props = $props(); let visible = $state(false); let observer: ResizeObserver | null = $state(null); function updateVisibility() { if (!scrollNode) return; visible = Math.ceil(scrollNode.scrollTop) + 200 < scrollNode.scrollHeight - scrollNode.clientHeight; } function destroy() { observer?.disconnect(); scrollNode?.removeEventListener("scroll", updateVisibility); } const cleanup = $effect.root(() => { $effect(() => { if (scrollNode) { if (window.ResizeObserver) { observer = new ResizeObserver(() => updateVisibility()); observer.observe(scrollNode); cleanup(); } scrollNode?.addEventListener("scroll", updateVisibility); } }); return () => destroy(); }); </script> {#if visible} <button transition:fade={{ duration: 150 }} onclick={() => scrollNode.scrollTo({ top: scrollNode.scrollHeight, behavior: "smooth" })} class="btn absolute flex h-[41px] w-[41px] rounded-full border bg-white shadow-md transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:shadow-gray-950 dark:hover:bg-gray-600 {className}" ><IconChevron classNames="mt-[2px]" /></button > {/if}
chat-ui/src/lib/components/ScrollToBottomBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/ScrollToBottomBtn.svelte", "repo_id": "chat-ui", "token_count": 508 }
77
<script lang="ts"> import Logo from "$lib/components/icons/Logo.svelte"; import { createEventDispatcher } from "svelte"; import IconGear from "~icons/bi/gear-fill"; import AnnouncementBanner from "../AnnouncementBanner.svelte"; import type { Model } from "$lib/types/Model"; import ModelCardMetadata from "../ModelCardMetadata.svelte"; import { base } from "$app/paths"; import JSON5 from "json5"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); interface Props { currentModel: Model; } let { currentModel }: Props = $props(); const dispatch = createEventDispatcher<{ message: string }>(); </script> <div class="my-auto grid gap-8 lg:grid-cols-3"> <div class="lg:col-span-1"> <div> <div class="mb-3 flex items-center text-2xl font-semibold"> <Logo classNames="mr-1 flex-none" /> {publicConfig.PUBLIC_APP_NAME} <div class="ml-3 flex h-6 items-center rounded-lg border border-gray-100 bg-gray-50 px-2 text-base text-gray-400 dark:border-gray-700/60 dark:bg-gray-800" > v{publicConfig.PUBLIC_VERSION} </div> </div> <p class="text-base text-gray-600 dark:text-gray-400"> {publicConfig.PUBLIC_APP_DESCRIPTION || "Making the community's best AI chat models available to everyone."} </p> </div> </div> <div class="lg:col-span-2 lg:pl-24"> {#each JSON5.parse(publicConfig.PUBLIC_ANNOUNCEMENT_BANNERS || "[]") as banner} <AnnouncementBanner classNames="mb-4" title={banner.title}> <a target={banner.external ? "_blank" : "_self"} href={banner.linkHref} class="mr-2 flex items-center underline hover:no-underline">{banner.linkTitle}</a > </AnnouncementBanner> {/each} <div class="overflow-hidden rounded-xl border dark:border-gray-800"> <div class="flex p-3"> <div> <div class="text-sm text-gray-600 dark:text-gray-400">Current Model</div> <div class="flex items-center gap-1.5 font-semibold max-sm:text-smd"> {#if currentModel.logoUrl} <img class=" overflown aspect-square size-4 rounded border dark:border-gray-700" src={currentModel.logoUrl} alt="" /> {:else} <div class="size-4 rounded border border-transparent bg-gray-300 dark:bg-gray-800" ></div> {/if} {currentModel.displayName} </div> </div> <a href="{base}/settings/{currentModel.id}" aria-label="Settings" class="btn ml-auto flex h-7 w-7 self-start rounded-full bg-gray-100 p-1 text-xs hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-600" ><IconGear /></a > </div> <ModelCardMetadata variant="dark" model={currentModel} /> </div> </div> {#if currentModel.promptExamples} <div class="lg:col-span-3 lg:mt-6"> <p class="mb-3 text-center text-gray-600 dark:text-gray-300 lg:text-left">Examples</p> <div class="flex max-h-60 gap-2 overflow-x-auto pb-2 text-center scrollbar-thin scrollbar-thumb-gray-300 dark:scrollbar-thumb-gray-700 lg:grid lg:grid-cols-3 lg:overflow-y-auto lg:text-left" > {#each currentModel.promptExamples as example} <button type="button" class="flex-shrink-0 rounded-xl border bg-gray-50 p-2.5 text-sm text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700 sm:p-3 lg:w-full xl:p-3.5 xl:text-base" onclick={() => dispatch("message", example.prompt)} > {example.title} </button> {/each} </div> </div> {/if} <div class="h-40 sm:h-24"></div> </div>
chat-ui/src/lib/components/chat/ChatIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatIntroduction.svelte", "repo_id": "chat-ui", "token_count": 1561 }
78
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId, type AnyBulkWriteOperation } from "mongodb"; import type { Assistant } from "$lib/types/Assistant"; import { generateSearchTokens } from "$lib/utils/searchTokens"; const migration: Migration = { _id: new ObjectId("5f9f3e3e3e3e3e3e3e3e3e3e"), name: "Update search assistants", up: async () => { const { assistants } = collections; let ops: AnyBulkWriteOperation<Assistant>[] = []; for await (const assistant of assistants .find() .project<Pick<Assistant, "_id" | "name">>({ _id: 1, name: 1 })) { ops.push({ updateOne: { filter: { _id: assistant._id, }, update: { $set: { searchTokens: generateSearchTokens(assistant.name), }, }, }, }); if (ops.length >= 1000) { process.stdout.write("."); await assistants.bulkWrite(ops, { ordered: false }); ops = []; } } if (ops.length) { await assistants.bulkWrite(ops, { ordered: false }); } return true; }, down: async () => { const { assistants } = collections; await assistants.updateMany({}, { $unset: { searchTokens: "" } }); return true; }, }; export default migration;
chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts", "repo_id": "chat-ui", "token_count": 483 }
79
import { Elysia, t } from "elysia"; import { authPlugin } from "$api/authPlugin"; import { collections } from "$lib/server/database"; import { ObjectId, type Filter } from "mongodb"; import { authCondition } from "$lib/server/auth"; import { SortKey, type Assistant } from "$lib/types/Assistant"; import type { User } from "$lib/types/User"; import { ReviewStatus } from "$lib/types/Review"; import { generateQueryTokens } from "$lib/utils/searchTokens"; import { config } from "$lib/server/config"; const NUM_PER_PAGE = 24; export const assistantGroup = new Elysia().use(authPlugin).group("/assistants", (app) => { return app .get("/", () => { // todo: get assistants throw new Error("Not implemented"); }) .post("/", () => { // todo: post new assistant throw new Error("Not implemented"); }) .get( "/search", async ({ query, locals, error }) => { if (!config.ENABLE_ASSISTANTS) { error(403, "Assistants are not enabled"); } const modelId = query.modelId; const pageIndex = query.p ?? 0; const username = query.user; const search = query.q?.trim() ?? null; const sort = query.sort ?? SortKey.TRENDING; const showUnfeatured = query.showUnfeatured ?? false; const createdByCurrentUser = locals.user?.username && locals.user.username === username; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { error(404, `User "${username}" doesn't exist`); } } // if we require featured assistants, that we are not on a user page and we are not an admin who wants to see unfeatured assistants, we show featured assistants let shouldBeFeatured = {}; if (config.REQUIRE_FEATURED_ASSISTANTS === "true" && !(locals.isAdmin && showUnfeatured)) { if (!user) { // only show featured assistants on the community page shouldBeFeatured = { review: ReviewStatus.APPROVED }; } else if (!createdByCurrentUser) { // on a user page show assistants that have been approved or are pending shouldBeFeatured = { review: { $in: [ReviewStatus.APPROVED, ReviewStatus.PENDING] } }; } } const noSpecificSearch = !user && !search; // fetch the top assistants sorted by user count from biggest to smallest. // filter by model too if modelId is provided or query if query is provided // only show assistants that have been used by more than 5 users if no specific search is made const filter: Filter<Assistant> = { ...(modelId && { modelId }), ...(user && { createdById: user._id }), ...(search && { searchTokens: { $all: generateQueryTokens(search) } }), ...(noSpecificSearch && { userCount: { $gte: 5 } }), ...shouldBeFeatured, }; const assistants = await collections.assistants .find(filter) .sort({ ...(sort === SortKey.TRENDING && { last24HoursCount: -1 }), userCount: -1, _id: 1, }) .skip(NUM_PER_PAGE * pageIndex) .limit(NUM_PER_PAGE) .toArray(); const numTotalItems = await collections.assistants.countDocuments(filter); return { assistants, selectedModel: modelId ?? 
"", numTotalItems, numItemsPerPage: NUM_PER_PAGE, query: search, sort, showUnfeatured, }; }, { query: t.Object({ user: t.Optional(t.String()), q: t.Optional(t.String()), sort: t.Optional(t.Enum(SortKey)), p: t.Optional(t.Numeric()), showUnfeatured: t.Optional(t.Boolean()), modelId: t.Optional(t.String()), }), } ) .group("/:id", (app) => { return app .derive(async ({ params, error }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.id), }); if (!assistant) { return error(404, "Assistant not found"); } return { assistant }; }) .get("", ({ assistant }) => { return assistant; }) .patch("", () => { // todo: patch assistant throw new Error("Not implemented"); }) .delete("/", () => { // todo: delete assistant throw new Error("Not implemented"); }) .post("/report", () => { // todo: report assistant throw new Error("Not implemented"); }) .patch("/review", () => { // todo: review assistant throw new Error("Not implemented"); }) .post("/follow", async ({ locals, assistant }) => { const result = await collections.settings.updateOne(authCondition(locals), { $addToSet: { assistants: assistant._id }, $set: { activeModel: assistant._id.toString() }, }); if (result.modifiedCount > 0) { await collections.assistants.updateOne( { _id: assistant._id }, { $inc: { userCount: 1 } } ); } return { message: "Assistant subscribed" }; }) .delete("/follow", async ({ locals, assistant }) => { const result = await collections.settings.updateOne(authCondition(locals), { $pull: { assistants: assistant._id }, }); if (result.modifiedCount > 0) { await collections.assistants.updateOne( { _id: assistant._id }, { $inc: { userCount: -1 } } ); } return { message: "Assistant unsubscribed" }; }); }); });
chat-ui/src/lib/server/api/routes/groups/assistants.ts/0
{ "file_path": "chat-ui/src/lib/server/api/routes/groups/assistants.ts", "repo_id": "chat-ui", "token_count": 2132 }
80
import { z } from "zod"; import type { Endpoint } from "../endpoints"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { createImageProcessorOptionsValidator } from "../images"; import { endpointMessagesToAnthropicMessages } from "./utils"; import type { MessageParam } from "@anthropic-ai/sdk/resources/messages.mjs"; export const endpointAnthropicVertexParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("anthropic-vertex"), region: z.string().default("us-central1"), projectId: z.string(), defaultHeaders: z.record(z.string()).optional(), defaultQuery: z.record(z.string()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"], preferredMimeType: "image/webp", // The 4 / 3 compensates for the 33% increase in size when converting to base64 maxSizeInMB: (5 / 4) * 3, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export async function endpointAnthropicVertex( input: z.input<typeof endpointAnthropicVertexParametersSchema> ): Promise<Endpoint> { const { region, projectId, model, defaultHeaders, defaultQuery, multimodal } = endpointAnthropicVertexParametersSchema.parse(input); let AnthropicVertex; try { AnthropicVertex = (await import("@anthropic-ai/vertex-sdk")).AnthropicVertex; } catch (e) { throw new Error("Failed to import @anthropic-ai/vertex-sdk", { cause: e }); } const anthropic = new AnthropicVertex({ baseURL: `https://${region}-aiplatform.googleapis.com/v1`, region, projectId, defaultHeaders, defaultQuery, }); return async ({ messages, preprompt }) => { let system = preprompt; if (messages?.[0]?.from === "system") { system = messages[0].content; } let tokenId = 0; return (async function* () { const stream = anthropic.messages.stream({ model: model.id ?? model.name, messages: (await endpointMessagesToAnthropicMessages( messages, multimodal )) as MessageParam[], max_tokens: model.parameters?.max_new_tokens, temperature: model.parameters?.temperature, top_p: model.parameters?.top_p, top_k: model.parameters?.top_k, stop_sequences: model.parameters?.stop, system, }); while (true) { const result = await Promise.race([stream.emitted("text"), stream.emitted("end")]); // Stream end if (result === undefined) { yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: await stream.finalText(), details: null, } satisfies TextGenerationStreamOutput; return; } // Text delta yield { token: { id: tokenId++, text: result as unknown as string, special: false, logprob: 0, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } })(); }; }
chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropicVertex.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropicVertex.ts", "repo_id": "chat-ui", "token_count": 1193 }
81
import { z } from "zod"; import { openAICompletionToTextGenerationStream } from "./openAICompletionToTextGenerationStream"; import { openAIChatToTextGenerationSingle, openAIChatToTextGenerationStream, } from "./openAIChatToTextGenerationStream"; import type { CompletionCreateParamsStreaming } from "openai/resources/completions"; import type { ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, ChatCompletionTool, } from "openai/resources/chat/completions"; import type { FunctionDefinition, FunctionParameters } from "openai/resources/shared"; import { buildPrompt } from "$lib/buildPrompt"; import { config } from "$lib/server/config"; import type { Endpoint } from "../endpoints"; import type OpenAI from "openai"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { MessageFile } from "$lib/types/Message"; import { type Tool } from "$lib/types/Tool"; import type { EndpointMessage } from "../endpoints"; import { v4 as uuidv4 } from "uuid"; function createChatCompletionToolsArray(tools: Tool[] | undefined): ChatCompletionTool[] { const toolChoices = [] as ChatCompletionTool[]; if (tools === undefined) { return toolChoices; } for (const t of tools) { const requiredProperties = [] as string[]; const properties = {} as Record<string, unknown>; for (const idx in t.inputs) { const parameterDefinition = t.inputs[idx]; const parameter = {} as Record<string, unknown>; switch (parameterDefinition.type) { case "str": parameter.type = "string"; break; case "float": case "int": parameter.type = "number"; break; case "bool": parameter.type = "boolean"; break; case "file": throw new Error("File type's currently not supported"); default: throw new Error(`Unknown tool IO type: ${t}`); } if ("description" in parameterDefinition) { parameter.description = parameterDefinition.description; } if (parameterDefinition.paramType == "required") { requiredProperties.push(t.inputs[idx].name); } properties[t.inputs[idx].name] = parameter; } const functionParameters: FunctionParameters = { type: "object", ...(requiredProperties.length > 0 ? 
{ required: requiredProperties } : {}), properties, }; const functionDefinition: FunctionDefinition = { name: t.name, description: t.description, parameters: functionParameters, }; const toolDefinition: ChatCompletionTool = { type: "function", function: functionDefinition, }; toolChoices.push(toolDefinition); } return toolChoices; } export const endpointOAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("openai"), baseURL: z.string().url().default("https://api.openai.com/v1"), apiKey: z.string().default(config.OPENAI_API_KEY || config.HF_TOKEN || "sk-"), completion: z .union([z.literal("completions"), z.literal("chat_completions")]) .default("chat_completions"), defaultHeaders: z.record(z.string()).optional(), defaultQuery: z.record(z.string()).optional(), extraBody: z.record(z.any()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: Infinity, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), /* enable use of max_completion_tokens in place of max_tokens */ useCompletionTokens: z.boolean().default(false), streamingSupported: z.boolean().default(true), }); export async function endpointOai( input: z.input<typeof endpointOAIParametersSchema> ): Promise<Endpoint> { const { baseURL, apiKey, completion, model, defaultHeaders, defaultQuery, multimodal, extraBody, useCompletionTokens, streamingSupported, } = endpointOAIParametersSchema.parse(input); let OpenAI; try { OpenAI = (await import("openai")).OpenAI; } catch (e) { throw new Error("Failed to import OpenAI", { cause: e }); } const openai = new OpenAI({ apiKey: apiKey || "sk-", baseURL, defaultHeaders, defaultQuery, }); const imageProcessor = makeImageProcessor(multimodal.image); if (completion === "completions") { if (model.tools) { throw new Error( "Tools are not supported for 'completions' mode, switch to 'chat_completions' instead" ); } return async ({ messages, preprompt, continueMessage, generateSettings, conversationId }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const parameters = { ...model.parameters, ...generateSettings }; const body: CompletionCreateParamsStreaming = { model: model.id ?? model.name, prompt, stream: true, max_tokens: parameters?.max_new_tokens, stop: parameters?.stop, temperature: parameters?.temperature, top_p: parameters?.top_p, frequency_penalty: parameters?.repetition_penalty, presence_penalty: parameters?.presence_penalty, }; const openAICompletion = await openai.completions.create(body, { body: { ...body, ...extraBody }, headers: { "ChatUI-Conversation-ID": conversationId?.toString() ?? 
"", "X-use-cache": "false", }, }); return openAICompletionToTextGenerationStream(openAICompletion); }; } else if (completion === "chat_completions") { return async ({ messages, preprompt, generateSettings, tools, toolResults, conversationId, }) => { // Format messages for the chat API, handling multimodal content if supported let messagesOpenAI: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = await prepareMessages(messages, imageProcessor, !model.tools && model.multimodal); // Check if a system message already exists as the first message const hasSystemMessage = messagesOpenAI.length > 0 && messagesOpenAI[0]?.role === "system"; if (hasSystemMessage) { // System message exists - preserve user configuration if (preprompt !== undefined) { // Prepend preprompt to existing system message if preprompt exists const userSystemPrompt = messagesOpenAI[0].content || ""; messagesOpenAI[0].content = preprompt + (userSystemPrompt ? "\n\n" + userSystemPrompt : ""); } // If no preprompt, user's system message remains unchanged } else { // No system message exists - create a new one with preprompt or empty string messagesOpenAI = [{ role: "system", content: preprompt ?? "" }, ...messagesOpenAI]; } // Handle models that don't support system role by converting to user message // This maintains compatibility with older or non-standard models if ( !model.systemRoleSupported && messagesOpenAI.length > 0 && messagesOpenAI[0]?.role === "system" ) { messagesOpenAI[0] = { ...messagesOpenAI[0], role: "user", }; } // Format tool results for the API to provide context for follow-up tool calls // This creates the full conversation flow needed for multi-step tool interactions if (toolResults && toolResults.length > 0) { const toolCallRequests: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = { role: "assistant", content: null, tool_calls: [], }; const responses: Array<OpenAI.Chat.Completions.ChatCompletionToolMessageParam> = []; for (const result of toolResults) { const id = result?.call?.toolId || uuidv4(); const toolCallResult: OpenAI.Chat.Completions.ChatCompletionMessageToolCall = { type: "function", function: { name: result.call.name, arguments: JSON.stringify(result.call.parameters), }, id, }; toolCallRequests.tool_calls?.push(toolCallResult); const toolCallResponse: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = { role: "tool", content: "", tool_call_id: id, }; if ("outputs" in result) { toolCallResponse.content = JSON.stringify(result.outputs); } responses.push(toolCallResponse); } messagesOpenAI.push(toolCallRequests); messagesOpenAI.push(...responses); } // Combine model defaults with request-specific parameters const parameters = { ...model.parameters, ...generateSettings }; const toolCallChoices = createChatCompletionToolsArray(tools); const body = { model: model.id ?? model.name, messages: messagesOpenAI, stream: streamingSupported, // Support two different ways of specifying token limits depending on the model ...(useCompletionTokens ? { max_completion_tokens: parameters?.max_new_tokens } : { max_tokens: parameters?.max_new_tokens }), stop: parameters?.stop, temperature: parameters?.temperature, top_p: parameters?.top_p, frequency_penalty: parameters?.repetition_penalty, presence_penalty: parameters?.presence_penalty, // Only include tool configuration if tools are provided ...(toolCallChoices.length > 0 ? 
{ tools: toolCallChoices, tool_choice: "auto" } : {}), }; // Handle both streaming and non-streaming responses with appropriate processors if (streamingSupported) { const openChatAICompletion = await openai.chat.completions.create( body as ChatCompletionCreateParamsStreaming, { body: { ...body, ...extraBody }, headers: { "ChatUI-Conversation-ID": conversationId?.toString() ?? "", "X-use-cache": "false", }, } ); return openAIChatToTextGenerationStream(openChatAICompletion); } else { const openChatAICompletion = await openai.chat.completions.create( body as ChatCompletionCreateParamsNonStreaming, { body: { ...body, ...extraBody }, headers: { "ChatUI-Conversation-ID": conversationId?.toString() ?? "", "X-use-cache": "false", }, } ); return openAIChatToTextGenerationSingle(openChatAICompletion); } }; } else { throw new Error("Invalid completion type"); } } async function prepareMessages( messages: EndpointMessage[], imageProcessor: ReturnType<typeof makeImageProcessor>, isMultimodal: boolean ): Promise<OpenAI.Chat.Completions.ChatCompletionMessageParam[]> { return Promise.all( messages.map(async (message) => { if (message.from === "user" && isMultimodal) { return { role: message.from, content: [ ...(await prepareFiles(imageProcessor, message.files ?? [])), { type: "text", text: message.content }, ], }; } return { role: message.from, content: message.content, }; }) ); } async function prepareFiles( imageProcessor: ReturnType<typeof makeImageProcessor>, files: MessageFile[] ): Promise<OpenAI.Chat.Completions.ChatCompletionContentPartImage[]> { const processedFiles = await Promise.all( files.filter((file) => file.mime.startsWith("image/")).map(imageProcessor) ); return processedFiles.map((file) => ({ type: "image_url" as const, image_url: { url: `data:${file.mime};base64,${file.image.toString("base64")}`, }, })); }
chat-ui/src/lib/server/endpoints/openai/endpointOai.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/openai/endpointOai.ts", "repo_id": "chat-ui", "token_count": 4166 }
82
import type { ProcessedModel } from "../models"; import type { Endpoint } from "../endpoints/endpoints"; import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { Assistant } from "$lib/types/Assistant"; export interface TextGenerationContext { model: ProcessedModel; endpoint: Endpoint; conv: Conversation; messages: Message[]; assistant?: Pick<Assistant, "rag" | "dynamicPrompt" | "generateSettings" | "tools">; isContinue: boolean; webSearch: boolean; toolsPreference: Array<string>; promptedAt: Date; ip: string; username?: string; }
chat-ui/src/lib/server/textGeneration/types.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/types.ts", "repo_id": "chat-ui", "token_count": 190 }
83
import { sentences as splitBySentences } from "sbd"; import { MarkdownElementType, type MarkdownElement } from "../types"; export function chunkElements(elements: MarkdownElement[], maxLength: number): MarkdownElement[] { return elements.flatMap((elem) => { // Can't split headers because it would break the tree, and this situation should be rare // so we just cut off the end if (elem.type === MarkdownElementType.Header) { return { ...elem, content: elem.content.slice(0, maxLength) }; } const contentChunks = enforceMaxLength(elem.content, maxLength); return contentChunks.map<MarkdownElement>((content) => ({ ...elem, content })); }); } const delimitersByPriority = ["?", "!", ".", ";", ":", ",", "|", " - ", " ", "-"]; function enforceMaxLength(text: string, maxLength: number): string[] { if (text.length <= maxLength) return [text].filter(Boolean); return splitBySentences(text) .flatMap((sentence) => { if (sentence.length <= maxLength) return sentence; // Discover all necessary split points to fit the sentence within the max length const indices: [number, number][] = []; while ((indices.at(-1)?.[1] ?? 0) < sentence.length) { const prevIndex = indices.at(-1)?.[1] ?? 0; // Remaining text fits within maxLength if (prevIndex + maxLength >= sentence.length) { indices.push([prevIndex, sentence.length]); continue; } const bestDelimiter = delimitersByPriority.find( (delimiter) => sentence.lastIndexOf(delimiter, prevIndex + maxLength) !== -1 ); // Fallback in the unusual case that no delimiter is found if (!bestDelimiter) { indices.push([prevIndex, prevIndex + maxLength]); continue; } const closestDelimiter = sentence.lastIndexOf(bestDelimiter, prevIndex + maxLength); indices.push([prevIndex, Math.max(prevIndex + 1, closestDelimiter)]); } return indices.map((sliceIndices) => sentence.slice(...sliceIndices)); }) .reduce<string[]>( (chunks, sentence) => { const lastChunk = chunks[chunks.length - 1]; if (lastChunk.length + sentence.length <= maxLength) { return [...chunks.slice(0, -1), lastChunk + sentence]; } return [...chunks, sentence]; }, [""] ) .filter(Boolean); }
chat-ui/src/lib/server/websearch/markdown/utils/chunk.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/utils/chunk.ts", "repo_id": "chat-ui", "token_count": 801 }
84
import { config } from "$lib/server/config"; import { isURL } from "$lib/utils/isUrl"; import type { WebSearchSource } from "$lib/types/WebSearch"; interface YouWebSearch { hits: YouSearchHit[]; latency: number; } interface YouSearchHit { url: string; title: string; description: string; snippets: string[]; } export default async function searchWebYouApi(query: string): Promise<WebSearchSource[]> { const response = await fetch(`https://api.ydc-index.io/search?query=${query}`, { method: "GET", headers: { "X-API-Key": config.YDC_API_KEY, "Content-type": "application/json; charset=UTF-8", }, }); if (!response.ok) { throw new Error(`You.com API returned error code ${response.status} - ${response.statusText}`); } const data = (await response.json()) as YouWebSearch; const formattedResultsWithSnippets = data.hits .filter(({ url }) => isURL(url)) .map(({ title, url, snippets }) => ({ title, link: url, text: snippets?.join("\n") || "", })) .sort((a, b) => b.text.length - a.text.length); // desc order by text length return formattedResultsWithSnippets; }
chat-ui/src/lib/server/websearch/search/endpoints/youApi.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/youApi.ts", "repo_id": "chat-ui", "token_count": 401 }
85
export interface ConfigKey { key: string; // unique value: string; }
chat-ui/src/lib/types/ConfigKey.ts/0
{ "file_path": "chat-ui/src/lib/types/ConfigKey.ts", "repo_id": "chat-ui", "token_count": 22 }
86
export interface Timestamps { createdAt: Date; updatedAt: Date; }
chat-ui/src/lib/types/Timestamps.ts/0
{ "file_path": "chat-ui/src/lib/types/Timestamps.ts", "repo_id": "chat-ui", "token_count": 23 }
87
import type { Model } from "$lib/types/Model"; import { AutoTokenizer, PreTrainedTokenizer } from "@huggingface/transformers"; export async function getTokenizer(_modelTokenizer: Exclude<Model["tokenizer"], undefined>) { if (typeof _modelTokenizer === "string") { // return auto tokenizer return await AutoTokenizer.from_pretrained(_modelTokenizer); } else { // construct & return pretrained tokenizer const { tokenizerUrl, tokenizerConfigUrl } = _modelTokenizer satisfies { tokenizerUrl: string; tokenizerConfigUrl: string; }; const tokenizerJSON = await (await fetch(tokenizerUrl)).json(); const tokenizerConfig = await (await fetch(tokenizerConfigUrl)).json(); return new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig); } }
chat-ui/src/lib/utils/getTokenizer.ts/0
{ "file_path": "chat-ui/src/lib/utils/getTokenizer.ts", "repo_id": "chat-ui", "token_count": 229 }
88
export function sum(nums: number[]): number { return nums.reduce((a, b) => a + b, 0); }
chat-ui/src/lib/utils/sum.ts/0
{ "file_path": "chat-ui/src/lib/utils/sum.ts", "repo_id": "chat-ui", "token_count": 35 }
89
export type TreeId = string; export type Tree<T> = { rootMessageId?: TreeId; messages: TreeNode<T>[]; }; export type TreeNode<T> = T & { id: TreeId; ancestors?: TreeId[]; children?: TreeId[]; }; export type NewNode<T> = Omit<TreeNode<T>, "id">;
chat-ui/src/lib/utils/tree/tree.d.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/tree.d.ts", "repo_id": "chat-ui", "token_count": 102 }
90
import { collections } from "$lib/server/database"; import type { Assistant } from "$lib/types/Assistant"; import type { User } from "$lib/types/User"; import { generateQueryTokens } from "$lib/utils/searchTokens.js"; import type { Filter } from "mongodb"; import { config } from "$lib/server/config"; import { ReviewStatus } from "$lib/types/Review"; const NUM_PER_PAGE = 24; export async function GET({ url, locals }) { const modelId = url.searchParams.get("modelId"); const pageIndex = parseInt(url.searchParams.get("p") ?? "0"); const username = url.searchParams.get("user"); const query = url.searchParams.get("q")?.trim() ?? null; const showUnfeatured = url.searchParams.get("showUnfeatured") === "true"; const createdByCurrentUser = locals.user?.username && locals.user.username === username; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { return Response.json({ message: `User "${username}" doesn't exist` }, { status: 404 }); } } // if we require featured assistants, that we are not on a user page and we are not an admin who wants to see unfeatured assistants, we show featured assistants let shouldBeFeatured = {}; if (config.REQUIRE_FEATURED_ASSISTANTS === "true" && !(locals.isAdmin && showUnfeatured)) { if (!user) { // only show featured assistants on the community page shouldBeFeatured = { review: ReviewStatus.APPROVED }; } else if (!createdByCurrentUser) { // on a user page show assistants that have been approved or are pending shouldBeFeatured = { review: { $in: [ReviewStatus.APPROVED, ReviewStatus.PENDING] } }; } } // fetch the top assistants sorted by user count from biggest to smallest, filter out all assistants with only 1 users. filter by model too if modelId is provided const filter: Filter<Assistant> = { ...(modelId && { modelId }), ...(user && { createdById: user._id }), ...(query && { searchTokens: { $all: generateQueryTokens(query) } }), ...shouldBeFeatured, }; const assistants = await collections.assistants .find(filter) .skip(NUM_PER_PAGE * pageIndex) .sort({ userCount: -1 }) .limit(NUM_PER_PAGE) .toArray(); const numTotalItems = await collections.assistants.countDocuments(filter); return Response.json({ assistants, selectedModel: modelId ?? "", numTotalItems, numItemsPerPage: NUM_PER_PAGE, query, }); }
chat-ui/src/routes/api/assistants/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/assistants/+server.ts", "repo_id": "chat-ui", "token_count": 803 }
91
import ChatThumbnail from "./ChatThumbnail.svelte"; import { collections } from "$lib/server/database"; import { error, type RequestHandler } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { render } from "svelte/server"; import { Resvg } from "@resvg/resvg-js"; import satori from "satori"; import { html } from "satori-html"; import InterRegular from "$lib/server/fonts/Inter-Regular.ttf"; import InterBold from "$lib/server/fonts/Inter-Bold.ttf"; import sharp from "sharp"; export const GET: RequestHandler = (async ({ params }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { error(404, "Assistant not found."); } let avatar = ""; const fileId = collections.bucket.find({ filename: assistant._id.toString() }); const file = await fileId.next(); if (file) { avatar = await (async () => { const fileStream = collections.bucket.openDownloadStream(file?._id); const fileBuffer = await new Promise<Buffer>((resolve, reject) => { const chunks: Uint8Array[] = []; fileStream.on("data", (chunk) => chunks.push(chunk)); fileStream.on("error", reject); fileStream.on("end", () => resolve(Buffer.concat(chunks))); }); return fileBuffer; })() .then(async (buf) => sharp(buf).jpeg().toBuffer()) // convert to jpeg bc satori png is really slow .then(async (buf) => "data:image/jpeg;base64," + buf.toString("base64")); } const renderedComponent = render(ChatThumbnail, { props: { name: assistant.name, description: assistant.description, createdByName: assistant.createdByName, avatar, }, }); const reactLike = html("<style>" + renderedComponent.head + "</style>" + renderedComponent.body); const svg = await satori(reactLike, { width: 1200, height: 648, fonts: [ { name: "Inter", data: InterRegular as unknown as ArrayBuffer, weight: 500, }, { name: "Inter", data: InterBold as unknown as ArrayBuffer, weight: 700, }, ], }); const png = new Resvg(svg, { fitTo: { mode: "original" }, }) .render() .asPng(); return new Response(png, { headers: { "Content-Type": "image/png", }, }); }) satisfies RequestHandler;
chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts", "repo_id": "chat-ui", "token_count": 824 }
92
import { assert, it, describe, afterEach, vi, expect } from "vitest"; import type { Cookies } from "@sveltejs/kit"; import { collections } from "$lib/server/database"; import { updateUser } from "./updateUser"; import { ObjectId } from "mongodb"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { defaultModel } from "$lib/server/models"; import { findUser } from "$lib/server/auth"; import { defaultEmbeddingModel } from "$lib/server/embeddingModels"; const userData = { preferred_username: "new-username", name: "name", picture: "https://example.com/avatar.png", sub: "1234567890", }; Object.freeze(userData); const locals = { userId: "1234567890", sessionId: "1234567890", isAdmin: false, }; // @ts-expect-error SvelteKit cookies dumb mock const cookiesMock: Cookies = { set: vi.fn(), }; const insertRandomUser = async () => { const res = await collections.users.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), username: "base-username", name: userData.name, avatarUrl: userData.picture, hfUserId: userData.sub, }); return res.insertedId; }; const insertRandomConversations = async (count: number) => { const res = await collections.conversations.insertMany( new Array(count).fill(0).map(() => ({ _id: new ObjectId(), title: "random title", messages: [], model: defaultModel.id, embeddingModel: defaultEmbeddingModel.id, createdAt: new Date(), updatedAt: new Date(), sessionId: locals.sessionId, })) ); return res.insertedIds; }; describe("login", () => { it("should update user if existing", async () => { await insertRandomUser(); await updateUser({ userData, locals, cookies: cookiesMock }); const existingUser = await collections.users.findOne({ hfUserId: userData.sub }); assert.equal(existingUser?.name, userData.name); expect(cookiesMock.set).toBeCalledTimes(1); }); it("should migrate pre-existing conversations for new user", async () => { const insertedId = await insertRandomUser(); await insertRandomConversations(2); await updateUser({ userData, locals, cookies: cookiesMock }); const conversationCount = await collections.conversations.countDocuments({ userId: insertedId, sessionId: { $exists: false }, }); assert.equal(conversationCount, 2); await collections.conversations.deleteMany({ userId: insertedId }); }); it("should create default settings for new user", async () => { await updateUser({ userData, locals, cookies: cookiesMock }); const user = await findUser(locals.sessionId); assert.exists(user); const settings = await collections.settings.findOne({ userId: user?._id }); expect(settings).toMatchObject({ userId: user?._id, updatedAt: expect.any(Date), createdAt: expect.any(Date), ethicsModalAcceptedAt: expect.any(Date), ...DEFAULT_SETTINGS, }); await collections.settings.deleteOne({ userId: user?._id }); }); it("should migrate pre-existing settings for pre-existing user", async () => { const { insertedId } = await collections.settings.insertOne({ sessionId: locals.sessionId, ethicsModalAcceptedAt: new Date(), updatedAt: new Date(), createdAt: new Date(), ...DEFAULT_SETTINGS, shareConversationsWithModelAuthors: false, }); await updateUser({ userData, locals, cookies: cookiesMock }); const settings = await collections.settings.findOne({ _id: insertedId, sessionId: { $exists: false }, }); assert.exists(settings); const user = await collections.users.findOne({ hfUserId: userData.sub }); expect(settings).toMatchObject({ userId: user?._id, updatedAt: expect.any(Date), createdAt: expect.any(Date), ethicsModalAcceptedAt: expect.any(Date), ...DEFAULT_SETTINGS, 
shareConversationsWithModelAuthors: false, }); await collections.settings.deleteOne({ userId: user?._id }); }); }); afterEach(async () => { await collections.users.deleteMany({ hfUserId: userData.sub }); await collections.sessions.deleteMany({}); locals.userId = "1234567890"; locals.sessionId = "1234567890"; vi.clearAllMocks(); });
chat-ui/src/routes/login/callback/updateUser.spec.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/updateUser.spec.ts", "repo_id": "chat-ui", "token_count": 1415 }
93
<script lang="ts"> import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import { useSettingsStore } from "$lib/stores/settings"; import Switch from "$lib/components/Switch.svelte"; import { goto } from "$app/navigation"; import { error } from "$lib/stores/errors"; import { base } from "$app/paths"; import { page } from "$app/state"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; import { useAPIClient } from "$lib/APIClient"; const publicConfig = usePublicConfig(); let settings = useSettingsStore(); const client = useAPIClient(); </script> <div class="flex w-full flex-col gap-5"> <h2 class="text-center text-xl font-semibold text-gray-800 md:text-left">Application Settings</h2> {#if !!publicConfig.PUBLIC_COMMIT_SHA} <div class="flex flex-col items-start justify-between text-xl font-semibold text-gray-800"> <a href={`https://github.com/huggingface/chat-ui/commit/${publicConfig.PUBLIC_COMMIT_SHA}`} target="_blank" rel="noreferrer" class="text-sm font-light text-gray-500" > Latest deployment <span class="gap-2 font-mono" >{publicConfig.PUBLIC_COMMIT_SHA.slice(0, 7)}</span > </a> </div> {/if} {#if page.data.isAdmin} <p class="text-red-500">You are an admin.</p> {/if} <div class="flex h-full max-w-2xl flex-col gap-2 max-sm:pt-0"> {#if publicConfig.PUBLIC_APP_DATA_SHARING === "1"} <label class="flex items-center"> <Switch name="shareConversationsWithModelAuthors" bind:checked={$settings.shareConversationsWithModelAuthors} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2"> Share conversations with model authors </div> </label> <p class="text-sm text-gray-500"> Sharing your data will help improve the training data and make open models better over time. </p> {/if} <label class="mt-6 flex items-center"> <Switch name="hideEmojiOnSidebar" bind:checked={$settings.hideEmojiOnSidebar} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Hide emoticons in conversation topics <p class="text-sm font-normal text-gray-500"> Emoticons are shown in the sidebar by default, enable this to hide them. </p> </div> </label> <label class="mt-6 flex items-center"> <Switch name="disableStream" bind:checked={$settings.disableStream} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Disable streaming tokens </div> </label> <label class="mt-6 flex items-center"> <Switch name="directPaste" bind:checked={$settings.directPaste} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Paste text directly into chat <p class="text-sm font-normal text-gray-500"> By default, when pasting long text into the chat, we treat it as a plaintext file. Enable this to paste directly into the chat instead. 
</p> </div> </label> <div class="mt-12 flex flex-col gap-3"> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions" target="_blank" rel="noreferrer" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-sm " /> Share your feedback on HuggingChat</a > {#if publicConfig.isHuggingChat} <a href="{base}/privacy" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-sm " /> About & Privacy</a > {/if} <button onclick={async (e) => { e.preventDefault(); confirm("Are you sure you want to delete all conversations?") && client.conversations .delete() .then(async () => { await goto(`${base}/`, { invalidateAll: true }); }) .catch((err) => { console.error(err); $error = err.message; }); }} type="submit" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonTrashCan class="mr-2 inline text-sm text-red-500" />Delete all conversations</button > </div> </div> </div>
chat-ui/src/routes/settings/(nav)/application/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/application/+page.svelte", "repo_id": "chat-ui", "token_count": 1691 }
94
<script lang="ts"> import Modal from "$lib/components/Modal.svelte"; import ToolEdit from "../../ToolEdit.svelte"; let { data } = $props(); </script> <Modal on:close={() => window.history.back()} width="h-[95dvh] w-[90dvw] overflow-hidden rounded-2xl bg-white shadow-2xl outline-none sm:h-[85dvh] xl:w-[1200px] 2xl:h-[75dvh]" closeButton > <ToolEdit tool={data.tool} readonly={!data.tool.createdByMe} on:close={() => { window.history.back(); }} /> </Modal>
chat-ui/src/routes/tools/[toolId]/edit/+page.svelte/0
{ "file_path": "chat-ui/src/routes/tools/[toolId]/edit/+page.svelte", "repo_id": "chat-ui", "token_count": 210 }
95
{ "name": "@reflink/reflink", "version": "0.0.0", "main": "index.js" }
chat-ui/stub/@reflink/reflink/package.json/0
{ "file_path": "chat-ui/stub/@reflink/reflink/package.json", "repo_id": "chat-ui", "token_count": 40 }
96
import json import os import tempfile import datasets from datasets.arrow_writer import ArrowWriter from datasets.features import Array2D from utils import generate_examples, get_duration SHAPE_TEST_1 = (30, 487) SHAPE_TEST_2 = (36, 1024) SPEED_TEST_SHAPE = (100, 100) SPEED_TEST_N_EXAMPLES = 100 DEFAULT_FEATURES = datasets.Features( {"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")} ) RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def write(my_features, dummy_data, tmp_dir): with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: for key, record in dummy_data: example = my_features.encode_example(record) writer.write(example) num_examples, num_bytes = writer.finalize() @get_duration def read_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for _ in dataset: pass @get_duration def read_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for _ in dataset: pass @get_duration def read_batch_unformated(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_batch_formatted_as_numpy(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_col_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for col in feats: _ = dataset[col] @get_duration def read_col_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for col in feats: _ = dataset[col] def benchmark_array_xd(): times = {} read_functions = ( read_unformated, read_formatted_as_numpy, read_batch_unformated, read_batch_formatted_as_numpy, read_col_unformated, read_col_formatted_as_numpy, ) with tempfile.TemporaryDirectory() as tmp_dir: feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")}) data = generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES) times["write_array2d"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_array2d"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE} ) times["write_nested_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ 
+ " after write_nested_sequence"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]}, ) times["write_flattened_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_flattened_sequence"] = read_func(feats, tmp_dir) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_array_xd()
datasets/benchmarks/benchmark_array_xd.py/0
{ "file_path": "datasets/benchmarks/benchmark_array_xd.py", "repo_id": "datasets", "token_count": 2176 }
97
- sections: - local: index title: 🤗 Datasets - local: quickstart title: Quickstart - local: installation title: Installation title: Get started - sections: - local: tutorial title: Overview - local: load_hub title: Load a dataset from the Hub - local: access title: Know your dataset - local: use_dataset title: Preprocess - local: create_dataset title: Create a dataset - local: upload_dataset title: Share a dataset to the Hub title: "Tutorials" - sections: - local: how_to title: Overview - sections: - local: loading title: Load - local: process title: Process - local: stream title: Stream - local: use_with_pytorch title: Use with PyTorch - local: use_with_tensorflow title: Use with TensorFlow - local: use_with_numpy title: Use with NumPy - local: use_with_jax title: Use with JAX - local: use_with_pandas title: Use with Pandas - local: use_with_polars title: Use with Polars - local: use_with_pyarrow title: Use with PyArrow - local: use_with_spark title: Use with Spark - local: cache title: Cache management - local: filesystems title: Cloud storage - local: faiss_es title: Search index - local: cli title: CLI - local: troubleshoot title: Troubleshooting title: "General usage" - sections: - local: audio_load title: Load audio data - local: audio_process title: Process audio data - local: audio_dataset title: Create an audio dataset title: "Audio" - sections: - local: image_load title: Load image data - local: image_process title: Process image data - local: image_dataset title: Create an image dataset - local: depth_estimation title: Depth estimation - local: image_classification title: Image classification - local: semantic_segmentation title: Semantic segmentation - local: object_detection title: Object detection - local: video_load title: Load video data - local: video_dataset title: Create a video dataset - local: document_load title: Load document data - local: document_dataset title: Create a document dataset title: "Vision" - sections: - local: nlp_load title: Load text data - local: nlp_process title: Process text data title: "Text" - sections: - local: tabular_load title: Load tabular data title: "Tabular" - sections: - local: share title: Share - local: dataset_card title: Create a dataset card - local: repository_structure title: Structure your repository title: "Dataset repository" title: "How-to guides" - sections: - local: about_arrow title: Datasets 🤝 Arrow - local: about_cache title: The cache - local: about_mapstyle_vs_iterable title: Dataset or IterableDataset - local: about_dataset_features title: Dataset features - local: about_dataset_load title: Build and load - local: about_map_batch title: Batch mapping title: "Conceptual guides" - sections: - local: package_reference/main_classes title: Main classes - local: package_reference/builder_classes title: Builder classes - local: package_reference/loading_methods title: Loading methods - local: package_reference/table_classes title: Table Classes - local: package_reference/utilities title: Utilities title: "Reference"
datasets/docs/source/_toctree.yml/0
{ "file_path": "datasets/docs/source/_toctree.yml", "repo_id": "datasets", "token_count": 1337 }
98
# Create a document dataset This guide will show you how to create a document dataset with `PdfFolder` and some metadata. This is a no-code solution for quickly creating a document dataset with several thousand pdfs. <Tip> You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub. </Tip> ## PdfFolder The `PdfFolder` is a dataset builder designed to quickly load a document dataset with several thousand pdfs without requiring you to write any code. <Tip> 💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `PdfFolder` creates dataset splits based on your dataset repository structure. </Tip> `PdfFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like: ``` folder/train/resume/0001.pdf folder/train/resume/0002.pdf folder/train/resume/0003.pdf folder/train/invoice/0001.pdf folder/train/invoice/0002.pdf folder/train/invoice/0003.pdf ``` If the dataset follows the `PdfFolder` structure, then you can load it directly with [`load_dataset`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("path/to/folder") ``` This is equivalent to passing `pdffolder` manually in [`load_dataset`] and the directory in `data_dir`: ```py >>> dataset = load_dataset("pdffolder", data_dir="/path/to/folder") ``` You can also use `pdffolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure: ``` folder/train/resume/0001.pdf folder/train/resume/0002.pdf folder/test/invoice/0001.pdf folder/test/invoice/0002.pdf ``` <Tip warning={true}> If all PDF files are contained in a single directory or if they are not on the same level of directory structure, `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly. </Tip> If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl` or a Parquet file `metadata.parquet`. ``` folder/train/metadata.csv folder/train/0001.pdf folder/train/0002.pdf folder/train/0003.pdf ``` Your `metadata.csv` file must have a `file_name` or `*_file_name` field which links PDF files with their metadata: ```csv file_name,additional_feature 0001.pdf,This is a first value of a text feature you added to your pdfs 0002.pdf,This is a second value of a text feature you added to your pdfs 0003.pdf,This is a third value of a text feature you added to your pdfs ``` or using `metadata.jsonl`: ```jsonl {"file_name": "0001.pdf", "additional_feature": "This is a first value of a text feature you added to your pdfs"} {"file_name": "0002.pdf", "additional_feature": "This is a second value of a text feature you added to your pdfs"} {"file_name": "0003.pdf", "additional_feature": "This is a third value of a text feature you added to your pdfs"} ``` Here the `file_name` must be the name of the PDF file next to the metadata file. More generally, it must be the relative path from the directory containing the metadata to the PDF file. 
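For instance, if the PDFs are stored in a `data/` subfolder next to the metadata file (an arbitrary layout chosen here just for illustration), the `file_name` values are the paths relative to `metadata.csv`:

```
folder/train/metadata.csv
folder/train/data/0001.pdf
folder/train/data/0002.pdf
```

```csv
file_name,additional_feature
data/0001.pdf,This is a first value of a text feature you added to your pdfs
data/0002.pdf,This is a second value of a text feature you added to your pdfs
```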
It's possible to point to more than one pdf in each row in your dataset, for example if both your input and output are pdfs:

```jsonl
{"input_file_name": "0001.pdf", "output_file_name": "0001_output.pdf"}
{"input_file_name": "0002.pdf", "output_file_name": "0002_output.pdf"}
{"input_file_name": "0003.pdf", "output_file_name": "0003_output.pdf"}
```

You can also define lists of pdfs. In that case you need to name the field `file_names` or `*_file_names`. Here is an example:

```jsonl
{"pdfs_file_names": ["0001_part1.pdf", "0001_part2.pdf"], "label": "urgent"}
{"pdfs_file_names": ["0002_part1.pdf", "0002_part2.pdf"], "label": "urgent"}
{"pdfs_file_names": ["0003_part1.pdf", "0003_part2.pdf"], "label": "normal"}
```

### OCR (Optical character recognition)

OCR datasets store the text contained in a pdf. An example `metadata.csv` may look like:

```csv
file_name,text
0001.pdf,Invoice 1234 from 01/01/1970...
0002.pdf,Software Engineer Resume. Education: ...
0003.pdf,Attention is all you need. Abstract. The ...
```

Load the dataset with `PdfFolder`, and it will create a `text` column for the text of each pdf:

```py
>>> dataset = load_dataset("pdffolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["text"]
"Invoice 1234 from 01/01/1970..."
```

### Upload dataset to the Hub

Once you've created a dataset, you can share it to the Hub using `huggingface_hub`, for example. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details).

Upload your dataset with `huggingface_hub.HfApi.upload_folder`:

```py
from huggingface_hub import HfApi
api = HfApi()

api.upload_folder(
    folder_path="/path/to/local/dataset",
    repo_id="username/my-cool-dataset",
    repo_type="dataset",
)
```
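Once the upload finishes, the dataset can be loaded back directly from the Hub by its repository id (re-using the illustrative `username/my-cool-dataset` id and `train` split from the examples above):

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("username/my-cool-dataset", split="train")
```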
datasets/docs/source/document_dataset.mdx/0
{ "file_path": "datasets/docs/source/document_dataset.mdx", "repo_id": "datasets", "token_count": 1662 }
99
# Process text data This guide shows specific methods for processing text datasets. Learn how to: - Tokenize a dataset with [`~Dataset.map`]. - Align dataset labels with label ids for NLI datasets. For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>. ## Map The [`~Dataset.map`] function supports processing batches of examples at once which speeds up tokenization. Load a tokenizer from 🤗 [Transformers](https://huggingface.co/transformers/): ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") ``` Set the `batched` parameter to `True` in the [`~Dataset.map`] function to apply the tokenizer to batches of examples: ```py >>> dataset = dataset.map(lambda examples: tokenizer(examples["text"]), batched=True) >>> dataset[0] {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1, 'input_ids': [101, 1996, 2600, 2003, 16036, 2000, 2022, 1996, 7398, 2301, 1005, 1055, 2047, 1000, 16608, 1000, 1998, 2008, 2002, 1005, 1055, 2183, 2000, 2191, 1037, 17624, 2130, 3618, 2084, 7779, 29058, 8625, 13327, 1010, 3744, 1011, 18856, 19513, 3158, 5477, 4168, 2030, 7112, 16562, 2140, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The [`~Dataset.map`] function converts the returned values to a PyArrow-supported format. But explicitly returning the tensors as NumPy arrays is faster because it is a natively supported PyArrow format. Set `return_tensors="np"` when you tokenize your text: ```py >>> dataset = dataset.map(lambda examples: tokenizer(examples["text"], return_tensors="np"), batched=True) ``` ## Align The [`~Dataset.align_labels_with_mapping`] function aligns a dataset label id with the label name. Not all 🤗 Transformers models follow the prescribed label mapping of the original dataset, especially for NLI datasets. For example, the [MNLI](https://huggingface.co/datasets/glue) dataset uses the following label mapping: ```py >>> label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} ``` To align the dataset label mapping with the mapping used by a model, create a dictionary of the label name and id to align on: ```py >>> label2id = {"contradiction": 0, "neutral": 1, "entailment": 2} ``` Pass the dictionary of the label mappings to the [`~Dataset.align_labels_with_mapping`] function, and the column to align on: ```py >>> from datasets import load_dataset >>> mnli = load_dataset("nyu-mll/glue", "mnli", split="train") >>> mnli_aligned = mnli.align_labels_with_mapping(label2id, "label") ``` You can also use this function to assign a custom mapping of labels to ids.
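In practice, the target mapping often comes from the model you plan to train or evaluate rather than being written by hand. As a minimal sketch (assuming a 🤗 Transformers checkpoint whose config exposes a `label2id` mapping, such as an MNLI-finetuned model; the keys are lowercased here to keep them consistent with the dataset's label names):

```py
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("roberta-large-mnli")
>>> label2id = {name.lower(): idx for name, idx in config.label2id.items()}
>>> mnli_aligned = mnli.align_labels_with_mapping(label2id, "label")
```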
datasets/docs/source/nlp_process.mdx/0
{ "file_path": "datasets/docs/source/nlp_process.mdx", "repo_id": "datasets", "token_count": 1115 }
100
# Share a dataset to the Hub The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away! Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet. ## Upload with the Hub UI The Hub's web-based interface allows users without any developer experience to upload a dataset. ### Create a repository A repository hosts all your dataset files, including the revision history, making storing more than one dataset version possible. 1. Click on your profile and select **New Dataset** to create a new dataset repository. 2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/> </div> ### Upload dataset 1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/> </div> 2. Drag and drop your dataset files and add a brief descriptive commit message. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/> </div> 3. After uploading your dataset files, they are stored in your dataset repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/> </div> ### Create a Dataset card Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. 1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/> </div> 2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. 
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which describe the complete set of allowed (but not required) tag options, such as `annotations_creators`, to help you choose the appropriate tags.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/>
</div>

3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail).

### Load dataset

Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("stevhliu/demo")
```

## Upload with Python

Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python.

1. Begin by installing the library:

```bash
pip install huggingface_hub
```

2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account:

```bash
huggingface-cli login
```

3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("stevhliu/demo")
# dataset = dataset.map(...)  # do all your processing here
>>> dataset.push_to_hub("stevhliu/processed_demo")
```

To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time.

```py
>>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True)
```

To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation.

### Privacy

A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset.

Load a private dataset by providing your authentication token to the `token` parameter:

```py
>>> from datasets import load_dataset

# Load a private individual dataset
>>> dataset = load_dataset("stevhliu/demo", token=True)

# Load a private organization dataset
>>> dataset = load_dataset("organization/dataset_name", token=True)
```

## What's next?

Congratulations, you've completed the tutorials! 🥳

From here, you can go on to:

- Learn more about how to use other 🤗 Datasets functions to [process your dataset](process).
- [Stream large datasets](stream) without downloading them locally.
- [Define your dataset splits and configurations](repository_structure) and share your dataset with the community.

If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
datasets/docs/source/upload_dataset.mdx/0
{ "file_path": "datasets/docs/source/upload_dataset.mdx", "repo_id": "datasets", "token_count": 1999 }
101
# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Download manager interface.""" import enum import io import multiprocessing import os from datetime import datetime from functools import partial from typing import Optional, Union import fsspec from fsspec.core import url_to_fs from tqdm.contrib.concurrent import thread_map from .. import config from ..utils import tqdm as hf_tqdm from ..utils.file_utils import ( ArchiveIterable, FilesIterable, cached_path, is_relative_path, stack_multiprocessing_download_progress_bars, url_or_path_join, ) from ..utils.info_utils import get_size_checksum_dict from ..utils.logging import get_logger, tqdm from ..utils.py_utils import NestedDataStructure, map_nested from ..utils.track import tracked_str from .download_config import DownloadConfig logger = get_logger(__name__) class DownloadMode(enum.Enum): """`Enum` for how to treat pre-existing downloads and data. The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both raw downloads and the prepared dataset if they exist. The generations modes: | | Downloads | Dataset | |-------------------------------------|-----------|---------| | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse | | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh | | `FORCE_REDOWNLOAD` | Fresh | Fresh | """ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists" REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists" FORCE_REDOWNLOAD = "force_redownload" class DownloadManager: is_streaming = False def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, record_checksums=True, ): """Download manager constructor. Args: data_dir: can be used to specify a manual directory to get the files from. dataset_name (`str`): name of dataset this instance will be used for. If provided, downloads will contain which datasets they were used for. download_config (`DownloadConfig`): to specify the cache directory and other download options base_path (`str`): base path that is used when relative paths are used to download files. This can be a remote url. record_checksums (`bool`, defaults to `True`): Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder. 
""" self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") # To record what is being used: {url: {num_bytes: int, checksum: str}} self._recorded_sizes_checksums: dict[str, dict[str, Optional[Union[int, str]]]] = {} self.record_checksums = record_checksums self.download_config = download_config or DownloadConfig() self.downloaded_paths = {} self.extracted_paths = {} @property def manual_dir(self): return self._data_dir @property def downloaded_size(self): """Returns the total size of downloaded files.""" return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values()) def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure): """Record size/checksum of downloaded files.""" delay = 5 for url, path in hf_tqdm( list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())), delay=delay, desc="Computing checksums", ): # call str to support PathLike objects self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict( path, record_checksum=self.record_checksums ) def download(self, url_or_urls): """Download given URL(s). By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior. Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download. Each URL is a `str`. Returns: `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = False if download_config.download_desc is None: download_config.download_desc = "Downloading data" download_func = partial(self._download_batched, download_config=download_config) start_time = datetime.now() with stack_multiprocessing_download_progress_bars(): downloaded_path_or_paths = map_nested( download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, desc="Downloading data files", batched=True, batch_size=-1, ) duration = datetime.now() - start_time logger.info(f"Downloading took {duration.total_seconds() // 60} min") url_or_urls = NestedDataStructure(url_or_urls) downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) start_time = datetime.now() self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) duration = datetime.now() - start_time logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min") return downloaded_path_or_paths.data def _download_batched( self, url_or_filenames: list[str], download_config: DownloadConfig, ) -> list[str]: if len(url_or_filenames) >= 16: download_config = download_config.copy() download_config.disable_tqdm = True download_func = partial(self._download_single, download_config=download_config) fs: fsspec.AbstractFileSystem path = str(url_or_filenames[0]) if is_relative_path(path): # append the relative path to the base_path path = url_or_path_join(self._base_path, path) fs, path = url_to_fs(path, **download_config.storage_options) size = 0 try: size = fs.info(path).get("size", 0) except Exception: pass max_workers = ( config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1 ) # enable multithreading if files are small return thread_map( 
download_func, url_or_filenames, desc=download_config.download_desc or "Downloading", unit="files", position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" and multiprocessing.current_process()._identity else None, max_workers=max_workers, tqdm_class=tqdm, ) else: return [ self._download_single(url_or_filename, download_config=download_config) for url_or_filename in url_or_filenames ] def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str: url_or_filename = str(url_or_filename) if is_relative_path(url_or_filename): # append the relative path to the base_path url_or_filename = url_or_path_join(self._base_path, url_or_filename) out = cached_path(url_or_filename, download_config=download_config) out = tracked_str(out) out.set_origin(url_or_filename) return out def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): """Iterate over files within an archive. Args: path_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(path_or_buf, "read"): return ArchiveIterable.from_buf(path_or_buf) else: return ArchiveIterable.from_urlpath(path_or_buf) def iter_files(self, paths: Union[str, list[str]]): """Iterate over file paths. Args: paths (`str` or `list` of `str`): Root paths. Yields: `str`: File path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_urlpaths(paths) def extract(self, path_or_paths): """Extract given path(s). Args: path_or_paths (path or `list` or `dict`): Path of file to extract. Each path is a `str`. Returns: extracted_path(s): `str`, The extracted paths matching the given input path_or_paths. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ download_config = self.download_config.copy() download_config.extract_compressed_file = True extract_func = partial(self._download_single, download_config=download_config) extracted_paths = map_nested( extract_func, path_or_paths, num_proc=download_config.num_proc, desc="Extracting data files", ) path_or_paths = NestedDataStructure(path_or_paths) extracted_paths = NestedDataStructure(extracted_paths) self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) return extracted_paths.data def download_and_extract(self, url_or_urls): """Download and extract given `url_or_urls`. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`. Returns: extracted_path(s): `str`, extracted paths of given URL(s). 
""" return self.extract(self.download(url_or_urls)) def get_recorded_sizes_checksums(self): return self._recorded_sizes_checksums.copy() def delete_extracted_files(self): paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values()) for key, path in list(self.extracted_paths.items()): if path in paths_to_delete and os.path.isfile(path): os.remove(path) del self.extracted_paths[key] def manage_extracted_files(self): if self.download_config.delete_extracted: self.delete_extracted_files()
datasets/src/datasets/download/download_manager.py/0
{ "file_path": "datasets/src/datasets/download/download_manager.py", "repo_id": "datasets", "token_count": 5650 }
102
# Copyright 2021 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib logger = get_logger() DEVICE_MAPPING: Optional[dict] = None class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]): def __init__(self, features=None, device=None, token_per_repo_id=None, **jnp_array_kwargs): super().__init__(features=features, token_per_repo_id=token_per_repo_id) import jax from jaxlib.xla_client import Device if isinstance(device, Device): raise ValueError( f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` " "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) self.device = device if isinstance(device, str) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: DEVICE_MAPPING = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( f"Device with string identifier {self.device} not listed among the available " f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " f"device: {str(jax.devices()[0])}." 
) self.device = str(jax.devices()[0]) self.jnp_array_kwargs = jnp_array_kwargs @staticmethod def _map_devices_to_str() -> dict[str, "jaxlib.xla_extension.Device"]: import jax return {str(device): device for device in jax.devices()} def _consolidate(self, column): import jax import jax.numpy as jnp if isinstance(column, list) and column: if all( isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(column, axis=0) return column def _tensorize(self, value): import jax import jax.numpy as jnp if isinstance(value, (str, bytes, type(None))): return value elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value.tolist() default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_x64: default_dtype = {"dtype": jnp.int64} else: default_dtype = {"dtype": jnp.int32} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": jnp.float32} if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) if config.TORCHVISION_AVAILABLE and "torchvision" in sys.modules: from torchvision.io import VideoReader if isinstance(value, VideoReader): return value # TODO(QL): set output to jax arrays ? if config.TORCHCODEC_AVAILABLE and "torchcodec" in sys.modules: from torchcodec.decoders import AudioDecoder, VideoDecoder if isinstance(value, (VideoDecoder, AudioDecoder)): return value # TODO(QL): set output to jax arrays ? # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: DEVICE_MAPPING = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) def _recursive_tensorize(self, data_struct): import jax # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(data_struct, torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array): data_struct = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(data_struct, np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) elif isinstance(data_struct, (list, tuple)): return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) return self._tensorize(data_struct) def recursive_tensorize(self, data_struct: dict): return map_nested(self._recursive_tensorize, data_struct, map_list=False) def format_row(self, pa_table: pa.Table) -> Mapping: row = self.numpy_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return self.recursive_tensorize(row) def format_column(self, pa_table: pa.Table) -> "jax.Array": column = self.numpy_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) column = self.recursive_tensorize(column) column = self._consolidate(column) return column def format_batch(self, pa_table: pa.Table) -> Mapping: batch = self.numpy_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) batch = self.recursive_tensorize(batch) for column_name in batch: batch[column_name] = self._consolidate(batch[column_name]) return batch
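# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): formatting a small in-memory Arrow
# table as jax arrays. In user code this formatter is normally selected through
# `Dataset.with_format("jax")`; calling it directly here keeps the example
# self-contained. Assumes jax is installed; the toy values are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    pa_table = pa.table({"x": [[1.0, 2.0], [3.0, 4.0]], "label": [0, 1]})
    formatter = JaxFormatter()
    batch = formatter.format_batch(pa_table)
    # columns whose rows share shape and dtype are stacked into single jax arrays
    print(batch)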
datasets/src/datasets/formatting/jax_formatter.py/0
{ "file_path": "datasets/src/datasets/formatting/jax_formatter.py", "repo_id": "datasets", "token_count": 3107 }
103
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class TextDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Text( cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
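# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): TextDatasetReader is what backs
# `load_dataset("text", ...)` and `Dataset.from_text`, turning each line of a
# plain-text file into one example with a single "text" column. The file name
# below is a placeholder and must exist for the sketch to run.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    dataset = TextDatasetReader("my_corpus.txt", keep_in_memory=True).read()
    print(dataset.column_names)  # expected: ["text"]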
datasets/src/datasets/io/text.py/0
{ "file_path": "datasets/src/datasets/io/text.py", "repo_id": "datasets", "token_count": 961 }
104
import copy import os from collections.abc import Iterator from functools import partial from itertools import groupby from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc from .utils.logging import get_logger if TYPE_CHECKING: from .features.features import Features, FeatureType logger = get_logger(__name__) def inject_arrow_table_documentation(arrow_table_method): def wrapper(fn): fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "") fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table") if hasattr(arrow_table_method, "__annotations__"): fn.__annotations__ = arrow_table_method.__annotations__ return fn return wrapper def _in_memory_arrow_table_from_file(filename: str) -> pa.Table: in_memory_stream = pa.input_stream(filename) opened_stream = pa.ipc.open_stream(in_memory_stream) pa_table = opened_stream.read_all() return pa_table def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table: stream = pa.BufferReader(buffer) opened_stream = pa.ipc.open_stream(stream) table = opened_stream.read_all() return table def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader: memory_mapped_stream = pa.memory_map(filename) return pa.ipc.open_stream(memory_mapped_stream) def read_schema_from_file(filename: str) -> pa.Schema: """ Infer arrow table schema from file without loading whole file into memory. Useful especially while having very big files. """ with pa.memory_map(filename) as memory_mapped_stream: schema = pa.ipc.open_stream(memory_mapped_stream).schema return schema def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table: opened_stream = _memory_mapped_record_batch_reader_from_file(filename) pa_table = opened_stream.read_all() return pa_table def _deepcopy(x, memo: dict): """deepcopy a regular class instance""" cls = x.__class__ result = cls.__new__(cls) memo[id(x)] = result for k, v in x.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) return result def _interpolation_search(arr: list[int], x: int) -> int: """ Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (`List[int]`): non-empty sorted list of integers x (`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values """ i, j = 0, len(arr) - 1 while i < j and arr[i] <= x < arr[j]: k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i])) if arr[k] <= x < arr[k + 1]: return k elif arr[k] < x: i, j = k + 1, j else: i, j = i, k raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.") class IndexedTableMixin: def __init__(self, table: pa.Table): self._schema: pa.Schema = table.schema self._batches: list[pa.RecordBatch] = [ recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0 ] self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64) def fast_gather(self, indices: Union[list[int], np.ndarray]) -> pa.Table: """ Create a pa.Table by gathering the records at the records at the specified indices. 
Should be faster than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute the binary searches in parallel, highly optimized C """ if not len(indices): raise ValueError("Indices must be non-empty") batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1 return pa.Table.from_batches( [ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1) for batch_idx, i in zip(batch_indices, indices) ], schema=self._schema, ) def fast_slice(self, offset=0, length=None) -> pa.Table: """ Slice the Table using interpolation search. The behavior is the same as `pyarrow.Table.slice` but it's significantly faster. Interpolation search is used to find the start and end indexes of the batches we want to keep. The batches to keep are then concatenated to form the sliced Table. """ if offset < 0: raise IndexError("Offset must be non-negative") elif offset >= self._offsets[-1] or (length is not None and length <= 0): return pa.Table.from_batches([], schema=self._schema) i = _interpolation_search(self._offsets, offset) if length is None or length + offset >= self._offsets[-1]: batches = self._batches[i:] batches[0] = batches[0].slice(offset - self._offsets[i]) else: j = _interpolation_search(self._offsets, offset + length - 1) batches = self._batches[i : j + 1] batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j]) batches[0] = batches[0].slice(offset - self._offsets[i]) return pa.Table.from_batches(batches, schema=self._schema) class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. 
Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ return self.table.to_pylist(*args, **kwargs) def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. 
Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) @property def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema @property def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns @property def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns @property def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows @property def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape @property def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes @property def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class TableBlock(Table): """ `TableBlock` is the allowed class inside a `ConcanetationTable`. 
Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`. This is because we don't want a `ConcanetationTable` made out of other `ConcanetationTables`. """ pass class InMemoryTable(TableBlock): """ The table is said in-memory when it is loaded into the user's RAM. Pickling it does copy all the data using memory. Its implementation is simple and uses the underlying pyarrow Table methods directly. This is different from the `MemoryMapped` table, for which pickling doesn't copy all the data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ @classmethod def from_file(cls, filename: str): table = _in_memory_arrow_table_from_file(filename) return cls(table) @classmethod def from_buffer(cls, buffer: pa.Buffer): table = _in_memory_arrow_table_from_buffer(buffer) return cls(table) @classmethod def from_pandas(cls, *args, **kwargs): """ Convert pandas.DataFrame to an Arrow Table. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to null. This behavior can be avoided by constructing an explicit schema and passing it to this function. Args: df (`pandas.DataFrame`): schema (`pyarrow.Schema`, *optional*): The expected schema of the Arrow Table. This can be used to indicate the type of columns if we cannot infer it automatically. If passed, the output will have exactly this schema. Columns specified in the schema that are not found in the DataFrame columns or its index will raise an error. Additional columns or index levels in the DataFrame which are not specified in the schema will be ignored. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting `Table`. The default of None will store the index as a column, except for RangeIndex which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. nthreads (`int`, defaults to `None` (may use up to system CPU count threads)) If greater than 1, convert columns to Arrow in parallel using indicated number of threads. columns (`List[str]`, *optional*): List of column to be converted. If `None`, use all columns. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions, Returns: `datasets.table.Table`: Examples: ```python >>> import pandas as pd >>> import pyarrow as pa >>> df = pd.DataFrame({ ... 'int': [1, 2], ... 'str': ['a', 'b'] ... }) >>> pa.Table.from_pandas(df) <pyarrow.lib.Table object at 0x7f05d1fb1b40> ``` """ return cls(pa.Table.from_pandas(*args, **kwargs)) @classmethod def from_arrays(cls, *args, **kwargs): """ Construct a Table from Arrow arrays. Args: arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`): Equal-length arrays that should form the table. names (`List[str]`, *optional*): Names for the table columns. If not passed, schema must be passed. 
schema (`Schema`, defaults to `None`): Schema for the created table. If not passed, names must be passed. metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_arrays(*args, **kwargs)) @classmethod def from_pydict(cls, *args, **kwargs): """ Construct a Table from Arrow arrays or columns. Args: mapping (`Union[dict, Mapping]`): A mapping of strings to Arrays or Python lists. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pydict(*args, **kwargs)) @classmethod def from_pylist(cls, mapping, *args, **kwargs): """ Construct a Table from list of rows / dictionaries. Args: mapping (`List[dict]`): A mapping of strings to row values. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) @classmethod def from_batches(cls, *args, **kwargs): """ Construct a Table from a sequence or iterator of Arrow `RecordBatches`. Args: batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`): Sequence of `RecordBatch` to be converted, all schemas must be equal. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the first `RecordBatch`. Returns: `datasets.table.Table`: """ return cls(pa.Table.from_batches(*args, **kwargs)) def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length)) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ return InMemoryTable(self.table.filter(*args, **kwargs)) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(table_flatten(self.table, *args, **kwargs)) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. 
Returns: `datasets.table.Table` """ return InMemoryTable(table_cast(self.table, *args, **kwargs)) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.add_column(*args, **kwargs)) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.append_column(*args, **kwargs)) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ return InMemoryTable(self.table.remove_column(*args, **kwargs)) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ return InMemoryTable(self.table.set_column(*args, **kwargs)) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ return InMemoryTable(self.table.rename_columns(*args, **kwargs)) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ return InMemoryTable(self.table.drop(*args, **kwargs)) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ return InMemoryTable(self.table.select(*args, **kwargs)) # The MemoryMappedTable needs replays to properly reload tables from the disk Replay = tuple[str, tuple, dict] class MemoryMappedTable(TableBlock): """ The table is said memory mapped when it doesn't use the user's RAM but loads the data from the disk instead. Pickling it doesn't copy the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replay" when reloading the table from the disk. 
Its implementation requires to store an history of all the transforms that were applied to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table from the disk. This is different from the `InMemoryTable` table, for which pickling does copy all the data in memory. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ def __init__(self, table: pa.Table, path: str, replays: Optional[list[Replay]] = None): super().__init__(table) self.path = os.path.abspath(path) self.replays: list[Replay] = replays if replays is not None else [] @classmethod def from_file(cls, filename: str, replays=None): table = _memory_mapped_arrow_table_from_file(filename) table = cls._apply_replays(table, replays) return cls(table, filename, replays) def __getstate__(self): return {"path": self.path, "replays": self.replays} def __setstate__(self, state): path = state["path"] replays = state["replays"] table = _memory_mapped_arrow_table_from_file(path) table = self._apply_replays(table, replays) MemoryMappedTable.__init__(self, table, path=path, replays=replays) @staticmethod def _apply_replays(table: pa.Table, replays: Optional[list[Replay]] = None) -> pa.Table: if replays is not None: for name, args, kwargs in replays: if name == "cast": table = table_cast(table, *args, **kwargs) elif name == "flatten": table = table_flatten(table, *args, **kwargs) else: table = getattr(table, name)(*args, **kwargs) return table def _append_replay(self, replay: Replay) -> list[Replay]: replays = copy.deepcopy(self.replays) replays.append(replay) return replays def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ replay = ("slice", (offset, length), {}) replays = self._append_replay(replay) # Use fast slicing here return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the ChunkedArray of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) def cast(self, *args, **kwargs): """ Cast table values to another schema Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata. Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. 
""" replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) # A ConcatenationTable is the concatenation of several tables. # The ``blocks`` attributes stores a list of list of blocks. # The first axis concatenates the tables along the axis 0 (it appends rows), # while the second axis concatenates tables along the axis 1 (it appends columns). TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, list[TableBlock], list[list[TableBlock]]) class ConcatenationTable(Table): """ The table comes from the concatenation of several tables called blocks. It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). The underlying tables are called "blocks" and can be either `InMemoryTable` or `MemoryMappedTable` objects. This allows to combine tables that come from memory or that are memory mapped. When a `ConcatenationTable` is pickled, then each block is pickled: - the `InMemoryTable` objects are pickled by copying all the data in memory. - the MemoryMappedTable objects are pickled without copying the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replays" when reloading the table from the disk. Its implementation requires to store each block separately. The `blocks` attributes stores a list of list of blocks. The first axis concatenates the tables along the axis 0 (it appends rows), while the second axis concatenates tables along the axis 1 (it appends columns). If some columns are missing when concatenating on axis 0, they are filled with null values. This is done using `pyarrow.concat_tables(tables, promote=True)`. You can access the fully combined table by accessing the `ConcatenationTable.table` attribute, and the blocks by accessing the `ConcatenationTable.blocks` attribute. """ def __init__(self, table: pa.Table, blocks: list[list[TableBlock]]): super().__init__(table) self.blocks = blocks # Check that all the blocks have the right type. # Only InMemoryTable and MemoryMappedTable are allowed. for subtables in blocks: for subtable in subtables: if not isinstance(subtable, TableBlock): raise TypeError( "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" f", but got {_short_str(subtable)}." 
) def __getstate__(self): return {"blocks": self.blocks, "schema": self.table.schema} def __setstate__(self, state): blocks = state["blocks"] schema = state["schema"] table = self._concat_blocks_horizontally_and_vertically(blocks) if schema is not None and table.schema != schema: # We fix the columns by concatenating with an empty table with the right columns empty_table = pa.Table.from_batches([], schema=schema) # We set promote_options="default" to fill missing columns with null values table = pa.concat_tables([table, empty_table], promote_options="default") ConcatenationTable.__init__(self, table, blocks=blocks) @staticmethod def _concat_blocks(blocks: list[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] if axis == 0: # We set promote_options="default" to fill missing columns with null values return pa.concat_tables(pa_tables, promote_options="default") elif axis == 1: for i, table in enumerate(pa_tables): if i == 0: pa_table = table else: for name, col in zip(table.column_names, table.columns): pa_table = pa_table.append_column(name, col) return pa_table else: raise ValueError("'axis' must be either 0 or 1") @classmethod def _concat_blocks_horizontally_and_vertically(cls, blocks: list[list[TableBlock]]) -> pa.Table: pa_tables_to_concat_vertically = [] for i, tables in enumerate(blocks): if not tables: continue pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) @classmethod def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: if axis is not None: merged_blocks = [] for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): if is_in_memory: block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] merged_blocks += list(block_group) else: # both merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] if all(len(row_block) == 1 for row_block in merged_blocks): merged_blocks = cls._merge_blocks( [block for row_block in merged_blocks for block in row_block], axis=0 ) return merged_blocks @classmethod def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: if isinstance(blocks, TableBlock): return blocks elif isinstance(blocks[0], TableBlock): return cls._merge_blocks(blocks, axis=0) else: return cls._merge_blocks(blocks) @classmethod def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": blocks = cls._consolidate_blocks(blocks) if isinstance(blocks, TableBlock): table = blocks return cls(table.table, [[table]]) elif isinstance(blocks[0], TableBlock): table = cls._concat_blocks(blocks, axis=0) blocks = [[t] for t in blocks] return cls(table, blocks) else: table = cls._concat_blocks_horizontally_and_vertically(blocks) return cls(table, blocks) @classmethod def from_tables(cls, tables: list[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": """Create `ConcatenationTable` from list of tables. Args: tables (list of `Table` or list of `pyarrow.Table`): List of tables. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). 
<Added version="1.6.0"/> """ def to_blocks(table: Union[pa.Table, Table]) -> list[list[TableBlock]]: if isinstance(table, pa.Table): return [[InMemoryTable(table)]] elif isinstance(table, ConcatenationTable): return copy.deepcopy(table.blocks) else: return [[table]] def _slice_row_block(row_block: list[TableBlock], length: int) -> tuple[list[TableBlock], list[TableBlock]]: sliced = [table.slice(0, length) for table in row_block] remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] return sliced, remainder def _split_both_like( result: list[list[TableBlock]], blocks: list[list[TableBlock]] ) -> tuple[list[list[TableBlock]], list[list[TableBlock]]]: """ Make sure each row_block contain the same num_rows to be able to concatenate them on axis=1. To do so, we modify both blocks sets to have the same row_blocks boundaries. For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows, we modify both to have 4 row_blocks of size 2, 1, 1 and 2: [ x x x | x x x ] + [ y y | y y | y y ] ----------------------------- = [ x x | x | x | x x ] [ y y | y | y | y y ] """ result, blocks = list(result), list(blocks) new_result, new_blocks = [], [] while result and blocks: # we slice the longest row block to save two row blocks of same length # and we replace the long row block by its remainder if necessary if len(result[0][0]) > len(blocks[0][0]): new_blocks.append(blocks[0]) sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0])) new_result.append(sliced) elif len(result[0][0]) < len(blocks[0][0]): new_result.append(result[0]) sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0])) new_blocks.append(sliced) else: new_result.append(result.pop(0)) new_blocks.append(blocks.pop(0)) if result or blocks: raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") return new_result, new_blocks def _extend_blocks( result: list[list[TableBlock]], blocks: list[list[TableBlock]], axis: int = 0 ) -> list[list[TableBlock]]: if axis == 0: result.extend(blocks) elif axis == 1: # We make sure each row_block have the same num_rows result, blocks = _split_both_like(result, blocks) for i, row_block in enumerate(blocks): result[i].extend(row_block) return result blocks = to_blocks(tables[0]) for table in tables[1:]: table_blocks = to_blocks(table) blocks = _extend_blocks(blocks, table_blocks, axis=axis) return cls.from_blocks(blocks) @property def _slices(self): offset = 0 for tables in self.blocks: length = len(tables[0]) yield (offset, length) offset += length def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ table = self.table.slice(offset, length=length) length = length if length is not None else self.num_rows - offset blocks = [] for tables in self.blocks: n_rows = len(tables[0]) if length == 0: break elif n_rows <= offset: offset = offset - n_rows elif n_rows <= offset + length: blocks.append([t.slice(offset) for t in tables]) length, offset = length + offset - n_rows, 0 else: blocks.append([t.slice(offset, length) for t in tables]) length, offset = 0, 0 return ConcatenationTable(table, blocks) def filter(self, mask, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. 
""" table = self.table.filter(mask, *args, **kwargs) blocks = [] for (offset, length), tables in zip(self._slices, self.blocks): submask = mask.slice(offset, length) blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = table_flatten(self.table, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.flatten(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = self.table.combine_chunks(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def cast(self, target_schema, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ from .features import Features table = table_cast(self.table, target_schema, *args, **kwargs) target_features = Features.from_arrow_schema(target_schema) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields}) subschema = subfeatures.arrow_schema new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ table = self.table.replace_schema_metadata(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) return ConcatenationTable(table, self.blocks) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. 
Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, i, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, names, *args, **kwargs): """ Create new table with columns renamed to provided names. """ table = self.table.rename_columns(names, *args, **kwargs) names = dict(zip(self.table.column_names, names)) blocks = [] for tables in self.blocks: blocks.append( [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables] ) return ConcatenationTable(table, blocks) def drop(self, columns, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ table = self.table.drop(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def select(self, columns, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ table = self.table.select(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def concat_tables(tables: list[Table], axis: int = 0) -> Table: """ Concatenate tables. Args: tables (list of `Table`): List of tables to be concatenated. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Returns: `datasets.table.Table`: If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`. Otherwise if there's only one table, it is returned as is. """ tables = list(tables) if len(tables) == 1: return tables[0] return ConcatenationTable.from_tables(tables, axis=axis) def list_table_cache_files(table: Table) -> list[str]: """ Get the cache files that are loaded by the table. 
Cache file are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table. """ if isinstance(table, ConcatenationTable): cache_files = [] for subtables in table.blocks: for subtable in subtables: cache_files += list_table_cache_files(subtable) return cache_files elif isinstance(table, MemoryMappedTable): return [table.path] else: return [] def _wrap_for_chunked_arrays(func): """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly""" def wrapper(array, *args, **kwargs): if isinstance(array, pa.ChunkedArray): return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) else: return func(array, *args, **kwargs) return wrapper def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool: """Check if all the sub-lists of a `pa.ListArray` have the specified length.""" return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array) def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array: """Add the null bitmap to the offsets of a `pa.ListArray`.""" offsets = array.offsets if array.null_count > 0: offsets = pa.concat_arrays( [ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())), offsets[-1:], ] ) return offsets def _storage_type(type: pa.DataType) -> pa.DataType: """Convert a (possibly nested) `pa.ExtensionType` to its storage type.""" if isinstance(type, pa.ExtensionType): return _storage_type(type.storage_type) elif isinstance(type, pa.StructType): return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type]) elif isinstance(type, pa.ListType): return pa.list_(_storage_type(type.value_type)) elif isinstance(type, pa.FixedSizeListType): return pa.list_(_storage_type(type.value_type), type.list_size) return type def _short_str(value: Any) -> str: out = str(value) if len(out) > 3000: out = out[:1500] + "\n...\n" + out[-1500:] return out @_wrap_for_chunked_arrays def array_cast( array: pa.Array, pa_type: pa.DataType, allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True ) -> Union[pa.Array, pa.FixedSizeListArray, pa.ListArray, pa.StructArray, pa.ExtensionArray]: """Improved version of `pa.Array.cast` It supports casting `pa.StructArray` objects to re-order the fields. It also let you control certain aspects of the casting, e.g. whether to disable casting primitives (`booleans`, `floats` or `ints`) or disable casting decimals to strings. Args: array (`pa.Array`): PyArrow array to cast pa_type (`pa.DataType`): Target PyArrow type allow_primitive_to_str (`bool`, defaults to `True`): Whether to allow casting primitives to strings. Defaults to `True`. allow_decimal_to_str (`bool`, defaults to `True`): Whether to allow casting decimals to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported according, e.g. 
- if a field is missing - if casting from primitives to strings and `allow_primitive_to_str` is `False` - if casting from decimals to strings and `allow_decimal_to_str` is `False` Returns: `List[pyarrow.Array]`: the casted array """ _c = partial(array_cast, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if isinstance(pa_type, pa.ExtensionType): return pa_type.wrap_array(_c(array, pa_type.storage_type)) elif array.type == pa_type: return array elif pa.types.is_struct(array.type): if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}): if array.type.num_fields == 0: return array arrays = [_c(array.field(field.name), field.type) for field in pa_type] return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null()) elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): if pa.types.is_fixed_size_list(pa_type): if _are_list_values_of_length(array, pa_type.list_size): if array.null_count > 0: # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array array_type = array.type storage_type = _storage_type(array_type) if array_type != storage_type: # Temporarily convert to the storage type to support extension types in the slice operation array = _c(array, storage_type) array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) array = _c(array, array_type) else: array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) array_values = array.values return pa.FixedSizeListArray.from_arrays( _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null() ) else: array_values = array.values[ array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size ] return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size) elif pa.types.is_list(pa_type): # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) elif pa.types.is_large_list(pa_type): # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) elif pa.types.is_fixed_size_list(array.type): if pa.types.is_fixed_size_list(pa_type): if pa_type.list_size == array.type.list_size: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] return pa.FixedSizeListArray.from_arrays( _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null() ) elif pa.types.is_list(pa_type): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null()) elif pa.types.is_large_list(pa_type): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.LargeListArray.from_arrays( array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null() ) else: if pa.types.is_string(pa_type): if not allow_primitive_to_str and pa.types.is_primitive(array.type): raise 
TypeError( f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} " f"since allow_primitive_to_str is set to {allow_primitive_to_str} " ) if not allow_decimal_to_str and pa.types.is_decimal(array.type): raise TypeError( f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} " f"and allow_decimal_to_str is set to {allow_decimal_to_str}" ) if pa.types.is_null(pa_type) and not pa.types.is_null(array.type): raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") return array.cast(pa_type) raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") @_wrap_for_chunked_arrays def cast_array_to_feature( array: pa.Array, feature: "FeatureType", allow_primitive_to_str: bool = True, allow_decimal_to_str: bool = True ) -> pa.Array: """Cast an array to the arrow type that corresponds to the requested feature type. For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods they defined to enable casting from other arrow types. Args: array (`pa.Array`): The PyArrow array to cast. feature (`datasets.features.FeatureType`): The target feature type. allow_primitive_to_str (`bool`, defaults to `True`): Whether to allow casting primitives to strings. Defaults to `True`. allow_decimal_to_str (`bool`, defaults to `True`): Whether to allow casting decimals to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported according, e.g. - if a field is missing - if casting from primitives and `allow_primitive_to_str` is `False` - if casting from decimals and `allow_decimal_to_str` is `False` Returns: array (`pyarrow.Array`): the casted array """ from .features.features import LargeList, List, get_nested_type _c = partial( cast_array_to_feature, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "cast_storage"): return feature.cast_storage(array) if pa.types.is_struct(array.type): # feature must be a dict if isinstance(feature, dict) and (array_fields := {field.name for field in array.type}) <= set(feature): null_array = pa.array([None] * len(array)) arrays = [ _c(array.field(name) if name in array_fields else null_array, subfeature) for name, subfeature in feature.items() ] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): # feature must be either List(subfeature) or LargeList(subfeature) if isinstance(feature, LargeList): casted_array_values = _c(array.values, feature.feature) if pa.types.is_large_list(array.type) and casted_array_values.type == array.values.type: # Both array and feature have equal large_list type and values (within the list) type return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, casted_array_values) elif isinstance(feature, List): if feature.length > -1: if _are_list_values_of_length(array, feature.length): if array.null_count > 0: # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array array_type = array.type storage_type = _storage_type(array_type) if array_type != 
storage_type: # Temporarily convert to the storage type to support extension types in the slice operation array = array_cast( array, storage_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array = array_cast( array, array_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) else: array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) array_values = array.values casted_array_values = _c(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays( casted_array_values, feature.length, mask=array.is_null() ) else: array_values = array.values[ array.offset * feature.length : (array.offset + len(array)) * feature.length ] return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length) else: casted_array_values = _c(array.values, feature.feature) if pa.types.is_list(array.type) and casted_array_values.type == array.values.type: # Both array and feature have equal list type and values (within the list) type return array else: # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.ListArray.from_arrays(array_offsets, casted_array_values) elif pa.types.is_fixed_size_list(array.type): # feature must be List(subfeature) if isinstance(feature, LargeList): array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.LargeListArray.from_arrays( array_offsets, _c(array.values, feature.feature), mask=array.is_null() ) elif isinstance(feature, List): if feature.length > -1: if feature.length == array.type.list_size: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] casted_array_values = _c(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays(casted_array_values, feature.length, mask=array.is_null()) else: array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null()) if pa.types.is_null(array.type): return array_cast( array, get_nested_type(feature), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) elif not isinstance(feature, (List, LargeList, dict)): return array_cast( array, feature(), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str, ) raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}") @_wrap_for_chunked_arrays def embed_array_storage(array: pa.Array, feature: "FeatureType", token_per_repo_id=None): """Embed data into an arrays's storage. For custom features like Audio or Image, it takes into account the "embed_storage" methods they define to embed external data (e.g. an image file) into an array. <Added version="2.4.0"/> Args: array (`pa.Array`): The PyArrow array in which to embed data. feature (`datasets.features.FeatureType`): Array features. Raises: `TypeError`: if the target type is not supported according, e.g. 
- if a field is missing Returns: array (`pyarrow.Array`): the casted array """ from .features import LargeList, List _e = partial(embed_array_storage, token_per_repo_id=token_per_repo_id) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "embed_storage"): return feature.embed_storage(array, token_per_repo_id=token_per_repo_id) elif pa.types.is_struct(array.type): # feature must be a dict if isinstance(feature, dict): arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either List(subfeature) # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) if isinstance(feature, List) and feature.length == -1: return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) elif pa.types.is_large_list(array.type): # feature must be LargeList(subfeature) # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError array_offsets = _combine_list_array_offsets_with_mask(array) return pa.LargeListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be List(subfeature) if isinstance(feature, List) and feature.length > -1: array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] embedded_array_values = _e(array_values, feature.feature) return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null()) if not isinstance(feature, (List, LargeList, dict)): return array raise TypeError(f"Couldn't embed array of type\n{_short_str(array.type)}\nwith\n{_short_str(feature)}") class CastError(ValueError): """When it's not possible to cast an Arrow table to a specific schema or set of features""" def __init__(self, *args, table_column_names: list[str], requested_column_names: list[str]) -> None: super().__init__(*args) self.table_column_names = table_column_names self.requested_column_names = requested_column_names def __reduce__(self): # Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names' return partial( CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names ), () def details(self): new_columns = set(self.table_column_names) - set(self.requested_column_names) missing_columns = set(self.requested_column_names) - set(self.table_column_names) if new_columns and missing_columns: return f"there are {len(new_columns)} new columns ({_short_str(new_columns)}) and {len(missing_columns)} missing columns ({_short_str(missing_columns)})." elif new_columns: return f"there are {len(new_columns)} new columns ({_short_str(new_columns)})" else: return f"there are {len(missing_columns)} missing columns ({_short_str(missing_columns)})" def cast_table_to_features(table: pa.Table, features: "Features"): """Cast a table to the arrow schema that corresponds to the requested features. Args: table (`pyarrow.Table`): PyArrow table to cast. features ([`Features`]): Target features. 
Returns: table (`pyarrow.Table`): the casted table """ if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def cast_table_to_schema(table: pa.Table, schema: pa.Schema): """Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability. Args: table (`pa.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: `pa.Table`: the casted table """ from .features import Features features = Features.from_arrow_schema(schema) table_column_names = set(table.column_names) if not table_column_names <= set(schema.names): raise CastError( f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [ cast_array_to_feature( table[name] if name in table_column_names else pa.array([None] * len(table), type=schema.field(name).type), feature, ) for name, feature in features.items() ] return pa.Table.from_arrays(arrays, schema=schema) def embed_table_storage(table: pa.Table, token_per_repo_id=None): """Embed external data into a table's storage. <Added version="2.4.0"/> Args: table (`pyarrow.Table`): PyArrow table in which to embed data. Returns: table (`pyarrow.Table`): the table with embedded data """ from .features.features import Features, require_storage_embed features = Features.from_arrow_schema(table.schema) arrays = [ embed_array_storage(table[name], feature, token_per_repo_id=token_per_repo_id) if require_storage_embed(feature) else table[name] for name, feature in features.items() ] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def table_cast(table: pa.Table, schema: pa.Schema): """Improved version of `pa.Table.cast`. It supports casting to feature types stored in the schema metadata. Args: table (`pyarrow.Table`): PyArrow table to cast. schema (`pyarrow.Schema`): Target PyArrow schema. Returns: table (`pyarrow.Table`): the casted table """ if table.schema != schema: return cast_table_to_schema(table, schema) elif table.schema.metadata != schema.metadata: return table.replace_schema_metadata(schema.metadata) else: return table def table_flatten(table: pa.Table): """Improved version of `pa.Table.flatten`. It behaves as `pa.Table.flatten` in a sense it does 1-step flatten of the columns with a struct type into one column per struct field, but updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False. Args: table (`pa.Table`): PyArrow table to flatten. 
Returns: `Table`: the flattened table """ from .features import Features features = Features.from_arrow_schema(table.schema) if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()): flat_arrays = [] flat_column_names = [] for field in table.schema: array = table.column(field.name) subfeature = features[field.name] if pa.types.is_struct(field.type) and ( not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature ): flat_arrays.extend(array.flatten()) flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type]) else: flat_arrays.append(array) flat_column_names.append(field.name) flat_table = pa.Table.from_arrays( flat_arrays, names=flat_column_names, ) else: flat_table = table.flatten() # Preserve complex types in the metadata flat_features = features.flatten(max_depth=2) flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata) def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): """Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array. """ from .features import Features, LargeList, List features = Features.from_arrow_schema(table.schema) def _visit(array, feature): if isinstance(array, pa.ChunkedArray): for chunk in array.chunks: _visit(chunk, feature) else: if isinstance(array, pa.ExtensionArray): array = array.storage function(array, feature) if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"): for name, subfeature in feature.items(): _visit(array.field(name), subfeature) elif pa.types.is_list(array.type): if isinstance(feature, (LargeList, List)): _visit(array.values, feature.feature) for name, feature in features.items(): _visit(table[name], feature) def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: """Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. """ chunks_buffer = [] chunks_buffer_size = 0 for chunk in table.to_reader(max_chunksize=batch_size): if len(chunk) == 0: continue elif chunks_buffer_size + len(chunk) < batch_size: chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == batch_size: chunks_buffer.append(chunk) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [] chunks_buffer_size = 0 else: cropped_chunk_length = batch_size - chunks_buffer_size chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if not drop_last_batch and chunks_buffer: yield pa.Table.from_batches(chunks_buffer)
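# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): a minimal,
# hedged exercise of the helpers defined above. It assumes
# `InMemoryTable.from_pydict` builds a table the same way `pa.Table.from_pydict`
# does; `concat_tables` and `table_iter` are the functions defined in this file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Build two small in-memory blocks and concatenate them over rows (axis=0).
    t1 = InMemoryTable.from_pydict({"col_1": ["a", "b", "c"], "col_2": [1, 2, 3]})
    t2 = InMemoryTable.from_pydict({"col_1": ["d", "e"], "col_2": [4, 5]})
    combined = concat_tables([t1, t2], axis=0)  # ConcatenationTable with 5 rows

    # Iterate over the result in fixed-size sub-tables; only the last one is smaller.
    for sub_table in table_iter(combined, batch_size=2):
        print(sub_table.num_rows)  # prints 2, 2, 1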
datasets/src/datasets/table.py/0
{ "file_path": "datasets/src/datasets/table.py", "repo_id": "datasets", "token_count": 41094 }
105
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Some python utils function and classes.""" import copy import functools import itertools import multiprocessing.pool import os import queue import re import types import warnings from collections.abc import Iterable from contextlib import contextmanager from dataclasses import fields, is_dataclass from multiprocessing import Manager from queue import Empty from shutil import disk_usage from typing import Any, Callable, Optional, TypeVar, Union import multiprocess import multiprocess.pool import numpy as np from tqdm.auto import tqdm from .. import config from ..parallel import parallel_map from . import logging from . import tqdm as hf_tqdm from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0 Pickler, dump, dumps, pklregister, ) try: # pragma: no branch from typing import Final import typing_extensions as _typing_extensions from typing_extensions import Literal except ImportError: _typing_extensions = Literal = Final = None logger = logging.get_logger(__name__) # NOTE: When used on an instance method, the cache is shared across all # instances and IS NOT per-instance. # See # https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance # For @property methods, use @memoized_property below. memoize = functools.lru_cache def size_str(size_in_bytes): """Returns a human readable size string. If size_in_bytes is None, then returns "Unknown size". For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`. Args: size_in_bytes: `int` or `None`, the size, in bytes, that we want to format as a human-readable size string. """ if not size_in_bytes: return "Unknown size" _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)] size_in_bytes = float(size_in_bytes) for name, size_bytes in _NAME_LIST: value = size_in_bytes / size_bytes if value >= 1.0: return f"{value:.2f} {name}" return f"{int(size_in_bytes)} bytes" def convert_file_size_to_int(size: Union[int, str]) -> int: """ Converts a size expressed as a string with digits an unit (like `"50MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. 
Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("PIB"): return int(size[:-3]) * (2**50) if size.upper().endswith("TIB"): return int(size[:-3]) * (2**40) if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("PB"): int_size = int(size[:-2]) * (10**15) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("TB"): int_size = int(size[:-2]) * (10**12) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def glob_pattern_to_regex(pattern): # partially taken from fsspec: # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735 return ( pattern.replace("\\", r"\\") .replace(".", r"\.") .replace("*", ".*") .replace("+", r"\+") .replace("//", "/") .replace("(", r"\(") .replace(")", r"\)") .replace("|", r"\|") .replace("^", r"\^") .replace("$", r"\$") .rstrip("/") .replace("?", ".") ) def string_to_dict(string: str, pattern: str) -> Optional[dict[str, str]]: """Un-format a string using a python f-string pattern. From https://stackoverflow.com/a/36838374 Example:: >>> p = 'hello, my name is {name} and I am a {age} year old {what}' >>> s = p.format(name='cody', age=18, what='quarterback') >>> s 'hello, my name is cody and I am a 18 year old quarterback' >>> string_to_dict(s, p) {'age': '18', 'name': 'cody', 'what': 'quarterback'} Args: string (str): input string pattern (str): pattern formatted like a python f-string This can be a regex - so in case of un-formatting paths you should use posix paths. Otherwise backslashes for windows paths can cause issues. Returns: Optional[dict[str, str]]: dictionary of variable -> value, retrieved from the input using the pattern, or `None` if the string does not match the pattern. """ pattern = re.sub(r"{([^:}]+)(?::[^}]+)?}", r"{\1}", pattern) # remove format specifiers, e.g. {rank:05d} -> {rank} regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern) result = re.search(regex, string) if result is None: return None values = list(result.groups()) keys = re.findall(r"{(.+?)}", pattern) _dict = dict(zip(keys, values)) return _dict def asdict(obj): """Convert an object to its dictionary representation recursively. 
<Added version="2.4.0"/> """ # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict def _is_dataclass_instance(obj): # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass return is_dataclass(obj) and not isinstance(obj, type) def _asdict_inner(obj): if _is_dataclass_instance(obj): result = {} for f in fields(obj): value = _asdict_inner(getattr(obj, f.name)) if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): result[f.name] = value return result elif isinstance(obj, tuple) and hasattr(obj, "_fields"): # obj is a namedtuple return type(obj)(*[_asdict_inner(v) for v in obj]) elif isinstance(obj, (list, tuple)): # Assume we can create an object of this type by passing in a # generator (which is not true for namedtuples, handled # above). return type(obj)(_asdict_inner(v) for v in obj) elif isinstance(obj, dict): return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()} else: return copy.deepcopy(obj) if not isinstance(obj, dict) and not _is_dataclass_instance(obj): raise TypeError(f"{obj} is not a dict or a dataclass") return _asdict_inner(obj) @contextmanager def temporary_assignment(obj, attr, value): """Temporarily assign obj.attr to value.""" original = getattr(obj, attr, None) setattr(obj, attr, value) try: yield finally: setattr(obj, attr, original) @contextmanager def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False): """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow.""" np_state = np.random.get_state() np.random.seed(seed) if set_pytorch and config.TORCH_AVAILABLE: import torch torch_state = torch.random.get_rng_state() torch.random.manual_seed(seed) if torch.cuda.is_available(): torch_cuda_states = torch.cuda.get_rng_state_all() torch.cuda.manual_seed_all(seed) if set_tensorflow and config.TF_AVAILABLE: import tensorflow as tf from tensorflow.python.eager import context as tfpycontext tf_state = tf.random.get_global_generator() temp_gen = tf.random.Generator.from_seed(seed) tf.random.set_global_generator(temp_gen) if not tf.executing_eagerly(): raise ValueError("Setting random seed for TensorFlow is only available in eager mode") tf_context = tfpycontext.context() # eager mode context tf_seed = tf_context._seed tf_rng_initialized = hasattr(tf_context, "_rng") if tf_rng_initialized: tf_rng = tf_context._rng tf_context._set_global_seed(seed) try: yield finally: np.random.set_state(np_state) if set_pytorch and config.TORCH_AVAILABLE: torch.random.set_rng_state(torch_state) if torch.cuda.is_available(): torch.cuda.set_rng_state_all(torch_cuda_states) if set_tensorflow and config.TF_AVAILABLE: tf.random.set_global_generator(tf_state) tf_context._seed = tf_seed if tf_rng_initialized: tf_context._rng = tf_rng else: delattr(tf_context, "_rng") def unique_values(values): """Iterate over iterable and return only unique values in order.""" seen = set() for value in values: if value not in seen: seen.add(value) yield value def no_op_if_value_is_null(func): """If the value is None, return None, else call `func`.""" def wrapper(value): return func(value) if value is not None else None return wrapper def first_non_null_value(iterable): """Return the index and the value of the first non-null value in the iterable. 
If all values are None, return -1 as index.""" for i, value in enumerate(iterable): if value is not None: return i, value return -1, None def first_non_null_non_empty_value(iterable): """Return the index and the value of the first non-null non-empty value in the iterable. If all values are None or empty, return -1 as index.""" for i, value in enumerate(iterable): if value is not None and not (isinstance(value, (dict, list)) and len(value) == 0): return i, value return -1, None def zip_dict(*dicts): """Iterate over items of dictionaries grouped by their keys.""" for key in unique_values(itertools.chain(*dicts)): # set merge all keys # Will raise KeyError if the dict don't have the same keys yield key, tuple(d[key] for d in dicts) class NonMutableDict(dict): """Dict where keys can only be added but not modified. Will raise an error if the user try to overwrite one key. The error message can be customized during construction. It will be formatted using {key} for the overwritten key. """ def __init__(self, *args, **kwargs): self._error_msg = kwargs.pop( "error_msg", "Try to overwrite existing key: {key}", ) if kwargs: raise ValueError("NonMutableDict cannot be initialized with kwargs.") super().__init__(*args, **kwargs) def __setitem__(self, key, value): if key in self: raise ValueError(self._error_msg.format(key=key)) return super().__setitem__(key, value) def update(self, other): if any(k in self for k in other): raise ValueError(self._error_msg.format(key=set(self) & set(other))) return super().update(other) class classproperty(property): # pylint: disable=invalid-name """Descriptor to be used as decorator for @classmethods.""" def __get__(self, obj, objtype=None): return self.fget.__get__(None, objtype)() def _single_map_nested(args): """Apply a function recursively to each element of a nested data struct.""" function, data_struct, batched, batch_size, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): if batched: return function([data_struct])[0] else: return function(data_struct) if ( batched and not isinstance(data_struct, dict) and isinstance(data_struct, types) and all(not isinstance(v, (dict, types)) for v in data_struct) ): return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)] # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print(" ", end="", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar: if isinstance(data_struct, dict): return { k: _single_map_nested((function, v, batched, batch_size, types, None, True, None)) for k, v in pbar } else: mapped = [_single_map_nested((function, v, batched, batch_size, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, 
tuple): return tuple(mapped) else: return np.array(mapped) def map_nested( function: Callable[[Any], Any], data_struct: Any, dict_only: bool = False, map_list: bool = True, map_tuple: bool = False, map_numpy: bool = False, num_proc: Optional[int] = None, parallel_min_length: int = 2, batched: bool = False, batch_size: Optional[int] = 1000, types: Optional[tuple] = None, disable_tqdm: bool = True, desc: Optional[str] = None, ) -> Any: """Apply a function recursively to each element of a nested data struct. Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to `parallel_min_length`. <Changed version="2.5.0"> Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and multiprocessing is used. </Changed> Args: function (`Callable`): Function to be applied to `data_struct`. data_struct (`Any`): Data structure to apply `function` to. dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in `data_struct`. map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` values). map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides `dict` values). map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides `dict` values). num_proc (`int`, *optional*): Number of processes. The level in the data struct used for multiprocessing is the first level that has smaller sub-structs, starting from the root. parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel processing. <Added version="2.5.0"/> batched (`bool`, defaults to `False`): Provide batch of items to `function`. <Added version="2.19.0"/> batch_size (`int`, *optional*, defaults to `1000`): Number of items per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`. <Added version="2.19.0"/> types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. desc (`str`, *optional*): Prefix for the tqdm progressbar. 
Returns: `Any` """ if types is None: types = [] if not dict_only: if map_list: types.append(list) if map_tuple: types.append(tuple) if map_numpy: types.append(np.ndarray) types = tuple(types) # Singleton if not isinstance(data_struct, dict) and not isinstance(data_struct, types): if batched: data_struct = [data_struct] mapped = function(data_struct) if batched: mapped = mapped[0] return mapped iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct if num_proc is None: num_proc = 1 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable): mapped = [ map_nested( function=function, data_struct=obj, num_proc=num_proc, parallel_min_length=parallel_min_length, batched=batched, batch_size=batch_size, types=types, ) for obj in iterable ] elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: if batched: if batch_size is None or batch_size <= 0: batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1) iterable = list(iter_batched(iterable, batch_size)) mapped = [ _single_map_nested((function, obj, batched, batch_size, types, None, True, None)) for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) ] if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".* is experimental and might be subject to breaking changes in the future\\.$", category=UserWarning, ) if batched: if batch_size is None or batch_size <= 0: batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0) iterable = list(iter_batched(iterable, batch_size)) mapped = parallel_map( function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested ) if batched: mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch] if isinstance(data_struct, dict): return dict(zip(data_struct.keys(), mapped)) else: if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) class NestedDataStructure: def __init__(self, data=None): self.data = data if data is not None else [] def flatten(self, data=None): data = data if data is not None else self.data if isinstance(data, dict): return self.flatten(list(data.values())) elif isinstance(data, (list, tuple)): return [flattened for item in data for flattened in self.flatten(item)] else: return [data] def has_sufficient_disk_space(needed_bytes, directory="."): try: free_bytes = disk_usage(os.path.abspath(directory)).free except OSError: return True return needed_bytes < free_bytes def copyfunc(func): result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__) result.__kwdefaults__ = func.__kwdefaults__ return result Y = TypeVar("Y") def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int: for i, result in enumerate(func(**kwargs)): queue.put(result) return i def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> set[int]: return {f.pid for f in pool._pool} def iflatmap_unordered( pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool], func: Callable[..., Iterable[Y]], *, kwargs_iterable: Iterable[dict], ) -> Iterable[Y]: initial_pool_pid = _get_pool_pid(pool) pool_changed = False manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager with manager_cls() as manager: 
queue = manager.Queue() async_results = [ pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable ] try: while True: try: yield queue.get(timeout=0.05) except Empty: if all(async_result.ready() for async_result in async_results) and queue.empty(): break if _get_pool_pid(pool) != initial_pool_pid: pool_changed = True # One of the subprocesses has died. We should not wait forever. raise RuntimeError( "One of the subprocesses has abruptly died during map operation." "To debug the error, disable multiprocessing." ) finally: if not pool_changed: # we get the result in case there's an error to raise [async_result.get(timeout=0.05) for async_result in async_results] T = TypeVar("T") def iter_batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]: if n < 1: raise ValueError(f"Invalid batch size {n}") batch = [] for item in iterable: batch.append(item) if len(batch) == n: yield batch batch = [] if batch: yield batch
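# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): a minimal,
# single-process run of `map_nested` and `iter_batched` as defined above, on
# plain Python containers. No multiprocessing or tqdm output is involved since
# `num_proc` defaults to 1 and `disable_tqdm` defaults to True.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    nested = {"a": [1, 2, 3], "b": {"c": [4, 5]}}
    doubled = map_nested(lambda x: x * 2, nested)
    print(doubled)  # {'a': [2, 4, 6], 'b': {'c': [8, 10]}}

    print(list(iter_batched(range(5), 2)))  # [[0, 1], [2, 3], [4]]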
datasets/src/datasets/utils/py_utils.py/0
{ "file_path": "datasets/src/datasets/utils/py_utils.py", "repo_id": "datasets", "token_count": 9896 }
106
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _check_json_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ], ) def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} features = {"col_2": "int64", "col_3": "float64", "col_1": "string"} expected_features = features.copy() features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) cache_dir = tmp_path / "cache" dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() assert isinstance(dataset, Dataset) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == 
expected_dtype @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_json_split(split, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read() _check_json_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): if issubclass(path_type, str): path = jsonl_path elif issubclass(path_type, list): path = [jsonl_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_dataset(dataset, expected_features) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def load_json(buffer): return json.load(buffer) def load_json_lines(buffer): return [json.loads(line) for line in buffer] class TestJsonDatasetWriter: @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines(self, lines, load_json_function, dataset): with 
io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write() buffer.seek(0) exported_content = load_json_function(buffer) assert isinstance(exported_content, list) assert isinstance(exported_content[0], dict) assert len(exported_content) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at", [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789"), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ], ) def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write() buffer.seek(0) exported_content = load_json(buffer) assert isinstance(exported_content, container) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") if len_at: assert len(exported_content[len_at]) == 10 else: assert len(exported_content) == 10 def test_dataset_to_json_orient_invalidproc(self, dataset): with pytest.raises(ValueError): with io.BytesIO() as buffer: JsonDatasetWriter(dataset, buffer, num_proc=0) @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")]) def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset): path = tmp_path_factory.mktemp("data") / f"test.json.{extension}" original_path = str(shared_datadir / f"test_file.json.{extension}") JsonDatasetWriter(dataset, path, compression=compression).write() with fsspec.open(path, "rb", compression="infer") as f: exported_content = f.read() with fsspec.open(original_path, "rb", compression="infer") as f: original_content = f.read() assert exported_content == original_content def test_dataset_to_json_fsspec(self, dataset, mockfs): dataset_path = "mock://my_dataset.json" 
writer = JsonDatasetWriter(dataset, dataset_path, storage_options=mockfs.storage_options) assert writer.write() > 0 assert mockfs.isfile(dataset_path) with fsspec.open(dataset_path, "rb", **mockfs.storage_options) as f: assert f.read()
datasets/tests/io/test_json.py/0
{ "file_path": "datasets/tests/io/test_json.py", "repo_id": "datasets", "token_count": 5153 }
107
import pytest from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.sql.sql import SqlConfig def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = SqlConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = SqlConfig(name="name", data_files=data_files)
datasets/tests/packaged_modules/test_sql.py/0
{ "file_path": "datasets/tests/packaged_modules/test_sql.py", "repo_id": "datasets", "token_count": 225 }
108
import os from datasets.utils._filelock import FileLock def test_long_path(tmpdir): filename = "a" * 1000 + ".lock" lock1 = FileLock(str(tmpdir / filename)) assert lock1.lock_file.endswith(".lock") assert not lock1.lock_file.endswith(filename) assert len(os.path.basename(lock1.lock_file)) <= 255
datasets/tests/test_filelock.py/0
{ "file_path": "datasets/tests/test_filelock.py", "repo_id": "datasets", "token_count": 120 }
109
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( "kwargs, expected", [ ({"num_shards": 0, "max_num_jobs": 1}, []), ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]), ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]), ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]), ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]), ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]), ], ) def test_distribute_shards(kwargs, expected): out = _distribute_shards(**kwargs) assert out == expected @pytest.mark.parametrize( "gen_kwargs, max_num_jobs, expected", [ ({"foo": 0}, 10, [{"foo": 0}]), ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), ], ) def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected): out = _split_gen_kwargs(gen_kwargs, max_num_jobs) assert out == expected @pytest.mark.parametrize( "gen_kwargs, expected", [ ({"foo": 0}, 1), ({"shards": [0]}, 1), ({"shards": [0, 1, 2, 3]}, 4), ({"shards": [0, 1, 2, 3], "foo": 0}, 4), ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), ], ) def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected): if expected is RuntimeError: with pytest.raises(expected): _number_of_shards_in_gen_kwargs(gen_kwargs) else: out = _number_of_shards_in_gen_kwargs(gen_kwargs) assert out == expected
datasets/tests/test_sharding_utils.py/0
{ "file_path": "datasets/tests/test_sharding_utils.py", "repo_id": "datasets", "token_count": 977 }
110
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples scripts src tests utils benchmarks modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ ruff check $(modified_py_files) --fix; \ ruff format $(modified_py_files);\ else \ echo "No library .py files were modified"; \ fi # Update src/diffusers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py # this target runs checks on all files quality: ruff check $(check_dirs) setup.py ruff format --check $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 --check_only python utils/check_doc_toc.py # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: ruff check $(check_dirs) setup.py --fix ruff format $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of codes conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/ # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python utils/release.py --post_release post-patch: python utils/release.py --post_release --patch
diffusers/Makefile/0
{ "file_path": "diffusers/Makefile", "repo_id": "diffusers", "token_count": 929 }
111
FROM ubuntu:20.04 LABEL maintainer="Hugging Face" LABEL repository="diffusers" ENV DEBIAN_FRONTEND=noninteractive RUN apt-get -y update \ && apt-get install -y software-properties-common \ && add-apt-repository ppa:deadsnakes/ppa RUN apt install -y bash \ build-essential \ git \ git-lfs \ curl \ ca-certificates \ libsndfile1-dev \ libgl1 \ python3.10 \ python3-pip \ python3.10-venv && \ rm -rf /var/lib/apt/lists # make sure to use venv RUN python3.10 -m venv /opt/venv ENV PATH="/opt/venv/bin:$PATH" # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) # follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \ python3 -m uv pip install --upgrade --no-cache-dir \ clu \ "jax[cpu]>=0.2.16,!=0.3.2" \ "flax>=0.4.1" \ "jaxlib>=0.1.65" && \ python3 -m uv pip install --no-cache-dir \ accelerate \ datasets \ hf-doc-builder \ huggingface-hub \ Jinja2 \ librosa \ numpy==1.26.4 \ scipy \ tensorboard \ transformers \ hf_transfer CMD ["/bin/bash"]
diffusers/docker/diffusers-flax-cpu/Dockerfile/0
{ "file_path": "diffusers/docker/diffusers-flax-cpu/Dockerfile", "repo_id": "diffusers", "token_count": 652 }
112
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Configuration Schedulers from [`~schedulers.scheduling_utils.SchedulerMixin`] and models from [`ModelMixin`] inherit from [`ConfigMixin`] which stores all the parameters that are passed to their respective `__init__` methods in a JSON-configuration file. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. </Tip> ## ConfigMixin [[autodoc]] ConfigMixin - load_config - from_config - save_config - to_json_file - to_json_string
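As a quick illustration of how a configuration round-trips, the sketch below uses [`DDPMScheduler`]; any class inheriting from [`ConfigMixin`] behaves the same way, and the parameter values here are only examples.

```python
from diffusers import DDPMScheduler

# The __init__ arguments are captured in a frozen config.
scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2")
print(scheduler.config.beta_schedule)  # "squaredcos_cap_v2"

# Rebuild an equivalent scheduler from the stored configuration.
restored = DDPMScheduler.from_config(scheduler.config)

# Persist the configuration as a JSON file and read it back as a dict.
scheduler.save_config("./ddpm-scheduler")
config = DDPMScheduler.load_config("./ddpm-scheduler")
```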
diffusers/docs/source/en/api/configuration.md/0
{ "file_path": "diffusers/docs/source/en/api/configuration.md", "repo_id": "diffusers", "token_count": 322 }
113
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLHunyuanVideo

The 3D variational autoencoder (VAE) model with KL loss used in [HunyuanVideo](https://github.com/Tencent/HunyuanVideo/), which was introduced in [HunyuanVideo: A Systematic Framework For Large Video Generative Models](https://huggingface.co/papers/2412.03603) by Tencent.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLHunyuanVideo

vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16)
```

## AutoencoderKLHunyuanVideo

[[autodoc]] AutoencoderKLHunyuanVideo
  - decode
  - all

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
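As a rough sketch of decoding, the snippet below pushes a random latent through the VAE. The latent shape and the use of a CUDA device are assumptions for illustration only, and `enable_tiling` is optional but lowers peak memory.

```python
import torch
from diffusers import AutoencoderKLHunyuanVideo

vae = AutoencoderKLHunyuanVideo.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16
).to("cuda")
vae.enable_tiling()  # optional: trades speed for lower peak memory

# Assumed latent layout: (batch, channels, frames, height, width)
latents = torch.randn(1, 16, 3, 40, 64, dtype=torch.float16, device="cuda")

with torch.no_grad():
    video = vae.decode(latents).sample  # decoded pixel-space frames
```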
diffusers/docs/source/en/api/models/autoencoder_kl_hunyuan_video.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoder_kl_hunyuan_video.md", "repo_id": "diffusers", "token_count": 383 }
114
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Transformer2DModel A Transformer model for image-like data from [CompVis](https://huggingface.co/CompVis) that is based on the [Vision Transformer](https://huggingface.co/papers/2010.11929) introduced by Dosovitskiy et al. The [`Transformer2DModel`] accepts discrete (classes of vector embeddings) or continuous (actual embeddings) inputs. When the input is **continuous**: 1. Project the input and reshape it to `(batch_size, sequence_length, feature_dimension)`. 2. Apply the Transformer blocks in the standard way. 3. Reshape to image. When the input is **discrete**: <Tip> It is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image don't contain a prediction for the masked pixel because the unnoised image cannot be masked. </Tip> 1. Convert input (classes of latent pixels) to embeddings and apply positional embeddings. 2. Apply the Transformer blocks in the standard way. 3. Predict classes of unnoised image. ## Transformer2DModel [[autodoc]] Transformer2DModel ## Transformer2DModelOutput [[autodoc]] models.modeling_outputs.Transformer2DModelOutput
diffusers/docs/source/en/api/models/transformer2d.md/0
{ "file_path": "diffusers/docs/source/en/api/models/transformer2d.md", "repo_id": "diffusers", "token_count": 465 }
115
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Outputs

All model outputs are subclasses of [`~utils.BaseOutput`], data structures containing all the information returned by the model. The outputs can also be used as tuples or dictionaries.

For example:

```python
from diffusers import DDIMPipeline

pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
outputs = pipeline()
```

The `outputs` object is a [`~pipelines.ImagePipelineOutput`], which means it has an `images` attribute. You can access each attribute as you normally would or with a keyword lookup, and if that attribute is not returned by the model, you will get `None`:

```python
outputs.images
outputs["images"]
```

When considering the `outputs` object as a tuple, it only considers the attributes that don't have `None` values. For instance, slicing it with `outputs[:1]` returns the tuple `(outputs.images,)`:

```python
outputs[:1]
```

<Tip>

To check a specific pipeline or model output, refer to its corresponding API documentation.

</Tip>

## BaseOutput

[[autodoc]] utils.BaseOutput
    - to_tuple

## ImagePipelineOutput

[[autodoc]] pipelines.ImagePipelineOutput

## FlaxImagePipelineOutput

[[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput

## AudioPipelineOutput

[[autodoc]] pipelines.AudioPipelineOutput

## ImageTextPipelineOutput

[[autodoc]] ImageTextPipelineOutput
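To make the tuple behavior concrete, here is a small sketch that continues the `DDIMPipeline` example above; the printed keys are the fields exposed by [`~pipelines.ImagePipelineOutput`].

```python
# Keyword-style access mirrors attribute access.
print(list(outputs.keys()))   # ['images']
first_image = outputs["images"][0]

# Convert the output into a plain tuple of its non-None fields.
(images,) = outputs.to_tuple()
```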
diffusers/docs/source/en/api/outputs.md/0
{ "file_path": "diffusers/docs/source/en/api/outputs.md", "repo_id": "diffusers", "token_count": 554 }
116
<!-- Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --> # QwenImage <div class="flex flex-wrap space-x-1"> <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> </div> Qwen-Image from the Qwen team is an image generation foundation model in the Qwen series that achieves significant advances in complex text rendering and precise image editing. Experiments show strong general capabilities in both image generation and editing, with exceptional performance in text rendering, especially for Chinese. Qwen-Image comes in the following variants: | model type | model id | |:----------:|:--------:| | Qwen-Image | [`Qwen/Qwen-Image`](https://huggingface.co/Qwen/Qwen-Image) | | Qwen-Image-Edit | [`Qwen/Qwen-Image-Edit`](https://huggingface.co/Qwen/Qwen-Image-Edit) | <Tip> [Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs. </Tip> ## LoRA for faster inference Use a LoRA from `lightx2v/Qwen-Image-Lightning` to speed up inference by reducing the number of steps. Refer to the code snippet below: <details> <summary>Code</summary> ```py from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler import torch import math ckpt_id = "Qwen/Qwen-Image" # From # https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10 scheduler_config = { "base_image_seq_len": 256, "base_shift": math.log(3), # We use shift=3 in distillation "invert_sigmas": False, "max_image_seq_len": 8192, "max_shift": math.log(3), # We use shift=3 in distillation "num_train_timesteps": 1000, "shift": 1.0, "shift_terminal": None, # set shift_terminal to None "stochastic_sampling": False, "time_shift_type": "exponential", "use_beta_sigmas": False, "use_dynamic_shifting": True, "use_exponential_sigmas": False, "use_karras_sigmas": False, } scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config) pipe = DiffusionPipeline.from_pretrained( ckpt_id, scheduler=scheduler, torch_dtype=torch.bfloat16 ).to("cuda") pipe.load_lora_weights( "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors" ) prompt = "a tiny astronaut hatching from an egg on the moon, Ultra HD, 4K, cinematic composition." negative_prompt = " " image = pipe( prompt=prompt, negative_prompt=negative_prompt, width=1024, height=1024, num_inference_steps=8, true_cfg_scale=1.0, generator=torch.manual_seed(0), ).images[0] image.save("qwen_fewsteps.png") ``` </details> <Tip> The `guidance_scale` parameter in the pipeline is there to support future guidance-distilled models when they come up. Note that passing `guidance_scale` to the pipeline is ineffective. To enable classifier-free guidance, please pass `true_cfg_scale` and `negative_prompt` (even an empty negative prompt like " ") should enable classifier-free guidance computations. 
</Tip>

## QwenImagePipeline

[[autodoc]] QwenImagePipeline
  - all
  - __call__

## QwenImageImg2ImgPipeline

[[autodoc]] QwenImageImg2ImgPipeline
  - all
  - __call__

## QwenImageInpaintPipeline

[[autodoc]] QwenImageInpaintPipeline
  - all
  - __call__

## QwenImageEditPipeline

[[autodoc]] QwenImageEditPipeline
  - all
  - __call__

## QwenImageControlNetPipeline

[[autodoc]] QwenImageControlNetPipeline
  - all
  - __call__

## QwenImagePipelineOutput

[[autodoc]] pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput
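For reference, a minimal text-to-image call without the Lightning LoRA might look like the sketch below; the prompt, resolution, and `true_cfg_scale` value are arbitrary choices rather than recommended settings.

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16).to("cuda")

image = pipeline(
    prompt="a neon sign that reads 'Qwen-Image', rainy street at night, reflections on the pavement",
    negative_prompt=" ",
    true_cfg_scale=4.0,
    num_inference_steps=50,
    width=1024,
    height=1024,
    generator=torch.manual_seed(0),
).images[0]
image.save("qwen_image_t2i.png")
```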
diffusers/docs/source/en/api/pipelines/qwenimage.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/qwenimage.md", "repo_id": "diffusers", "token_count": 1428 }
117
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Latent upscaler The Stable Diffusion latent upscaler model was created by [Katherine Crowson](https://github.com/crowsonkb/k-diffusion) in collaboration with [Stability AI](https://stability.ai/). It is used to enhance the output image resolution by a factor of 2 (see this demo [notebook](https://colab.research.google.com/drive/1o1qYJcFeywzCIdkfKJy7cTpgZTCM2EI4) for a demonstration of the original implementation). <Tip> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! </Tip> ## StableDiffusionLatentUpscalePipeline [[autodoc]] StableDiffusionLatentUpscalePipeline - all - __call__ - enable_sequential_cpu_offload - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
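As a usage sketch, the upscaler is typically chained after a base Stable Diffusion pipeline that returns latents; the checkpoint names below point to the public `stable-diffusion-v1-5` and `stabilityai/sd-x2-latent-upscaler` weights, and the prompt and seed are arbitrary.

```python
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse, detailed, 8k"
generator = torch.manual_seed(33)

# Keep the base output in latent space so the upscaler can consume it directly.
low_res_latents = pipeline(prompt, generator=generator, output_type="latent").images

upscaled_image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
upscaled_image.save("astronaut_1024.png")
```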
diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md", "repo_id": "diffusers", "token_count": 543 }
118
# Getting Started: VAE Decode with Hybrid Inference VAE decode is an essential component of diffusion models - turning latent representations into images or videos. ## Memory These tables demonstrate the VRAM requirements for VAE decode with SD v1 and SD XL on different GPUs. For the majority of these GPUs the memory usage % dictates other models (text encoders, UNet/Transformer) must be offloaded, or tiled decoding has to be used which increases time taken and impacts quality. <details><summary>SD v1.5</summary> | GPU | Resolution | Time (seconds) | Memory (%) | Tiled Time (secs) | Tiled Memory (%) | | --- | --- | --- | --- | --- | --- | | NVIDIA GeForce RTX 4090 | 512x512 | 0.031 | 5.60% | 0.031 (0%) | 5.60% | | NVIDIA GeForce RTX 4090 | 1024x1024 | 0.148 | 20.00% | 0.301 (+103%) | 5.60% | | NVIDIA GeForce RTX 4080 | 512x512 | 0.05 | 8.40% | 0.050 (0%) | 8.40% | | NVIDIA GeForce RTX 4080 | 1024x1024 | 0.224 | 30.00% | 0.356 (+59%) | 8.40% | | NVIDIA GeForce RTX 4070 Ti | 512x512 | 0.066 | 11.30% | 0.066 (0%) | 11.30% | | NVIDIA GeForce RTX 4070 Ti | 1024x1024 | 0.284 | 40.50% | 0.454 (+60%) | 11.40% | | NVIDIA GeForce RTX 3090 | 512x512 | 0.062 | 5.20% | 0.062 (0%) | 5.20% | | NVIDIA GeForce RTX 3090 | 1024x1024 | 0.253 | 18.50% | 0.464 (+83%) | 5.20% | | NVIDIA GeForce RTX 3080 | 512x512 | 0.07 | 12.80% | 0.070 (0%) | 12.80% | | NVIDIA GeForce RTX 3080 | 1024x1024 | 0.286 | 45.30% | 0.466 (+63%) | 12.90% | | NVIDIA GeForce RTX 3070 | 512x512 | 0.102 | 15.90% | 0.102 (0%) | 15.90% | | NVIDIA GeForce RTX 3070 | 1024x1024 | 0.421 | 56.30% | 0.746 (+77%) | 16.00% | </details> <details><summary>SDXL</summary> | GPU | Resolution | Time (seconds) | Memory Consumed (%) | Tiled Time (seconds) | Tiled Memory (%) | | --- | --- | --- | --- | --- | --- | | NVIDIA GeForce RTX 4090 | 512x512 | 0.057 | 10.00% | 0.057 (0%) | 10.00% | | NVIDIA GeForce RTX 4090 | 1024x1024 | 0.256 | 35.50% | 0.257 (+0.4%) | 35.50% | | NVIDIA GeForce RTX 4080 | 512x512 | 0.092 | 15.00% | 0.092 (0%) | 15.00% | | NVIDIA GeForce RTX 4080 | 1024x1024 | 0.406 | 53.30% | 0.406 (0%) | 53.30% | | NVIDIA GeForce RTX 4070 Ti | 512x512 | 0.121 | 20.20% | 0.120 (-0.8%) | 20.20% | | NVIDIA GeForce RTX 4070 Ti | 1024x1024 | 0.519 | 72.00% | 0.519 (0%) | 72.00% | | NVIDIA GeForce RTX 3090 | 512x512 | 0.107 | 10.50% | 0.107 (0%) | 10.50% | | NVIDIA GeForce RTX 3090 | 1024x1024 | 0.459 | 38.00% | 0.460 (+0.2%) | 38.00% | | NVIDIA GeForce RTX 3080 | 512x512 | 0.121 | 25.60% | 0.121 (0%) | 25.60% | | NVIDIA GeForce RTX 3080 | 1024x1024 | 0.524 | 93.00% | 0.524 (0%) | 93.00% | | NVIDIA GeForce RTX 3070 | 512x512 | 0.183 | 31.80% | 0.183 (0%) | 31.80% | | NVIDIA GeForce RTX 3070 | 1024x1024 | 0.794 | 96.40% | 0.794 (0%) | 96.40% | </details> ## Available VAEs | | **Endpoint** | **Model** | |:-:|:-----------:|:--------:| | **Stable Diffusion v1** | [https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud](https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud) | [`stabilityai/sd-vae-ft-mse`](https://hf.co/stabilityai/sd-vae-ft-mse) | | **Stable Diffusion XL** | [https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud](https://x2dmsqunjd6k9prw.us-east-1.aws.endpoints.huggingface.cloud) | [`madebyollin/sdxl-vae-fp16-fix`](https://hf.co/madebyollin/sdxl-vae-fp16-fix) | | **Flux** | [https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud](https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud) | [`black-forest-labs/FLUX.1-schnell`](https://hf.co/black-forest-labs/FLUX.1-schnell) | | 
**HunyuanVideo** | [https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud](https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud) | [`hunyuanvideo-community/HunyuanVideo`](https://hf.co/hunyuanvideo-community/HunyuanVideo) | > [!TIP] > Model support can be requested [here](https://github.com/huggingface/diffusers/issues/new?template=remote-vae-pilot-feedback.yml). ## Code > [!TIP] > Install `diffusers` from `main` to run the code: `pip install git+https://github.com/huggingface/diffusers@main` A helper method simplifies interacting with Hybrid Inference. ```python from diffusers.utils.remote_utils import remote_decode ``` ### Basic example Here, we show how to use the remote VAE on random tensors. <details><summary>Code</summary> ```python image = remote_decode( endpoint="https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/", tensor=torch.randn([1, 4, 64, 64], dtype=torch.float16), scaling_factor=0.18215, ) ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/output.png"/> </figure> Usage for Flux is slightly different. Flux latents are packed so we need to send the `height` and `width`. <details><summary>Code</summary> ```python image = remote_decode( endpoint="https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/", tensor=torch.randn([1, 4096, 64], dtype=torch.float16), height=1024, width=1024, scaling_factor=0.3611, shift_factor=0.1159, ) ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/flux_random_latent.png"/> </figure> Finally, an example for HunyuanVideo. <details><summary>Code</summary> ```python video = remote_decode( endpoint="https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/", tensor=torch.randn([1, 16, 3, 40, 64], dtype=torch.float16), output_type="mp4", ) with open("video.mp4", "wb") as f: f.write(video) ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <video alt="queue.mp4" autoplay loop autobuffer muted playsinline > <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/video_1.mp4" type="video/mp4"> </video> </figure> ### Generation But we want to use the VAE on an actual pipeline to get an actual image, not random noise. The example below shows how to do it with SD v1.5. <details><summary>Code</summary> ```python from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", vae=None, ).to("cuda") prompt = "Strawberry ice cream, in a stylish modern glass, coconut, splashing milk cream and honey, in a gradient purple background, fluid motion, dynamic movement, cinematic lighting, Mysterious" latent = pipe( prompt=prompt, output_type="latent", ).images image = remote_decode( endpoint="https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/", tensor=latent, scaling_factor=0.18215, ) image.save("test.jpg") ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/test.jpg"/> </figure> Here’s another example with Flux. 
<details><summary>Code</summary> ```python from diffusers import FluxPipeline pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16, vae=None, ).to("cuda") prompt = "Strawberry ice cream, in a stylish modern glass, coconut, splashing milk cream and honey, in a gradient purple background, fluid motion, dynamic movement, cinematic lighting, Mysterious" latent = pipe( prompt=prompt, guidance_scale=0.0, num_inference_steps=4, output_type="latent", ).images image = remote_decode( endpoint="https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/", tensor=latent, height=1024, width=1024, scaling_factor=0.3611, shift_factor=0.1159, ) image.save("test.jpg") ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/test_1.jpg"/> </figure> Here’s an example with HunyuanVideo. <details><summary>Code</summary> ```python from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel model_id = "hunyuanvideo-community/HunyuanVideo" transformer = HunyuanVideoTransformer3DModel.from_pretrained( model_id, subfolder="transformer", torch_dtype=torch.bfloat16 ) pipe = HunyuanVideoPipeline.from_pretrained( model_id, transformer=transformer, vae=None, torch_dtype=torch.float16 ).to("cuda") latent = pipe( prompt="A cat walks on the grass, realistic", height=320, width=512, num_frames=61, num_inference_steps=30, output_type="latent", ).frames video = remote_decode( endpoint="https://o7ywnmrahorts457.us-east-1.aws.endpoints.huggingface.cloud/", tensor=latent, output_type="mp4", ) if isinstance(video, bytes): with open("video.mp4", "wb") as f: f.write(video) ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <video alt="queue.mp4" autoplay loop autobuffer muted playsinline > <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/video.mp4" type="video/mp4"> </video> </figure> ### Queueing One of the great benefits of using a remote VAE is that we can queue multiple generation requests. While the current latent is being processed for decoding, we can already queue another one. This helps improve concurrency. 
<details><summary>Code</summary> ```python import queue import threading from IPython.display import display from diffusers import StableDiffusionPipeline def decode_worker(q: queue.Queue): while True: item = q.get() if item is None: break image = remote_decode( endpoint="https://q1bj3bpq6kzilnsu.us-east-1.aws.endpoints.huggingface.cloud/", tensor=item, scaling_factor=0.18215, ) display(image) q.task_done() q = queue.Queue() thread = threading.Thread(target=decode_worker, args=(q,), daemon=True) thread.start() def decode(latent: torch.Tensor): q.put(latent) prompts = [ "Blueberry ice cream, in a stylish modern glass , ice cubes, nuts, mint leaves, splashing milk cream, in a gradient purple background, fluid motion, dynamic movement, cinematic lighting, Mysterious", "Lemonade in a glass, mint leaves, in an aqua and white background, flowers, ice cubes, halo, fluid motion, dynamic movement, soft lighting, digital painting, rule of thirds composition, Art by Greg rutkowski, Coby whitmore", "Comic book art, beautiful, vintage, pastel neon colors, extremely detailed pupils, delicate features, light on face, slight smile, Artgerm, Mary Blair, Edmund Dulac, long dark locks, bangs, glowing, fashionable style, fairytale ambience, hot pink.", "Masterpiece, vanilla cone ice cream garnished with chocolate syrup, crushed nuts, choco flakes, in a brown background, gold, cinematic lighting, Art by WLOP", "A bowl of milk, falling cornflakes, berries, blueberries, in a white background, soft lighting, intricate details, rule of thirds, octane render, volumetric lighting", "Cold Coffee with cream, crushed almonds, in a glass, choco flakes, ice cubes, wet, in a wooden background, cinematic lighting, hyper realistic painting, art by Carne Griffiths, octane render, volumetric lighting, fluid motion, dynamic movement, muted colors,", ] pipe = StableDiffusionPipeline.from_pretrained( "Lykon/dreamshaper-8", torch_dtype=torch.float16, vae=None, ).to("cuda") pipe.unet = pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) _ = pipe( prompt=prompts[0], output_type="latent", ) for prompt in prompts: latent = pipe( prompt=prompt, output_type="latent", ).images decode(latent) q.put(None) thread.join() ``` </details> <figure class="image flex flex-col items-center justify-center text-center m-0 w-full"> <video alt="queue.mp4" autoplay loop autobuffer muted playsinline > <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/remote_vae/queue.mp4" type="video/mp4"> </video> </figure> ## Integrations * **[SD.Next](https://github.com/vladmandic/sdnext):** All-in-one UI with direct supports Hybrid Inference. * **[ComfyUI-HFRemoteVae](https://github.com/kijai/ComfyUI-HFRemoteVae):** ComfyUI node for Hybrid Inference.
diffusers/docs/source/en/hybrid_inference/vae_decode.md/0
{ "file_path": "diffusers/docs/source/en/hybrid_inference/vae_decode.md", "repo_id": "diffusers", "token_count": 4810 }
119
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# Caching

Caching accelerates inference by storing and reusing intermediate outputs of different layers, such as attention and feedforward layers, instead of performing the entire computation at each inference step. It significantly improves generation speed at the expense of more memory and doesn't require additional training.

This guide shows you how to use the caching methods supported in Diffusers.

## Pyramid Attention Broadcast

[Pyramid Attention Broadcast (PAB)](https://huggingface.co/papers/2408.12588) is based on the observation that attention outputs aren't that different between successive timesteps of the generation process. The attention differences are smallest in the cross attention layers and are generally cached over a longer timestep range. This is followed by temporal attention and spatial attention layers.

> [!TIP]
> Not all video models have three types of attention (cross, temporal, and spatial)!

PAB can be combined with other techniques like sequence parallelism and classifier-free guidance parallelism (data parallelism) for near real-time video generation.

Set up and pass a [`PyramidAttentionBroadcastConfig`] to a pipeline's transformer to enable it. The `spatial_attention_block_skip_range` controls how often to skip attention calculations in the spatial attention blocks and the `spatial_attention_timestep_skip_range` is the range of timesteps to skip. Take care to choose an appropriate range because a smaller interval can lead to slower inference speeds and a larger interval can result in lower generation quality.

```python
import torch
from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig

pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipeline.to("cuda")

config = PyramidAttentionBroadcastConfig(
    spatial_attention_block_skip_range=2,
    spatial_attention_timestep_skip_range=(100, 800),
    current_timestep_callback=lambda: pipeline.current_timestep,
)
pipeline.transformer.enable_cache(config)
```

## FasterCache

[FasterCache](https://huggingface.co/papers/2410.19355) caches and reuses attention features similar to [PAB](#pyramid-attention-broadcast) since output differences are small for each successive timestep.

This method may also choose to skip the unconditional branch prediction, when using classifier-free guidance for sampling (common in most base models), and estimate it from the conditional branch prediction if there is significant redundancy in the predicted latent outputs between successive timesteps.

Set up and pass a [`FasterCacheConfig`] to a pipeline's transformer to enable it.
```python
import torch
from diffusers import CogVideoXPipeline, FasterCacheConfig

pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipeline.to("cuda")

config = FasterCacheConfig(
    spatial_attention_block_skip_range=2,
    spatial_attention_timestep_skip_range=(-1, 681),
    current_timestep_callback=lambda: pipeline.current_timestep,
    attention_weight_callback=lambda _: 0.3,
    unconditional_batch_skip_range=5,
    unconditional_batch_timestep_skip_range=(-1, 781),
    tensor_format="BFCHW",
)
pipeline.transformer.enable_cache(config)
```
diffusers/docs/source/en/optimization/cache.md/0
{ "file_path": "diffusers/docs/source/en/optimization/cache.md", "repo_id": "diffusers", "token_count": 1014 }
120
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # xFormers We recommend [xFormers](https://github.com/facebookresearch/xformers) for both inference and training. In our tests, the optimizations performed in the attention blocks allow for both faster speed and reduced memory consumption. Install xFormers from `pip`: ```bash pip install xformers ``` <Tip> The xFormers `pip` package requires the latest version of PyTorch. If you need to use a previous version of PyTorch, then we recommend [installing xFormers from the source](https://github.com/facebookresearch/xformers#installing-xformers). </Tip> After xFormers is installed, you can use `enable_xformers_memory_efficient_attention()` for faster inference and reduced memory consumption as shown in this [section](memory#memory-efficient-attention). <Tip warning={true}> According to this [issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tune or DreamBooth) in some GPUs. If you observe this problem, please install a development version as indicated in the issue comments. </Tip>
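For example, on a typical Stable Diffusion pipeline the call looks like the sketch below; the checkpoint and prompt are only illustrative.

```python
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipeline.enable_xformers_memory_efficient_attention()

image = pipeline("an astronaut riding a horse on the moon").images[0]

# The optimization can be turned off again at any time.
pipeline.disable_xformers_memory_efficient_attention()
```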
diffusers/docs/source/en/optimization/xformers.md/0
{ "file_path": "diffusers/docs/source/en/optimization/xformers.md", "repo_id": "diffusers", "token_count": 447 }
121
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix [InstructPix2Pix](https://hf.co/papers/2211.09800) is a Stable Diffusion model trained to edit images from human-provided instructions. For example, your prompt can be "turn the clouds rainy" and the model will edit the input image accordingly. This model is conditioned on the text prompt (or editing instruction) and the input image. This guide will explore the [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) training script to help you become familiar with it, and how you can adapt it for your own use case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/instruct_pix2pix pip install -r requirements.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script has many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L65) function. Default values are provided for most parameters that work pretty well, but you can also set your own values in the training command if you'd like. 
For example, to increase the resolution of the input image: ```bash accelerate launch train_instruct_pix2pix.py \ --resolution=512 \ ``` Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the relevant parameters for InstructPix2Pix: - `--original_image_column`: the original image before the edits are made - `--edited_image_column`: the image after the edits are made - `--edit_prompt_column`: the instructions to edit the image - `--conditioning_dropout_prob`: the dropout probability for the edited image and edit prompts during training which enables classifier-free guidance (CFG) for one or both conditioning inputs ## Training script The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L374) function. This is where you'll make your changes to the training script to adapt it for your own use-case. As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the InstructPix2Pix relevant parts of the script. The script begins by modifying the [number of input channels](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L445) in the first convolutional layer of the UNet to account for InstructPix2Pix's additional conditioning image: ```py in_channels = 8 out_channels = unet.conv_in.out_channels unet.register_to_config(in_channels=in_channels) with torch.no_grad(): new_conv_in = nn.Conv2d( in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding ) new_conv_in.weight.zero_() new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) unet.conv_in = new_conv_in ``` These UNet parameters are [updated](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L545C1-L551C6) by the optimizer: ```py optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Next, the edited images and edit instructions are [preprocessed](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624) and [tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24). It is important the same image transformations are applied to the original and edited images. 
```py def preprocess_train(examples): preprocessed_images = preprocess_images(examples) original_images, edited_images = preprocessed_images.chunk(2) original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) examples["original_pixel_values"] = original_images examples["edited_pixel_values"] = edited_images captions = list(examples[edit_prompt_column]) examples["input_ids"] = tokenize_captions(captions) return examples ``` Finally, in the [training loop](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L730), it starts by encoding the edited images into latent space: ```py latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor ``` Then, the script applies dropout to the original image and edit instruction embeddings to support CFG. This is what enables the model to modulate the influence of the edit instruction and original image on the edited image. ```py encoder_hidden_states = text_encoder(batch["input_ids"])[0] original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode() if args.conditioning_dropout_prob is not None: random_p = torch.rand(bsz, device=latents.device, generator=generator) prompt_mask = random_p < 2 * args.conditioning_dropout_prob prompt_mask = prompt_mask.reshape(bsz, 1, 1) null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0] encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) image_mask_dtype = original_image_embeds.dtype image_mask = 1 - ( (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) ) image_mask = image_mask.reshape(bsz, 1, 1, 1) original_image_embeds = image_mask * original_image_embeds ``` That's pretty much it! Aside from the differences described here, the rest of the script is very similar to the [Text-to-image](text2image#training-script) training script, so feel free to check it out for more details. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you're happy with the changes to your script or if you're okay with the default configuration, you're ready to launch the training script! 🚀 This guide uses the [fusing/instructpix2pix-1000-samples](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) dataset, which is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered). You can also create and use your own dataset if you'd like (see the [Create a dataset for training](create_dataset) guide). Set the `MODEL_NAME` environment variable to the name of the model (can be a model id on the Hub or a path to a local model), and the `DATASET_ID` to the name of the dataset on the Hub. The script creates and saves all the components (feature extractor, scheduler, text encoder, UNet, etc.) to a subfolder in your repository. <Tip> For better results, try longer training runs with a larger dataset. We've only tested this training script on a smaller-scale dataset. 
<br> To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation image with `--val_image_url` and a validation prompt with `--validation_prompt`. This can be really useful for debugging the model. </Tip> If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 \ --random_flip \ --train_batch_size=4 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 \ --checkpoints_total_limit=1 \ --learning_rate=5e-05 \ --max_grad_norm=1 \ --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` After training is finished, you can use your new InstructPix2Pix for inference: ```py import PIL import requests import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.utils import load_image pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("your_cool_model", torch_dtype=torch.float16).to("cuda") generator = torch.Generator("cuda").manual_seed(0) image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png") prompt = "add some ducks to the lake" num_inference_steps = 20 image_guidance_scale = 1.5 guidance_scale = 10 edited_image = pipeline( prompt, image=image, num_inference_steps=num_inference_steps, image_guidance_scale=image_guidance_scale, guidance_scale=guidance_scale, generator=generator, ).images[0] edited_image.save("edited_image.png") ``` You should experiment with different `num_inference_steps`, `image_guidance_scale`, and `guidance_scale` values to see how they affect inference speed and quality. The guidance scale parameters are especially impactful because they control how much the original image and edit instructions affect the edited image. ## Stable Diffusion XL Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text-encoder to its architecture. Use the [`train_instruct_pix2pix_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py) script to train a SDXL model to follow image editing instructions. The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide. ## Next steps Congratulations on training your own InstructPix2Pix model! 🥳 To learn more about the model, it may be helpful to: - Read the [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) blog post to learn more about some experiments we've done with InstructPix2Pix, dataset preparation, and results for different instructions.
diffusers/docs/source/en/training/instructpix2pix.md/0
{ "file_path": "diffusers/docs/source/en/training/instructpix2pix.md", "repo_id": "diffusers", "token_count": 4157 }
122
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-to-image [[open-in-colab]] When you think of diffusion models, text-to-image is usually one of the first things that come to mind. Text-to-image generates an image from a text description (for example, "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k") which is also known as a *prompt*. From a very high level, a diffusion model takes a prompt and some random initial noise, and iteratively removes the noise to construct an image. The *denoising* process is guided by the prompt, and once the denoising process ends after a predetermined number of time steps, the image representation is decoded into an image. <Tip> Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog post to learn more about how a latent diffusion model works. </Tip> You can generate images from a prompt in 🤗 Diffusers in two steps: 1. Load a checkpoint into the [`AutoPipelineForText2Image`] class, which automatically detects the appropriate pipeline class to use based on the checkpoint: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") ``` 2. Pass a prompt to the pipeline to generate an image: ```py image = pipeline( "stained glass of darth vader, backlight, centered composition, masterpiece, photorealistic, 8k" ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-vader.png"/> </div> ## Popular models The most common text-to-image models are [Stable Diffusion v1.5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), [Stable Diffusion XL (SDXL)](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder). There are also ControlNet models or adapters that can be used with text-to-image models for more direct control in generating images. The results from each model are slightly different because of their architecture and training process, but no matter which model you choose, their usage is more or less the same. Let's use the same prompt for each model and compare their results. ### Stable Diffusion v1.5 [Stable Diffusion v1.5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) is a latent diffusion model initialized from [Stable Diffusion v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4), and finetuned for 595K steps on 512x512 images from the LAION-Aesthetics V2 dataset. 
You can use this model like: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### Stable Diffusion XL SDXL is a much larger version of the previous Stable Diffusion models, and involves a two-stage model process that adds even more details to an image. It also includes some additional *micro-conditionings* to generate high-quality images centered subjects. Take a look at the more comprehensive [SDXL](sdxl) guide to learn more about how to use it. In general, you can use SDXL like: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### Kandinsky 2.2 The Kandinsky model is a bit different from the Stable Diffusion models because it also uses an image prior model to create embeddings that are used to better align text and images in the diffusion model. The easiest way to use Kandinsky 2.2 is: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] image ``` ### ControlNet ControlNet models are auxiliary models or adapters that are finetuned on top of text-to-image models, such as [Stable Diffusion v1.5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5). Using ControlNet models in combination with text-to-image models offers diverse options for more explicit control over how to generate an image. With ControlNet, you add an additional conditioning input image to the model. For example, if you provide an image of a human pose (usually represented as multiple keypoints that are connected into a skeleton) as a conditioning input, the model generates an image that follows the pose of the image. Check out the more in-depth [ControlNet](controlnet) guide to learn more about other conditioning inputs and how to use them. In this example, let's condition the ControlNet with a human pose estimation image. 
Load the ControlNet model pretrained on human pose estimations: ```py from diffusers import ControlNetModel, AutoPipelineForText2Image from diffusers.utils import load_image import torch controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pose_image = load_image("https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png") ``` Pass the `controlnet` to the [`AutoPipelineForText2Image`], and provide the prompt and pose estimation image: ```py pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16" ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=pose_image, generator=generator).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion v1.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Stable Diffusion XL</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Kandinsky 2.2</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-3.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet (pose conditioning)</figcaption> </div> </div> ## Configure pipeline parameters There are a number of parameters that can be configured in the pipeline that affect how an image is generated. You can change the image's output size, specify a negative prompt to improve image quality, and more. This section dives deeper into how to use these parameters. ### Height and width The `height` and `width` parameters control the height and width (in pixels) of the generated image. By default, the Stable Diffusion v1.5 model outputs 512x512 images, but you can change this to any size that is a multiple of 8. For example, to create a rectangular image: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" ).to("cuda") image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", height=768, width=512 ).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-hw.png"/> </div> <Tip warning={true}> Other models may have different default image sizes depending on the image sizes in the training dataset. For example, SDXL's default image size is 1024x1024 and using lower `height` and `width` values may result in lower quality images. Make sure you check the model's API reference first! 
</Tip> ### Guidance scale The `guidance_scale` parameter affects how much the prompt influences image generation. A lower value gives the model "creativity" to generate images that are more loosely related to the prompt. Higher `guidance_scale` values push the model to follow the prompt more closely, and if this value is too high, you may observe some artifacts in the generated image. ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 ).to("cuda") image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", guidance_scale=3.5 ).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-2.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 2.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-7.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 7.5</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-guidance-scale-10.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 10.5</figcaption> </div> </div> ### Negative prompt Just like how a prompt guides generation, a *negative prompt* steers the model away from things you don't want the model to generate. This is commonly used to improve overall image quality by removing poor or bad image features such as "low resolution" or "bad details". You can also use a negative prompt to remove or modify the content and style of an image. ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 ).to("cuda") image = pipeline( prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", negative_prompt="ugly, deformed, disfigured, poor details, bad anatomy", ).images[0] image ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-neg-prompt-1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/text2img-neg-prompt-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "astronaut"</figcaption> </div> </div> ### Generator A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator) object enables reproducibility in a pipeline by setting a manual seed. You can use a `Generator` to generate batches of images and iteratively improve on an image generated from a seed as detailed in the [Improve image quality with deterministic generation](reusing_seeds) guide. You can set a seed and `Generator` as shown below. 
Creating an image with a `Generator` should return the same result each time instead of randomly generating a new image.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(30)
image = pipeline(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    generator=generator,
).images[0]
image
```

## Control image generation

There are several ways to exert more control over how an image is generated outside of configuring a pipeline's parameters, such as prompt weighting and ControlNet models.

### Prompt weighting

Prompt weighting is a technique for increasing or decreasing the importance of concepts in a prompt to emphasize or minimize certain features in an image. We recommend using the [Compel](https://github.com/damian0815/compel) library to help you generate the weighted prompt embeddings.

<Tip>

Learn how to create the prompt embeddings in the [Prompt weighting](weighted_prompts) guide. This example focuses on how to use the prompt embeddings in the pipeline.

</Tip>

Once you've created the embeddings, you can pass them to the `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter in the pipeline.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
image = pipeline(
    prompt_embeds=prompt_embeds, # generated from Compel
    negative_prompt_embeds=negative_prompt_embeds, # generated from Compel
).images[0]
```

### ControlNet

As you saw in the [ControlNet](#controlnet) section, these models offer a more flexible and accurate way to generate images by incorporating an additional conditioning image input. Each ControlNet model is pretrained on a particular type of conditioning image to generate new images that resemble it. For example, if you take a ControlNet model pretrained on depth maps, you can give the model a depth map as a conditioning input and it'll generate an image that preserves the spatial information in it. This is quicker and easier than specifying the depth information in a prompt. You can even combine multiple conditioning inputs with a [MultiControlNet](controlnet#multicontrolnet)!

There are many types of conditioning inputs you can use, and 🤗 Diffusers supports ControlNet for Stable Diffusion and SDXL models. Take a look at the more comprehensive [ControlNet](controlnet) guide to learn how you can use these models.

## Optimize

Diffusion models are large, and the iterative nature of denoising an image is computationally expensive and intensive. But this doesn't mean you need access to powerful - or even many - GPUs to use them. There are many optimization techniques for running diffusion models on consumer and free-tier resources. For example, you can load model weights in half-precision to save GPU memory and increase speed, or offload the entire model to the CPU to save even more memory. PyTorch 2.0 also supports a more memory-efficient attention mechanism called [*scaled dot product attention*](../optimization/fp16#scaled-dot-product-attention) that is automatically enabled if you're using PyTorch 2.0.
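For example, a minimal sketch of loading the weights in half-precision and offloading the model to the CPU could look like the snippet below. It assumes you have Accelerate installed (the offloading hooks rely on it), and the exact savings depend on your hardware and the model you load.

```py
from diffusers import AutoPipelineForText2Image
import torch

# Load the weights in half-precision to roughly halve the memory needed for the model itself.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
)

# Keep the components on the CPU and only move each one to the GPU while it is being used.
# Don't call .to("cuda") yourself when offloading is enabled.
pipeline.enable_model_cpu_offload()

image = pipeline(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
).images[0]
```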
You can combine these optimizations with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) to speed your code up even more:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16").to("cuda")
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```

For more tips on how to optimize your code to save memory and speed up inference, read the [Accelerate inference](../optimization/fp16) and [Reduce memory usage](../optimization/memory) guides.
diffusers/docs/source/en/using-diffusers/conditional_image_generation.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/conditional_image_generation.md", "repo_id": "diffusers", "token_count": 5201 }
123
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load pipelines [[open-in-colab]] Diffusion systems consist of multiple components like parameterized models and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API. At the same time, the [`DiffusionPipeline`] is entirely customizable so you can modify each component to build a diffusion system for your use case. This guide will show you how to load: - pipelines from the Hub and locally - different components into a pipeline - multiple pipelines without increasing memory usage - checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights ## Load a pipeline > [!TIP] > Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you're interested in an explanation about how the [`DiffusionPipeline`] class works. There are two ways to load a pipeline for a task: 1. Load the generic [`DiffusionPipeline`] class and allow it to automatically detect the correct pipeline class from the checkpoint. 2. Load a specific pipeline class for a specific task. <hfoptions id="pipelines"> <hfoption id="generic pipeline"> The [`DiffusionPipeline`] class is a simple and generic way to load the latest trending diffusion model from the [Hub](https://huggingface.co/models?library=diffusers&sort=trending). It uses the [`~DiffusionPipeline.from_pretrained`] method to automatically detect the correct pipeline class for a task from the checkpoint, downloads and caches all the required configuration and weight files, and returns a pipeline ready for inference. ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` This same checkpoint can also be used for an image-to-image task. The [`DiffusionPipeline`] class can handle any task as long as you provide the appropriate inputs. For example, for an image-to-image task, you need to pass an initial image to the pipeline. ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=init_image).images[0] ``` </hfoption> <hfoption id="specific pipeline"> Checkpoints can be loaded by their specific pipeline class if you already know it. For example, to load a Stable Diffusion model, use the [`StableDiffusionPipeline`] class. 
```python from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` This same checkpoint may also be used for another task like image-to-image. To differentiate what task you want to use the checkpoint for, you have to use the corresponding task-specific pipeline class. For example, to use the same checkpoint for image-to-image, use the [`StableDiffusionImg2ImgPipeline`] class. ```py from diffusers import StableDiffusionImg2ImgPipeline pipeline = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` </hfoption> </hfoptions> Use the Space below to gauge a pipeline's memory requirements before you download and load it to see if it runs on your hardware. <div class="block dark:hidden"> <iframe src="https://diffusers-compute-pipeline-size.hf.space?__theme=light" width="850" height="1600" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://diffusers-compute-pipeline-size.hf.space?__theme=dark" width="850" height="1600" ></iframe> </div> ### Specifying Component-Specific Data Types You can customize the data types for individual sub-models by passing a dictionary to the `torch_dtype` parameter. This allows you to load different components of a pipeline in different floating point precisions. For instance, if you want to load the transformer with `torch.bfloat16` and all other components with `torch.float16`, you can pass a dictionary mapping: ```python from diffusers import HunyuanVideoPipeline import torch pipe = HunyuanVideoPipeline.from_pretrained( "hunyuanvideo-community/HunyuanVideo", torch_dtype={"transformer": torch.bfloat16, "default": torch.float16}, ) print(pipe.transformer.dtype, pipe.vae.dtype) # (torch.bfloat16, torch.float16) ``` If a component is not explicitly specified in the dictionary and no `default` is provided, it will be loaded with `torch.float32`. ### Parallel loading Large models are often [sharded](../training/distributed_inference#model-sharding) into smaller files so that they are easier to load. Diffusers supports loading shards in parallel to speed up the loading process. Set the environment variables below to enable parallel loading. - Set `HF_ENABLE_PARALLEL_LOADING` to `"YES"` to enable parallel loading of shards. - Set `HF_PARALLEL_LOADING_WORKERS` to configure the number of parallel threads to use when loading shards. More workers loads a model faster but uses more memory. The `device_map` argument should be set to `"cuda"` to pre-allocate a large chunk of memory based on the model size. This substantially reduces model load time because warming up the memory allocator now avoids many smaller calls to the allocator later. ```py import os import torch from diffusers import DiffusionPipeline os.environ["HF_ENABLE_PARALLEL_LOADING"] = "YES" pipeline = DiffusionPipeline.from_pretrained( "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16, device_map="cuda" ) ``` ### Local pipeline To load a pipeline locally, use [git-lfs](https://git-lfs.github.com/) to manually download a checkpoint to your local disk. ```bash git-lfs install git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` This creates a local folder, ./stable-diffusion-v1-5, on your disk and you should pass its path to [`~DiffusionPipeline.from_pretrained`]. 
```python from diffusers import DiffusionPipeline stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` The [`~DiffusionPipeline.from_pretrained`] method won't download files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint. ## Customize a pipeline You can customize a pipeline by loading different components into it. This is important because you can: - change to a scheduler with faster generation speed or higher generation quality depending on your needs (call the `scheduler.compatibles` method on your pipeline to see compatible schedulers) - change a default pipeline component to a newer and better performing one For example, let's customize the default [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) checkpoint with: - The [`HeunDiscreteScheduler`] to generate higher quality images at the expense of slower generation speed. You must pass the `subfolder="scheduler"` parameter in [`~HeunDiscreteScheduler.from_pretrained`] to load the scheduler configuration into the correct [subfolder](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/scheduler) of the pipeline repository. - A more stable VAE that runs in fp16. ```py from diffusers import StableDiffusionXLPipeline, HeunDiscreteScheduler, AutoencoderKL import torch scheduler = HeunDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler") vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) ``` Now pass the new scheduler and VAE to the [`StableDiffusionXLPipeline`]. ```py pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", scheduler=scheduler, vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") ``` ## Reuse a pipeline When you load multiple pipelines that share the same model components, it makes sense to reuse the shared components instead of reloading everything into memory again, especially if your hardware is memory-constrained. For example: 1. You generated an image with the [`StableDiffusionPipeline`] but you want to improve its quality with the [`StableDiffusionSAGPipeline`]. Both of these pipelines share the same pretrained model, so it'd be a waste of memory to load the same model twice. 2. You want to add a model component, like a [`MotionAdapter`](../api/pipelines/animatediff#animatediffpipeline), to [`AnimateDiffPipeline`] which was instantiated from an existing [`StableDiffusionPipeline`]. Again, both pipelines share the same pretrained model, so it'd be a waste of memory to load an entirely new pipeline again. With the [`DiffusionPipeline.from_pipe`] API, you can switch between multiple pipelines to take advantage of their different features without increasing memory-usage. It is similar to turning on and off a feature in your pipeline. > [!TIP] > To switch between tasks (rather than features), use the [`~DiffusionPipeline.from_pipe`] method with the [AutoPipeline](../api/pipelines/auto_pipeline) class, which automatically identifies the pipeline class based on the task (learn more in the [AutoPipeline](../tutorials/autopipeline) tutorial). Let's start with a [`StableDiffusionPipeline`] and then reuse the loaded model components to create a [`StableDiffusionSAGPipeline`] to increase generation quality. 
You'll use the [`StableDiffusionPipeline`] with an [IP-Adapter](./ip_adapter) to generate a bear eating pizza. ```python from diffusers import DiffusionPipeline, StableDiffusionSAGPipeline import torch import gc from diffusers.utils import load_image from accelerate.utils import compute_module_sizes image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png") pipe_sd = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", torch_dtype=torch.float16) pipe_sd.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipe_sd.set_ip_adapter_scale(0.6) pipe_sd.to("cuda") generator = torch.Generator(device="cpu").manual_seed(33) out_sd = pipe_sd( prompt="bear eats pizza", negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", ip_adapter_image=image, num_inference_steps=50, generator=generator, ).images[0] out_sd ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/from_pipe_out_sd_0.png"/> </div> For reference, you can check how much memory this process consumed. ```python def bytes_to_giga_bytes(bytes): return bytes / 1024 / 1024 / 1024 print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") "Max memory allocated: 4.406213283538818 GB" ``` Now, reuse the same pipeline components from [`StableDiffusionPipeline`] in [`StableDiffusionSAGPipeline`] with the [`~DiffusionPipeline.from_pipe`] method. > [!WARNING] > Some pipeline methods may not function properly on new pipelines created with [`~DiffusionPipeline.from_pipe`]. For instance, the [`~DiffusionPipeline.enable_model_cpu_offload`] method installs hooks on the model components based on a unique offloading sequence for each pipeline. If the models are executed in a different order in the new pipeline, the CPU offloading may not work correctly. > > To ensure everything works as expected, we recommend re-applying a pipeline method on a new pipeline created with [`~DiffusionPipeline.from_pipe`]. ```python pipe_sag = StableDiffusionSAGPipeline.from_pipe( pipe_sd ) generator = torch.Generator(device="cpu").manual_seed(33) out_sag = pipe_sag( prompt="bear eats pizza", negative_prompt="wrong white balance, dark, sketches,worst quality,low quality", ip_adapter_image=image, num_inference_steps=50, generator=generator, guidance_scale=1.0, sag_scale=0.75 ).images[0] out_sag ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/from_pipe_out_sag_1.png"/> </div> If you check the memory usage, you'll see it remains the same as before because [`StableDiffusionPipeline`] and [`StableDiffusionSAGPipeline`] are sharing the same pipeline components. This allows you to use them interchangeably without any additional memory overhead. ```py print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB") "Max memory allocated: 4.406213283538818 GB" ``` Let's animate the image with the [`AnimateDiffPipeline`] and also add a [`MotionAdapter`] module to the pipeline. For the [`AnimateDiffPipeline`], you need to unload the IP-Adapter first and reload it *after* you've created your new pipeline (this only applies to the [`AnimateDiffPipeline`]). 
```py
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
from diffusers.utils import export_to_gif

pipe_sag.unload_ip_adapter()
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)

pipe_animate = AnimateDiffPipeline.from_pipe(pipe_sd, motion_adapter=adapter)
pipe_animate.scheduler = DDIMScheduler.from_config(pipe_animate.scheduler.config, beta_schedule="linear")
# load IP-Adapter and LoRA weights again
pipe_animate.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe_animate.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
pipe_animate.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(33)
pipe_animate.set_adapters("zoom-out", adapter_weights=0.75)
out = pipe_animate(
    prompt="bear eats pizza",
    num_frames=16,
    num_inference_steps=50,
    ip_adapter_image=image,
    generator=generator,
).frames[0]
export_to_gif(out, "out_animate.gif")
```

<div class="flex justify-center">
  <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/from_pipe_out_animate_3.gif"/>
</div>

The [`AnimateDiffPipeline`] is more memory-intensive and consumes 15GB of memory (see the [Memory-usage of from_pipe](#memory-usage-of-from_pipe) section to learn what this means for your memory-usage).

```py
print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB")
"Max memory allocated: 15.178664207458496 GB"
```

### Modify from_pipe components

Pipelines loaded with [`~DiffusionPipeline.from_pipe`] can be customized with different model components or methods. However, whenever you modify the *state* of the model components, it affects all the other pipelines that share the same components. For example, if you call [`~diffusers.loaders.IPAdapterMixin.unload_ip_adapter`] on the [`StableDiffusionSAGPipeline`], you won't be able to use IP-Adapter with the [`StableDiffusionPipeline`] because it's been removed from their shared components.

```py
pipe_sag.unload_ip_adapter()

generator = torch.Generator(device="cpu").manual_seed(33)
out_sd = pipe_sd(
    prompt="bear eats pizza",
    negative_prompt="wrong white balance, dark, sketches,worst quality,low quality",
    ip_adapter_image=image,
    num_inference_steps=50,
    generator=generator,
).images[0]
"AttributeError: 'NoneType' object has no attribute 'image_projection_layers'"
```

### Memory usage of from_pipe

The memory requirement of loading multiple pipelines with [`~DiffusionPipeline.from_pipe`] is determined by the pipeline with the highest memory-usage regardless of the number of pipelines you create.

| Pipeline | Memory usage (GB) |
|---|---|
| StableDiffusionPipeline | 4.400 |
| StableDiffusionSAGPipeline | 4.400 |
| AnimateDiffPipeline | 15.178 |

The [`AnimateDiffPipeline`] has the highest memory requirement, so the *total memory-usage* is based only on the [`AnimateDiffPipeline`]. Your memory-usage will not increase if you create additional pipelines as long as their memory requirements don't exceed that of the [`AnimateDiffPipeline`]. Each pipeline can be used interchangeably without any additional memory overhead.

## Safety checker

Diffusers implements a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) for Stable Diffusion models which can generate harmful content. The safety checker screens the generated output against known hardcoded not-safe-for-work (NSFW) content.
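When the safety checker flags a generated image, the pipeline replaces it with a black image and records the result in the output's `nsfw_content_detected` field. A minimal sketch of inspecting that flag, reusing a Stable Diffusion pipeline loaded earlier in this guide:

```py
output = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k")
image = output.images[0]

# One boolean per generated image; True means the image was flagged and blacked out.
print(output.nsfw_content_detected)
```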
If for whatever reason you'd like to disable the safety checker, pass `safety_checker=None` to the [`~DiffusionPipeline.from_pretrained`] method. ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, use_safetensors=True) """ You have disabled the safety checker for <class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'> by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling it only for use cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 . """ ``` ## Checkpoint variants A checkpoint variant is usually a checkpoint whose weights are: - Stored in a different floating point type, such as [torch.float16](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU. - Non-exponential mean averaged (EMA) weights which shouldn't be used for inference. You should use this variant to continue finetuning a model. > [!TIP] > When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories. For example, [stabilityai/stable-diffusion-2](https://hf.co/stabilityai/stable-diffusion-2) and [stabilityai/stable-diffusion-2-1](https://hf.co/stabilityai/stable-diffusion-2-1) are stored in separate repositories. Otherwise, a variant is **identical** to the original checkpoint. They have exactly the same serialization format (like [safetensors](./using_safetensors)), model structure, and their weights have identical tensor shapes. | **checkpoint type** | **weight name** | **argument for loading weights** | |---------------------|---------------------------------------------|----------------------------------| | original | diffusion_pytorch_model.safetensors | | | floating point | diffusion_pytorch_model.fp16.safetensors | `variant`, `torch_dtype` | | non-EMA | diffusion_pytorch_model.non_ema.safetensors | `variant` | There are two important arguments for loading variants: - `torch_dtype` specifies the floating point precision of the loaded checkpoint. For example, if you want to save bandwidth by loading a fp16 variant, you should set `variant="fp16"` and `torch_dtype=torch.float16` to *convert the weights* to fp16. Otherwise, the fp16 weights are converted to the default fp32 precision. If you only set `torch_dtype=torch.float16`, the default fp32 weights are downloaded first and then converted to fp16. - `variant` specifies which files should be loaded from the repository. For example, if you want to load a non-EMA variant of a UNet from [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet), set `variant="non_ema"` to download the `non_ema` file. 
<hfoptions id="variants"> <hfoption id="fp16"> ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True ) ``` </hfoption> <hfoption id="non-EMA"> ```py pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema", use_safetensors=True ) ``` </hfoption> </hfoptions> Use the `variant` parameter in the [`DiffusionPipeline.save_pretrained`] method to save a checkpoint as a different floating point type or as a non-EMA variant. You should try save a variant to the same folder as the original checkpoint, so you have the option of loading both from the same folder. <hfoptions id="save"> <hfoption id="fp16"> ```python from diffusers import DiffusionPipeline pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="fp16") ``` </hfoption> <hfoption id="non_ema"> ```py pipeline.save_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", variant="non_ema") ``` </hfoption> </hfoptions> If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint. ```python # 👎 this won't work pipeline = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) # 👍 this works pipeline = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16, use_safetensors=True ) ``` ## DiffusionPipeline explained As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things: - Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files. - Load the cached weights into the correct pipeline [class](../api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it. The pipelines' underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5). ```python from diffusers import DiffusionPipeline repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True) print(pipeline) ``` You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components: - `"feature_extractor"`: a [`~transformers.CLIPImageProcessor`] from 🤗 Transformers. - `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content. - `"scheduler"`: an instance of [`PNDMScheduler`]. - `"text_encoder"`: a [`~transformers.CLIPTextModel`] from 🤗 Transformers. - `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from 🤗 Transformers. - `"unet"`: an instance of [`UNet2DConditionModel`]. - `"vae"`: an instance of [`AutoencoderKL`]. 
```json StableDiffusionPipeline { "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` Compare the components of the pipeline instance to the [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) folder structure, and you'll see there is a separate folder for each of the components in the repository: ``` . ├── feature_extractor │   └── preprocessor_config.json ├── model_index.json ├── safety_checker │   ├── config.json | ├── model.fp16.safetensors │ ├── model.safetensors │ ├── pytorch_model.bin | └── pytorch_model.fp16.bin ├── scheduler │   └── scheduler_config.json ├── text_encoder │   ├── config.json | ├── model.fp16.safetensors │ ├── model.safetensors │ |── pytorch_model.bin | └── pytorch_model.fp16.bin ├── tokenizer │   ├── merges.txt │   ├── special_tokens_map.json │   ├── tokenizer_config.json │   └── vocab.json ├── unet │   ├── config.json │   ├── diffusion_pytorch_model.bin | |── diffusion_pytorch_model.fp16.bin │ |── diffusion_pytorch_model.f16.safetensors │ |── diffusion_pytorch_model.non_ema.bin │ |── diffusion_pytorch_model.non_ema.safetensors │ └── diffusion_pytorch_model.safetensors |── vae . ├── config.json . ├── diffusion_pytorch_model.bin ├── diffusion_pytorch_model.fp16.bin ├── diffusion_pytorch_model.fp16.safetensors └── diffusion_pytorch_model.safetensors ``` You can access each of the components of the pipeline as an attribute to view its configuration: ```py pipeline.tokenizer CLIPTokenizer( name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", vocab_size=49408, model_max_length=77, is_fast=False, padding_side="right", truncation_side="right", special_tokens={ "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "pad_token": "<|endoftext|>", }, clean_up_tokenization_spaces=True ) ``` Every pipeline expects a [`model_index.json`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json) file that tells the [`DiffusionPipeline`]: - which pipeline class to load from `_class_name` - which version of 🧨 Diffusers was used to create the model in `_diffusers_version` - what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name) ```json { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.6.0", "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ```
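Because `model_index.json` records the library and class of every component, you can also load a single component on its own by pointing its class at the matching subfolder. The snippet below is a minimal sketch of this pattern for the text encoder and UNet; the same approach works for any of the other components.

```py
from transformers import CLIPTextModel
from diffusers import UNet2DConditionModel

repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# The `library`/`class` entries in model_index.json tell you which class to import,
# and `subfolder` points at the matching folder in the repository.
text_encoder = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", use_safetensors=True)
unet = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet", use_safetensors=True)
```

Components loaded this way can be passed directly to a pipeline class, just like the scheduler and VAE in the [Customize a pipeline](#customize-a-pipeline) section.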
diffusers/docs/source/en/using-diffusers/loading.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/loading.md", "repo_id": "diffusers", "token_count": 9164 }
124
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Textual Inversion

[Textual Inversion](https://huggingface.co/papers/2208.01618) is a method for generating personalized images of a concept. It works by fine-tuning a model's word embeddings on 3-5 images of the concept (for example, pixel art) that is associated with a unique token (`<sks>`). This allows you to use the `<sks>` token in your prompt to trigger the model to generate pixel art images.

Textual Inversion weights are very lightweight and typically only a few KBs because they're only word embeddings. However, this also means the word embeddings need to be loaded after loading a model with [`~DiffusionPipeline.from_pretrained`].

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
```

Load the word embeddings with [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] and include the unique token in the prompt to activate its generation.

```py
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, <gta5-artwork> style"
pipeline(prompt).images[0]
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_txt_embed.png" />
</div>

Textual Inversion can also be trained to learn *negative embeddings* to steer generation away from unwanted characteristics such as "blurry" or "ugly". It is useful for improving image quality.

EasyNegative is a widely used negative embedding that contains multiple learned negative concepts. Load the negative embeddings and specify the file name and token associated with the negative embeddings. Pass the token to `negative_prompt` in your pipeline to activate it.

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

pipeline.load_textual_inversion(
    "EvilEngine/easynegative", weight_name="easynegative.safetensors", token="easynegative"
)

prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration"
negative_prompt = "easynegative"

pipeline(prompt, negative_prompt=negative_prompt).images[0]
```

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png" />
</div>
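If you no longer need the learned embeddings, you can also remove them from the pipeline again. The sketch below assumes your installed version of Diffusers exposes the unload method on the same loader mixin.

```py
# Remove every textual inversion embedding that was loaded into the pipeline.
pipeline.unload_textual_inversion()
```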
diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md", "repo_id": "diffusers", "token_count": 945 }
125
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 철학 [[philosophy]] 🧨 Diffusers는 다양한 모달리티에서 **최신의** 사전 훈련된 diffusion 모델을 제공합니다. 그 목적은 추론과 훈련을 위한 **모듈식 툴박스**로 사용되는 것입니다. 저희는 시간이 지나도 변치 않는 라이브러리를 구축하는 것을 목표로 하기에 API 설계를 매우 중요하게 생각합니다. 간단히 말해서, Diffusers는 PyTorch를 자연스럽게 확장할 수 있도록 만들어졌습니다. 따라서 대부분의 설계 선택은 [PyTorch의 설계 원칙](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy)에 기반합니다. 이제 가장 중요한 것들을 살펴보겠습니다: ## 성능보다는 사용성을 [[usability-over-performance]] - Diffusers는 다양한 성능 향상 기능이 내장되어 있지만 (자세한 내용은 [메모리와 속도](https://huggingface.co/docs/diffusers/optimization/fp16) 참조), 모델은 항상 가장 높은 정밀도와 최소한의 최적화로 로드됩니다. 따라서 사용자가 별도로 정의하지 않는 한 기본적으로 diffusion 파이프라인은 항상 float32 정밀도로 CPU에 인스턴스화됩니다. 이는 다양한 플랫폼과 가속기에서의 사용성을 보장하며, 라이브러리를 실행하기 위해 복잡한 설치가 필요하지 않다는 것을 의미합니다. - Diffusers는 **가벼운** 패키지를 지향하기 때문에 필수 종속성은 거의 없지만 성능을 향상시킬 수 있는 많은 선택적 종속성이 있습니다 (`accelerate`, `safetensors`, `onnx` 등). 저희는 라이브러리를 가능한 한 가볍게 유지하여 다른 패키지에 대한 종속성 걱정이 없도록 노력하고 있습니다. - Diffusers는 간결하고 이해하기 쉬운 코드를 선호합니다. 이는 람다 함수나 고급 PyTorch 연산자와 같은 압축된 코드 구문을 자주 사용하지 않는 것을 의미합니다. ## 쉬움보다는 간단함을 [[simple-over-easy]] PyTorch에서는 **명시적인 것이 암시적인 것보다 낫다**와 **단순한 것이 복잡한 것보다 낫다**라고 말합니다. 이 설계 철학은 라이브러리의 여러 부분에 반영되어 있습니다: - [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to)와 같은 메소드를 사용하여 사용자가 장치 관리를 할 수 있도록 PyTorch의 API를 따릅니다. - 잘못된 입력을 조용히 수정하는 대신 간결한 오류 메시지를 발생시키는 것이 우선입니다. Diffusers는 라이브러리를 가능한 한 쉽게 사용할 수 있도록 하는 것보다 사용자를 가르치는 것을 목표로 합니다. - 복잡한 모델과 스케줄러 로직이 내부에서 마법처럼 처리하는 대신 노출됩니다. 스케줄러/샘플러는 서로에게 최소한의 종속성을 가지고 분리되어 있습니다. 이로써 사용자는 언롤된 노이즈 제거 루프를 작성해야 합니다. 그러나 이 분리는 디버깅을 더 쉽게하고 노이즈 제거 과정을 조정하거나 diffusers 모델이나 스케줄러를 교체하는 데 사용자에게 더 많은 제어권을 제공합니다. - diffusers 파이프라인의 따로 훈련된 구성 요소인 text encoder, unet 및 variational autoencoder는 각각 자체 모델 클래스를 갖습니다. 이로써 사용자는 서로 다른 모델의 구성 요소 간의 상호 작용을 처리해야 하며, 직렬화 형식은 모델 구성 요소를 다른 파일로 분리합니다. 그러나 이는 디버깅과 커스터마이징을 더 쉽게합니다. DreamBooth나 Textual Inversion 훈련은 Diffusers의 'diffusion 파이프라인의 단일 구성 요소들을 분리할 수 있는 능력' 덕분에 매우 간단합니다. ## 추상화보다는 수정 가능하고 기여하기 쉬움을 [[tweakable-contributor-friendly-over-abstraction]] 라이브러리의 대부분에 대해 Diffusers는 [Transformers 라이브러리](https://github.com/huggingface/transformers)의 중요한 설계 원칙을 채택합니다, 바로 성급한 추상화보다는 copy-pasted 코드를 선호한다는 것입니다. 이 설계 원칙은 [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself)와 같은 인기 있는 설계 원칙과는 대조적으로 매우 의견이 분분한데요. 간단히 말해서, Transformers가 모델링 파일에 대해 수행하는 것처럼, Diffusers는 매우 낮은 수준의 추상화와 매우 독립적인 코드를 유지하는 것을 선호합니다. 함수, 긴 코드 블록, 심지어 클래스도 여러 파일에 복사할 수 있으며, 이는 처음에는 라이브러리를 유지할 수 없게 만드는 나쁜, 서투른 설계 선택으로 보일 수 있습니다. 하지만 이러한 설계는 매우 성공적이며, 커뮤니티 기반의 오픈 소스 기계 학습 라이브러리에 매우 적합합니다. 그 이유는 다음과 같습니다: - 기계 학습은 패러다임, 모델 아키텍처 및 알고리즘이 빠르게 변화하는 매우 빠르게 움직이는 분야이기 때문에 오랜 기간 지속되는 코드 추상화를 정의하기가 매우 어렵습니다. - 기계 학습 전문가들은 아이디어와 연구를 위해 기존 코드를 빠르게 조정할 수 있어야 하므로, 많은 추상화보다는 독립적인 코드를 선호합니다. - 오픈 소스 라이브러리는 커뮤니티 기여에 의존하므로, 기여하기 쉬운 라이브러리를 구축해야 합니다. 코드가 추상화되면 의존성이 많아지고 읽기 어렵고 기여하기 어려워집니다. 기여자들은 중요한 기능을 망가뜨릴까 두려워하여 매우 추상화된 라이브러리에 기여하지 않게 됩니다. 
라이브러리에 기여하는 것이 다른 기본 코드를 망가뜨릴 수 없다면, 잠재적인 새로운 기여자에게 더욱 환영받을 수 있을 뿐만 아니라 여러 부분에 대해 병렬적으로 검토하고 기여하기가 더 쉬워집니다. Hugging Face에서는 이 설계를 **단일 파일 정책**이라고 부르며, 특정 클래스의 대부분의 코드가 단일하고 독립적인 파일에 작성되어야 한다는 의미입니다. 철학에 대해 자세히 알아보려면 [이 블로그 글](https://huggingface.co/blog/transformers-design-philosophy)을 참조할 수 있습니다. Diffusers에서는 이러한 철학을 파이프라인과 스케줄러에 모두 따르지만, diffusion 모델에 대해서는 일부만 따릅니다. 일부만 따르는 이유는 Diffusion 파이프라인인 [DDPM](https://huggingface.co/docs/diffusers/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [unCLIP (DALL·E 2)](https://huggingface.co/docs/diffusers/api/pipelines/unclip) 및 [Imagen](https://imagen.research.google/) 등 대부분의 diffusion 파이프라인은 동일한 diffusion 모델인 [UNet](https://huggingface.co/docs/diffusers/api/models/unet2d-cond)에 의존하기 때문입니다. 좋아요, 이제 🧨 Diffusers가 설계된 방식을 대략적으로 이해했을 것입니다 🤗. 우리는 이러한 설계 원칙을 일관되게 라이브러리 전체에 적용하려고 노력하고 있습니다. 그럼에도 불구하고 철학에 대한 일부 예외 사항이나 불행한 설계 선택이 있을 수 있습니다. 디자인에 대한 피드백이 있다면 [GitHub에서 직접](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) 알려주시면 감사하겠습니다. ## 디자인 철학 자세히 알아보기 [[design-philosophy-in-details]] 이제 디자인 철학의 세부 사항을 좀 더 자세히 살펴보겠습니다. Diffusers는 주로 세 가지 주요 클래스로 구성됩니다: [파이프라인](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [모델](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), 그리고 [스케줄러](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers). 각 클래스에 대한 더 자세한 설계 결정 사항을 살펴보겠습니다. ### 파이프라인 [[pipelines]] 파이프라인은 사용하기 쉽도록 설계되었으며 (따라서 [*쉬움보다는 간단함을*](#쉬움보다는-간단함을)을 100% 따르지는 않음), feature-complete하지 않으며, 추론을 위한 [모델](#모델)과 [스케줄러](#스케줄러)를 사용하는 방법의 예시로 간주될 수 있습니다. 다음과 같은 설계 원칙을 따릅니다: - 파이프라인은 단일 파일 정책을 따릅니다. 모든 파이프라인은 src/diffusers/pipelines의 개별 디렉토리에 있습니다. 하나의 파이프라인 폴더는 하나의 diffusion 논문/프로젝트/릴리스에 해당합니다. 여러 파이프라인 파일은 하나의 파이프라인 폴더에 모을 수 있습니다. 예를 들어 [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion)에서 그렇게 하고 있습니다. 파이프라인이 유사한 기능을 공유하는 경우, [# Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251)을 사용할 수 있습니다. - 파이프라인은 모두 [`DiffusionPipeline`]을 상속합니다. - 각 파이프라인은 서로 다른 모델 및 스케줄러 구성 요소로 구성되어 있으며, 이는 [`model_index.json` 파일](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json)에 문서화되어 있으며, 파이프라인의 속성 이름과 동일한 이름으로 액세스할 수 있으며, [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) 함수를 통해 파이프라인 간에 공유할 수 있습니다. - 각 파이프라인은 [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) 함수를 통해 로드할 수 있어야 합니다. - 파이프라인은 추론에**만** 사용되어야 합니다. - 파이프라인은 매우 가독성이 좋고, 이해하기 쉽고, 쉽게 조정할 수 있도록 설계되어야 합니다. - 파이프라인은 서로 상호작용하고, 상위 수준 API에 쉽게 통합할 수 있도록 설계되어야 합니다. - 파이프라인은 사용자 인터페이스가 feature-complete하지 않게 하는 것을 목표로 합니다. future-complete한 사용자 인터페이스를 원한다면 [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), [lama-cleaner](https://github.com/Sanster/lama-cleaner)를 참조해야 합니다. - 모든 파이프라인은 오로지 `__call__` 메소드를 통해 실행할 수 있어야 합니다. `__call__` 인자의 이름은 모든 파이프라인에서 공유되어야 합니다. - 파이프라인은 해결하고자 하는 작업의 이름으로 지정되어야 합니다. - 대부분의 경우에 새로운 diffusion 파이프라인은 새로운 파이프라인 폴더/파일에 구현되어야 합니다. 
### 모델 [[models]] 모델은 [PyTorch의 Module 클래스](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)의 자연스러운 확장이 되도록, 구성 가능한 툴박스로 설계되었습니다. 그리고 모델은 **단일 파일 정책**을 일부만 따릅니다. 다음과 같은 설계 원칙을 따릅니다: - 모델은 **모델 아키텍처 유형**에 해당합니다. 예를 들어 [`UNet2DConditionModel`] 클래스는 2D 이미지 입력을 기대하고 일부 context에 의존하는 모든 UNet 변형들에 사용됩니다. - 모든 모델은 [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models)에서 찾을 수 있으며, 각 모델 아키텍처는 해당 파일에 정의되어야 합니다. 예를 들어 [`unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py), [`transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformer_2d.py) 등이 있습니다. - 모델은 **단일 파일 정책**을 따르지 않으며, [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py) 등과 같은 작은 모델 구성 요소를 사용해야 합니다. **참고**: 이는 Transformers의 모델링 파일과는 대조적으로 모델이 실제로 단일 파일 정책을 따르지 않음을 보여줍니다. - 모델은 PyTorch의 `Module` 클래스와 마찬가지로 복잡성을 노출하고 명확한 오류 메시지를 제공해야 합니다. - 모든 모델은 `ModelMixin`과 `ConfigMixin`을 상속합니다. - 모델은 주요 코드 변경이 필요하지 않고, 역호환성을 유지하며, 메모리 또는 컴퓨팅과 관련한 중요한 이득을 제공할 때 성능을 위해 최적화할 수 있습니다. - 모델은 기본적으로 가장 높은 정밀도와 가장 낮은 성능 설정을 가져야 합니다. - Diffusers에 이미 있는 모델 아키텍처로 분류할 수 있는 새로운 모델 체크포인트를 통합할 때는 기존 모델 아키텍처를 새로운 체크포인트와 호환되도록 수정해야 합니다. 새로운 파일을 만들어야 하는 경우는 모델 아키텍처가 근본적으로 다른 경우에만 해당합니다. - 모델은 미래의 변경 사항을 쉽게 확장할 수 있도록 설계되어야 합니다. 이는 공개 함수 인수들과 구성 인수들을 제한하고,미래의 변경 사항을 "예상"하는 것을 통해 달성할 수 있습니다. 예를 들어, 불리언 `is_..._type` 인수보다는 새로운 미래 유형에 쉽게 확장할 수 있는 문자열 "...type" 인수를 추가하는 것이 일반적으로 더 좋습니다. 새로운 모델 체크포인트가 작동하도록 하기 위해 기존 아키텍처에 최소한의 변경만을 가해야 합니다. - 모델 디자인은 코드의 가독성과 간결성을 유지하는 것과 많은 모델 체크포인트를 지원하는 것 사이의 어려운 균형 조절입니다. 모델링 코드의 대부분은 새로운 모델 체크포인트를 위해 클래스를 수정하는 것이 좋지만, [UNet 블록](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py) 및 [Attention 프로세서](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)와 같이 코드를 장기적으로 간결하고 읽기 쉽게 유지하기 위해 새로운 클래스를 추가하는 예외도 있습니다. ### 스케줄러 [[schedulers]] 스케줄러는 추론을 위한 노이즈 제거 과정을 안내하고 훈련을 위한 노이즈 스케줄을 정의하는 역할을 합니다. 스케줄러는 개별 클래스로 설계되어 있으며, 로드 가능한 구성 파일과 **단일 파일 정책**을 엄격히 따릅니다. 다음과 같은 설계 원칙을 따릅니다: - 모든 스케줄러는 [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers)에서 찾을 수 있습니다. - 스케줄러는 큰 유틸리티 파일에서 가져오지 **않아야** 하며, 자체 포함성을 유지해야 합니다. - 하나의 스케줄러 Python 파일은 하나의 스케줄러 알고리즘(논문에서 정의된 것과 같은)에 해당합니다. - 스케줄러가 유사한 기능을 공유하는 경우, `# Copied from` 메커니즘을 사용할 수 있습니다. - 모든 스케줄러는 `SchedulerMixin`과 `ConfigMixin`을 상속합니다. - [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) 메소드를 사용하여 스케줄러를 쉽게 교체할 수 있습니다. 자세한 내용은 [여기](../using-diffusers/schedulers.md)에서 설명합니다. - 모든 스케줄러는 `set_num_inference_steps`와 `step` 함수를 가져야 합니다. `set_num_inference_steps(...)`는 각 노이즈 제거 과정(즉, `step(...)`이 호출되기 전) 이전에 호출되어야 합니다. - 각 스케줄러는 모델이 호출될 타임스텝의 배열인 `timesteps` 속성을 통해 루프를 돌 수 있는 타임스텝을 노출합니다. - `step(...)` 함수는 예측된 모델 출력과 "현재" 샘플(x_t)을 입력으로 받고, "이전" 약간 더 노이즈가 제거된 샘플(x_t-1)을 반환합니다. - 노이즈 제거 스케줄러의 복잡성을 고려하여, `step` 함수는 모든 복잡성을 노출하지 않으며, "블랙 박스"일 수 있습니다. - 거의 모든 경우에 새로운 스케줄러는 새로운 스케줄링 파일에 구현되어야 합니다.
diffusers/docs/source/ko/conceptual/philosophy.md/0
{ "file_path": "diffusers/docs/source/ko/conceptual/philosophy.md", "repo_id": "diffusers", "token_count": 12970 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNet [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) (ControlNet)은 Lvmin Zhang과 Maneesh Agrawala에 의해 쓰여졌습니다. 이 예시는 [원본 ControlNet 리포지토리에서 예시 학습하기](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md)에 기반합니다. ControlNet은 원들을 채우기 위해 [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k)을 사용해서 학습됩니다. ## 의존성 설치하기 아래의 스크립트를 실행하기 전에, 라이브러리의 학습 의존성을 설치해야 합니다. <Tip warning={true}> 가장 최신 버전의 예시 스크립트를 성공적으로 실행하기 위해서는, 소스에서 설치하고 최신 버전의 설치를 유지하는 것을 강력하게 추천합니다. 우리는 예시 스크립트들을 자주 업데이트하고 예시에 맞춘 특정한 요구사항을 설치합니다. </Tip> 위 사항을 만족시키기 위해서, 새로운 가상환경에서 다음 일련의 스텝을 실행하세요: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` 그 다음에는 [예시 폴더](https://github.com/huggingface/diffusers/tree/main/examples/controlnet)으로 이동합니다. ```bash cd examples/controlnet ``` 이제 실행하세요: ```bash pip install -r requirements.txt ``` [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화 합니다: ```bash accelerate config ``` 혹은 여러분의 환경이 무엇인지 몰라도 기본적인 🤗Accelerate 구성으로 초기화할 수 있습니다: ```bash accelerate config default ``` 혹은 당신의 환경이 노트북 같은 상호작용하는 쉘을 지원하지 않는다면, 아래의 코드로 초기화 할 수 있습니다: ```python from accelerate.utils import write_basic_config write_basic_config() ``` 자체 데이터셋을 사용하기 위해서는 [학습을 위한 데이터셋 생성하기](create_dataset) 가이드를 확인하세요. ## 학습 이 학습에 사용될 다음 이미지들을 다운로드하세요: ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` `MODEL_NAME` 환경 변수 (Hub 모델 리포지토리 아이디 혹은 모델 가중치가 있는 디렉토리로 가는 주소)를 명시하고 [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) 인자로 환경변수를 보냅니다. 학습 스크립트는 당신의 리포지토리에 `diffusion_pytorch_model.bin` 파일을 생성하고 저장합니다. ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 \ --push_to_hub ``` 이 기본적인 설정으로는 ~38GB VRAM이 필요합니다. 기본적으로 학습 스크립트는 결과를 텐서보드에 기록합니다. 가중치(weight)와 편향(bias)을 사용하기 위해 `--report_to wandb` 를 전달합니다. 더 작은 batch(배치) 크기로 gradient accumulation(기울기 누적)을 하면 학습 요구사항을 ~20 GB VRAM으로 줄일 수 있습니다. 
```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --push_to_hub ``` ## 여러개 GPU로 학습하기 `accelerate` 은 seamless multi-GPU 학습을 고려합니다. `accelerate`과 함께 분산된 학습을 실행하기 위해 [여기](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 의 설명을 확인하세요. 아래는 예시 명령어입니다: ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 \ --mixed_precision="fp16" \ --tracker_project_name="controlnet-demo" \ --report_to=wandb \ --push_to_hub ``` ## 예시 결과 #### 배치 사이즈 8로 300 스텝 이후: | | | |-------------------|:-------------------------:| | | 푸른 배경과 빨간 원 | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![푸른 배경과 빨간 원](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) | | | 갈색 꽃 배경과 청록색 원 | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![갈색 꽃 배경과 청록색 원](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) | #### 배치 사이즈 8로 6000 스텝 이후: | | | |-------------------|:-------------------------:| | | 푸른 배경과 빨간 원 | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![푸른 배경과 빨간 원](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) | | | 갈색 꽃 배경과 청록색 원 | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![갈색 꽃 배경과 청록색 원](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) | ## 16GB GPU에서 학습하기 16GB GPU에서 학습하기 위해 다음의 최적화를 진행하세요: - 기울기 체크포인트 저장하기 - bitsandbyte의 [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation)가 설치되지 않았다면 링크에 연결된 설명서를 보세요. 
이제 학습 스크립트를 시작할 수 있습니다: ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --push_to_hub ``` ## 12GB GPU에서 학습하기 12GB GPU에서 실행하기 위해 다음의 최적화를 진행하세요: - 기울기 체크포인트 저장하기 - bitsandbyte의 8-bit [optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation)(가 설치되지 않았다면 링크에 연결된 설명서를 보세요) - [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers)(가 설치되지 않았다면 링크에 연결된 설명서를 보세요) - 기울기를 `None`으로 설정 ```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --push_to_hub ``` `pip install xformers`으로 `xformers`을 확실히 설치하고 `enable_xformers_memory_efficient_attention`을 사용하세요. ## 8GB GPU에서 학습하기 우리는 ControlNet을 지원하기 위한 DeepSpeed를 철저하게 테스트하지 않았습니다. 환경설정이 메모리를 저장할 때, 그 환경이 성공적으로 학습했는지를 확정하지 않았습니다. 성공한 학습 실행을 위해 설정을 변경해야 할 가능성이 높습니다. 8GB GPU에서 실행하기 위해 다음의 최적화를 진행하세요: - 기울기 체크포인트 저장하기 - bitsandbyte의 8-bit [optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation)(가 설치되지 않았다면 링크에 연결된 설명서를 보세요) - [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers)(가 설치되지 않았다면 링크에 연결된 설명서를 보세요) - 기울기를 `None`으로 설정 - DeepSpeed stage 2 변수와 optimizer 없에기 - fp16 혼합 정밀도(precision) [DeepSpeed](https://www.deepspeed.ai/)는 CPU 또는 NVME로 텐서를 VRAM에서 오프로드할 수 있습니다. 이를 위해서 훨씬 더 많은 RAM(약 25 GB)가 필요합니다. DeepSpeed stage 2를 활성화하기 위해서 `accelerate config`로 환경을 구성해야합니다. 구성(configuration) 파일은 이런 모습이어야 합니다: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 4 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: false zero_stage: 2 distributed_type: DEEPSPEED ``` <팁> [문서](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)를 더 많은 DeepSpeed 설정 옵션을 위해 보세요. <팁> 기본 Adam optimizer를 DeepSpeed'의 Adam `deepspeed.ops.adam.DeepSpeedCPUAdam` 으로 바꾸면 상당한 속도 향상을 이룰수 있지만, Pytorch와 같은 버전의 CUDA toolchain이 필요합니다. 8-비트 optimizer는 현재 DeepSpeed와 호환되지 않는 것 같습니다. 
```bash export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --mixed_precision fp16 \ --push_to_hub ``` ## 추론 학습된 모델은 [`StableDiffusionControlNetPipeline`]과 함께 실행될 수 있습니다. `base_model_path`와 `controlnet_path` 에 값을 지정하세요 `--pretrained_model_name_or_path` 와 `--output_dir` 는 학습 스크립트에 개별적으로 지정됩니다. ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler from diffusers.utils import load_image import torch base_model_path = "path to model" controlnet_path = "path to controlnet" controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( base_model_path, controlnet=controlnet, torch_dtype=torch.float16 ) # 더 빠른 스케줄러와 메모리 최적화로 diffusion 프로세스 속도 올리기 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) # xformers가 설치되지 않으면 아래 줄을 삭제하기 pipe.enable_xformers_memory_efficient_attention() pipe.enable_model_cpu_offload() control_image = load_image("./conditioning_image_1.png") prompt = "pale golden rod circle with old lace background" # 이미지 생성하기 generator = torch.manual_seed(0) image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0] image.save("./output.png") ```
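If the run was launched with `--push_to_hub`, the same inference code also works with the Hub repository id instead of a local path. A short sketch; `your-username/your-output-dir` is a placeholder for the repository created by the training script.

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("your-username/your-output-dir", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
```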
diffusers/docs/source/ko/training/controlnet.md/0
{ "file_path": "diffusers/docs/source/ko/training/controlnet.md", "repo_id": "diffusers", "token_count": 7261 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-guided depth-to-image 생성 [[open-in-colab]] [`StableDiffusionDepth2ImgPipeline`]을 사용하면 텍스트 프롬프트와 초기 이미지를 전달하여 새 이미지의 생성을 조절할 수 있습니다. 또한 이미지 구조를 보존하기 위해 `depth_map`을 전달할 수도 있습니다. `depth_map`이 제공되지 않으면 파이프라인은 통합된 [depth-estimation model](https://github.com/isl-org/MiDaS)을 통해 자동으로 깊이를 예측합니다. 먼저 [`StableDiffusionDepth2ImgPipeline`]의 인스턴스를 생성합니다: ```python import torch import requests from PIL import Image from diffusers import StableDiffusionDepth2ImgPipeline pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16, ).to("cuda") ``` 이제 프롬프트를 파이프라인에 전달합니다. 특정 단어가 이미지 생성을 가이드 하는것을 방지하기 위해 `negative_prompt`를 전달할 수도 있습니다: ```python url = "http://images.cocodataset.org/val2017/000000039769.jpg" init_image = Image.open(requests.get(url, stream=True).raw) prompt = "two tigers" n_prompt = "bad, deformed, ugly, bad anatomy" image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0] image ``` | Input | Output | |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> | 아래의 Spaces를 가지고 놀며 depth map이 있는 이미지와 없는 이미지의 차이가 있는지 확인해 보세요! <iframe src="https://radames-stable-diffusion-depth2img.hf.space" frameborder="0" width="850" height="500" ></iframe>
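To supply your own `depth_map` instead of relying on the pipeline's built-in estimator, pass it as a float tensor. The sketch below is assumption-laden: the external depth model name, the `(1, H, W)` tensor shape, and the `[-1, 1]` normalization are illustrative choices, so check the pipeline's API reference before relying on them.

```python
import torch
from transformers import pipeline as depth_pipeline

# External monocular depth estimator (the model name is just an example).
depth_estimator = depth_pipeline("depth-estimation", model="Intel/dpt-large")
predicted_depth = depth_estimator(init_image)["predicted_depth"]  # float tensor, assumed shape (1, H', W')

# Normalize to [-1, 1]; the pipeline interpolates the map to the latent resolution internally.
depth_min, depth_max = predicted_depth.amin(), predicted_depth.amax()
depth_map = 2.0 * (predicted_depth - depth_min) / (depth_max - depth_min) - 1.0
depth_map = depth_map.to("cuda", dtype=torch.float16)

image = pipe(
    prompt=prompt, image=init_image, depth_map=depth_map, negative_prompt=n_prompt, strength=0.7
).images[0]
```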
diffusers/docs/source/ko/using-diffusers/depth2img.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/depth2img.md", "repo_id": "diffusers", "token_count": 1376 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 프롬프트에 가중치 부여하기 [[open-in-colab]] 텍스트 가이드 기반의 diffusion 모델은 주어진 텍스트 프롬프트를 기반으로 이미지를 생성합니다. 텍스트 프롬프트에는 모델이 생성해야 하는 여러 개념이 포함될 수 있으며 프롬프트의 특정 부분에 가중치를 부여하는 것이 바람직한 경우가 많습니다. Diffusion 모델은 문맥화된 텍스트 임베딩으로 diffusion 모델의 cross attention 레이어를 조절함으로써 작동합니다. ([더 많은 정보를 위한 Stable Diffusion Guide](https://huggingface.co/docs/optimum-neuron/main/en/package_reference/modeling#stable-diffusion)를 참고하세요). 따라서 프롬프트의 특정 부분을 강조하는(또는 강조하지 않는) 간단한 방법은 프롬프트의 관련 부분에 해당하는 텍스트 임베딩 벡터의 크기를 늘리거나 줄이는 것입니다. 이것은 "프롬프트 가중치 부여" 라고 하며, 커뮤니티에서 가장 요구하는 기능입니다.([이곳](https://github.com/huggingface/diffusers/issues/2431)의 issue를 보세요 ). ## Diffusers에서 프롬프트 가중치 부여하는 방법 우리는 `diffusers`의 역할이 다른 프로젝트를 가능하게 하는 필수적인 기능을 제공하는 toolbex라고 생각합니다. [InvokeAI](https://github.com/invoke-ai/InvokeAI) 나 [diffuzers](https://github.com/abhishekkrthakur/diffuzers) 같은 강력한 UI를 구축할 수 있습니다. 프롬프트를 조작하는 방법을 지원하기 위해, `diffusers` 는 [StableDiffusionPipeline](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline)와 같은 많은 파이프라인에 [prompt_embeds](https://huggingface.co/docs/diffusers/v0.14.0/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) 인수를 노출시켜, "prompt-weighted"/축척된 텍스트 임베딩을 파이프라인에 바로 전달할 수 있게 합니다. [Compel 라이브러리](https://github.com/damian0815/compel)는 프롬프트의 일부를 강조하거나 강조하지 않을 수 있는 쉬운 방법을 제공합니다. 임베딩을 직접 준비하는 것 대신 이 방법을 사용하는 것을 강력히 추천합니다. 간단한 예제를 살펴보겠습니다. 다음과 같이 `"공을 갖고 노는 붉은색 고양이"` 이미지를 생성하고 싶습니다: ```py from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) prompt = "a red cat playing with a ball" generator = torch.Generator(device="cpu").manual_seed(33) image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] image ``` 생성된 이미지: ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png) 사진에서 알 수 있듯이, "공"은 이미지에 없습니다. 이 부분을 강조해 볼까요! 먼저 `compel` 라이브러리를 설치해야합니다: ```sh pip install compel ``` 그런 다음에는 `Compel` 오브젝트를 생성합니다: ```py from compel import Compel compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) ``` 이제 `"++"` 를 사용해서 "공" 을 강조해 봅시다: ```py prompt = "a red cat playing with a ball++" ``` 그리고 이 프롬프트를 파이프라인에 바로 전달하지 않고, `compel_proc` 를 사용하여 처리해야합니다: ```py prompt_embeds = compel_proc(prompt) ``` 파이프라인에 `prompt_embeds` 를 바로 전달할 수 있습니다: ```py generator = torch.Generator(device="cpu").manual_seed(33) images = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` 이제 "공"이 있는 그림을 출력할 수 있습니다! ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png) 마찬가지로 `--` 접미사를 단어에 사용하여 문장의 일부를 강조하지 않을 수 있습니다. 한번 시도해 보세요! 즐겨찾는 파이프라인에 `prompt_embeds` 입력이 없는 경우 issue를 새로 만들어주세요. Diffusers 팀은 최대한 대응하려고 노력합니다. 
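Down-weighting works the same way as the emphasis shown above; a small sketch using the `--` suffix mentioned earlier (see the compel documentation for the full weighting grammar):

```py
prompt = "a red cat playing with a ball--"
prompt_embeds = compel_proc(prompt)

generator = torch.Generator(device="cpu").manual_seed(33)
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
```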
Compel 1.1.6는 textual inversion 사용을 단순화해 주는 유틸리티 클래스를 추가합니다. `DiffusersTextualInversionManager`를 인스턴스화한 후 이를 `Compel`의 init에 전달합니다:

```py
from compel import Compel, DiffusersTextualInversionManager

textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel_proc = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager,
)
```

더 많은 정보를 얻고 싶다면 [compel](https://github.com/damian0815/compel) 라이브러리 문서를 참고하세요.
diffusers/docs/source/ko/using-diffusers/weighted_prompts.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/weighted_prompts.md", "repo_id": "diffusers", "token_count": 3376 }
129
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 安装 在你正在使用的任意深度学习框架中安装 🤗 Diffusers 。 🤗 Diffusers已在Python 3.8+、PyTorch 1.7.0+和Flax上进行了测试。按照下面的安装说明,针对你正在使用的深度学习框架进行安装: - [PyTorch](https://pytorch.org/get-started/locally/) installation instructions. - [Flax](https://flax.readthedocs.io/en/latest/) installation instructions. ## 使用pip安装 你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Diffusers 。 如果你对 Python 虚拟环境不熟悉,可以看看这个[教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). 在虚拟环境中,你可以轻松管理不同的项目,避免依赖项之间的兼容性问题。 首先,在你的项目目录下创建一个虚拟环境: ```bash python -m venv .env ``` 激活虚拟环境: ```bash source .env/bin/activate ``` 现在,你就可以安装 🤗 Diffusers了!使用下边这个命令: **PyTorch** ```bash pip install diffusers["torch"] ``` **Flax** ```bash pip install diffusers["flax"] ``` ## 从源代码安装 在从源代码安装 `diffusers` 之前,确保你已经安装了 `torch` 和 `accelerate`。 `torch`的安装教程可以看 `torch` [文档](https://pytorch.org/get-started/locally/#start-locally). 安装 `accelerate` ```bash pip install accelerate ``` 从源码安装 🤗 Diffusers 需要使用以下命令: ```bash pip install git+https://github.com/huggingface/diffusers ``` 这个命令安装的是最新的 `main`版本,而不是最近的`stable`版。 `main`是一直和最新进展保持一致的。比如,上次发布的正式版中有bug,在`main`中可以看到这个bug被修复了,但是新的正式版此时尚未推出。 但是这也意味着 `main`版本不保证是稳定的。 我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决 如果你遇到了问题,可以提 [Issue](https://github.com/huggingface/transformers/issues),这样我们就能更快修复问题了。 ## 可修改安装 如果你想做以下两件事,那你可能需要一个可修改代码的安装方式: * 使用 `main`版本的源代码。 * 为 🤗 Diffusers 贡献,需要测试代码中的变化。 使用以下命令克隆并安装 🤗 Diffusers: ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers ``` **PyTorch** ```sh pip install -e ".[torch]" ``` **Flax** ```sh pip install -e ".[flax]" ``` 这些命令将连接到你克隆的版本库和你的 Python 库路径。 现在,不只是在通常的库路径,Python 还会在你克隆的文件夹内寻找包。 例如,如果你的 Python 包通常安装在 `~/anaconda3/envs/main/lib/python3.10/Site-packages/`,Python 也会搜索你克隆到的文件夹。`~/diffusers/`。 <Tip warning={true}> 如果你想继续使用这个库,你必须保留 `diffusers` 文件夹。 </Tip> 现在你可以用下面的命令轻松地将你克隆的 🤗 Diffusers 库更新到最新版本。 ```bash cd ~/diffusers/ git pull ``` 你的Python环境将在下次运行时找到`main`版本的 🤗 Diffusers。 ## 注意 Telemetry 日志 我们的库会在使用`from_pretrained()`请求期间收集 telemetry 信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道类,以及预训练检查点的路径(如果它被托管在Hub上的话)。 这些使用数据有助于我们调试问题并确定新功能的开发优先级。 Telemetry 数据仅在从 HuggingFace Hub 中加载模型和管道时发送,而不会在本地使用期间收集。 我们知道,并不是每个人都想分享这些的信息,我们尊重您的隐私, 因此您可以通过在终端中设置 `DISABLE_TELEMETRY` 环境变量从而禁用 Telemetry 数据收集: Linux/MacOS : ```bash export DISABLE_TELEMETRY=YES ``` Windows : ```bash set DISABLE_TELEMETRY=YES ```
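After installing, either from PyPI or in editable mode, a quick way to confirm which copy and version of the library Python actually imports:

```python
import diffusers

print(diffusers.__version__)  # installed version
print(diffusers.__file__)     # for editable installs this should point into your clone
```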
diffusers/docs/source/zh/installation.md/0
{ "file_path": "diffusers/docs/source/zh/installation.md", "repo_id": "diffusers", "token_count": 2457 }
130
<!--版权所有 2025 The HuggingFace Team。保留所有权利。 根据 Apache 许可证 2.0 版本(“许可证”)授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,不附带任何明示或暗示的担保或条件。请参阅许可证了解具体的语言管理权限和限制。 --> # 分布式推理 在分布式设置中,您可以使用 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 或 [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html) 在多个 GPU 上运行推理,这对于并行生成多个提示非常有用。 本指南将向您展示如何使用 🤗 Accelerate 和 PyTorch Distributed 进行分布式推理。 ## 🤗 Accelerate 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 是一个旨在简化在分布式设置中训练或运行推理的库。它简化了设置分布式环境的过程,让您可以专注于您的 PyTorch 代码。 首先,创建一个 Python 文件并初始化一个 [`accelerate.PartialState`] 来创建分布式环境;您的设置会自动检测,因此您无需明确定义 `rank` 或 `world_size`。将 [`DiffusionPipeline`] 移动到 `distributed_state.device` 以为每个进程分配一个 GPU。 现在使用 [`~accelerate.PartialState.split_between_processes`] 实用程序作为上下文管理器,自动在进程数之间分发提示。 ```py import torch from accelerate import PartialState from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) distributed_state = PartialState() pipeline.to(distributed_state.device) with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: result = pipeline(prompt).images[0] result.save(f"result_{distributed_state.process_index}.png") ``` 使用 `--num_processes` 参数指定要使用的 GPU 数量,并调用 `accelerate launch` 来运行脚本: ```bash accelerate launch run_distributed.py --num_processes=2 ``` <Tip> 参考这个最小示例 [脚本](https://gist.github.com/sayakpaul/cfaebd221820d7b43fae638b4dfa01ba) 以在多个 GPU 上运行推理。要了解更多信息,请查看 [使用 🤗 Accelerate 进行分布式推理](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) 指南。 </Tip> ## PyTorch Distributed PyTorch 支持 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html),它启用了数据 并行性。 首先,创建一个 Python 文件并导入 `torch.distributed` 和 `torch.multiprocessing` 来设置分布式进程组,并为每个 GPU 上的推理生成进程。您还应该初始化一个 [`DiffusionPipeline`]: ```py import torch import torch.distributed as dist import torch.multiprocessing as mp from diffusers import DiffusionPipeline sd = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True ) ``` 您需要创建一个函数来运行推理;[`init_process_group`](https://pytorch.org/docs/stable/distributed.html?highlight=init_process_group#torch.distributed.init_process_group) 处理创建一个分布式环境,指定要使用的后端类型、当前进程的 `rank` 以及参与进程的数量 `world_size`。如果您在 2 个 GPU 上并行运行推理,那么 `world_size` 就是 2。 将 [`DiffusionPipeline`] 移动到 `rank`,并使用 `get_rank` 为每个进程分配一个 GPU,其中每个进程处理不同的提示: ```py def run_inference(rank, world_size): dist.init_process_group("nccl", rank=rank, world_size=world_size) sd.to(rank) if torch.distributed.get_rank() == 0: prompt = "a dog" elif torch.distributed.get_rank() == 1: prompt = "a cat" image = sd(prompt).images[0] image.save(f"./{'_'.join(prompt)}.png") ``` 要运行分布式推理,调用 [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) 在 `world_size` 定义的 GPU 数量上运行 `run_inference` 函数: ```py def main(): world_size = 2 mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True) if __name__ == "__main__": main() ``` 完成推理脚本后,使用 `--nproc_per_node` 参数指定要使用的 GPU 数量,并调用 `torchrun` 来运行脚本: ```bash torchrun run_distributed.py --nproc_per_node=2 ``` > [!TIP] > 您可以在 [`DiffusionPipeline`] 中使用 `device_map` 将其模型级组件分布在多个设备上。请参考 [设备放置](../tutorials/inference_with_big_models#device-placement) 指南了解更多信息。 ## 
模型分片 现代扩散系统,如 [Flux](../api/pipelines/flux),非常大且包含多个模型。例如,[Flux.1-Dev](https://hf.co/black-forest-labs/FLUX.1-dev) 由两个文本编码器 - [T5-XXL](https://hf.co/google/t5-v1_1-xxl) 和 [CLIP-L](https://hf.co/openai/clip-vit-large-patch14) - 一个 [扩散变换器](../api/models/flux_transformer),以及一个 [VAE](../api/models/autoencoderkl) 组成。对于如此大的模型,在消费级 GPU 上运行推理可能具有挑战性。 模型分片是一种技术,当模型无法容纳在单个 GPU 上时,将模型分布在多个 GPU 上。下面的示例假设有两个 16GB GPU 可用于推理。 开始使用文本编码器计算文本嵌入。通过设置 `device_map="balanced"` 将文本编码器保持在两个GPU上。`balanced` 策略将模型均匀分布在所有可用GPU上。使用 `max_memory` 参数为每个GPU上的每个文本编码器分配最大内存量。 > [!TIP] > **仅** 在此步骤加载文本编码器!扩散变换器和VAE在后续步骤中加载以节省内存。 ```py from diffusers import FluxPipeline import torch prompt = "a photo of a dog with cat-like look" pipeline = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", transformer=None, vae=None, device_map="balanced", max_memory={0: "16GB", 1: "16GB"}, torch_dtype=torch.bfloat16 ) with torch.no_grad(): print("Encoding prompts.") prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt( prompt=prompt, prompt_2=None, max_sequence_length=512 ) ``` 一旦文本嵌入计算完成,从GPU中移除它们以为扩散变换器腾出空间。 ```py import gc def flush(): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() del pipeline.text_encoder del pipeline.text_encoder_2 del pipeline.tokenizer del pipeline.tokenizer_2 del pipeline flush() ``` 接下来加载扩散变换器,它有125亿参数。这次,设置 `device_map="auto"` 以自动将模型分布在两个16GB GPU上。`auto` 策略由 [Accelerate](https://hf.co/docs/accelerate/index) 支持,并作为 [大模型推理](https://hf.co/docs/accelerate/concept_guides/big_model_inference) 功能的一部分可用。它首先将模型分布在最快的设备(GPU)上,然后在需要时移动到较慢的设备如CPU和硬盘。将模型参数存储在较慢设备上的权衡是推理延迟较慢。 ```py from diffusers import AutoModel import torch transformer = AutoModel.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="transformer", device_map="auto", torch_dtype=torch.bfloat16 ) ``` > [!TIP] > 在任何时候,您可以尝试 `print(pipeline.hf_device_map)` 来查看各种模型如何在设备上分布。这对于跟踪模型的设备放置很有用。您也可以尝试 `print(transformer.hf_device_map)` 来查看变换器模型如何在设备上分片。 将变换器模型添加到管道中以进行去噪,但将其他模型级组件如文本编码器和VAE设置为 `None`,因为您还不需要它们。 ```py pipeline = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", text_encoder=None, text_encoder_2=None, tokenizer=None, tokenizer_2=None, vae=None, transformer=transformer, torch_dtype=torch.bfloat16 ) print("Running denoising.") height, width = 768, 1360 latents = pipeline( prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, num_inference_steps=50, guidance_scale=3.5, height=height, width=width, output_type="latent", ).images ``` 从内存中移除管道和变换器,因为它们不再需要。 ```py del pipeline.transformer del pipeline flush() ``` 最后,使用变分自编码器(VAE)将潜在表示解码为图像。VAE通常足够小,可以在单个GPU上加载。 ```py from diffusers import AutoencoderKL from diffusers.image_processor import VaeImageProcessor import torch vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=torch.bfloat16).to("cuda") vae_scale_factor = 2 ** (len(vae.config.block_out_channels)) image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor) with torch.no_grad(): print("运行解码中。") latents = FluxPipeline._unpack_latents(latents, height, width, vae_scale_factor) latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor image = vae.decode(latents, return_dict=False)[0] image = image_processor.postprocess(image, output_type="pil") image[0].save("split_transformer.png") ``` 通过选择性加载和卸载在特定阶段所需的模型,并将最大模型分片到多个GPU上,可以在消费级GPU上运行大型模型的推理。
diffusers/docs/source/zh/training/distributed_inference.md/0
{ "file_path": "diffusers/docs/source/zh/training/distributed_inference.md", "repo_id": "diffusers", "token_count": 5237 }
131
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import sys import tempfile import safetensors from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRAFluxAdvanced(ExamplesTestsAccelerate): instance_data_dir = "docs/source/en/imgs" instance_prompt = "photo" pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-pipe" script_path = "examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py" def test_dreambooth_lora_flux(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_text_encoder_flux(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --train_text_encoder --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) starts_with_expected_prefix = all( (key.startswith("transformer") or key.startswith("text_encoder")) for key in lora_state_dict.keys() ) self.assertTrue(starts_with_expected_prefix) def test_dreambooth_lora_pivotal_tuning_flux_clip(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --train_text_encoder_ti --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure embeddings were also saved self.assertTrue(os.path.isfile(os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # make sure the state_dict has the correct naming in the parameters. textual_inversion_state_dict = safetensors.torch.load_file( os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors") ) is_clip = all("clip_l" in k for k in textual_inversion_state_dict.keys()) self.assertTrue(is_clip) # when performing pivotal tuning, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_pivotal_tuning_flux_clip_t5(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --train_text_encoder_ti --enable_t5_ti --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure embeddings were also saved self.assertTrue(os.path.isfile(os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # make sure the state_dict has the correct naming in the parameters. textual_inversion_state_dict = safetensors.torch.load_file( os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors") ) is_te = all(("clip_l" in k or "t5" in k) for k in textual_inversion_state_dict.keys()) self.assertTrue(is_te) # when performing pivotal tuning, all the parameters in the state dict should start # with `"transformer"` in their names. 
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_latent_caching(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --cache_latents --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}) def test_dreambooth_lora_with_metadata(self): # Use a `lora_alpha` that is different from `rank`. 
lora_alpha = 8 rank = 4 with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --lora_alpha={lora_alpha} --rank={rank} --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test state_dict_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors") self.assertTrue(os.path.isfile(state_dict_file)) # Check if the metadata was properly serialized. with safetensors.torch.safe_open(state_dict_file, framework="pt", device="cpu") as f: metadata = f.metadata() or {} metadata.pop("format", None) raw = metadata.get(LORA_ADAPTER_METADATA_KEY) if raw: raw = json.loads(raw) loaded_lora_alpha = raw["transformer.lora_alpha"] self.assertTrue(loaded_lora_alpha == lora_alpha) loaded_lora_rank = raw["transformer.r"] self.assertTrue(loaded_lora_rank == rank)
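# These example tests are typically run with pytest from the repository root, e.g.:
#
#   pytest examples/advanced_diffusion_training/test_dreambooth_lora_flux_advanced.py -q
#
# An optional -k "<pattern>" flag selects individual test methods.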
diffusers/examples/advanced_diffusion_training/test_dreambooth_lora_flux_advanced.py/0
{ "file_path": "diffusers/examples/advanced_diffusion_training/test_dreambooth_lora_flux_advanced.py", "repo_id": "diffusers", "token_count": 7022 }
132
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput BITS = 8 # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py def decimal_to_bits(x, bits=BITS): """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1""" device = x.device x = (x * 255).int().clamp(0, 255) mask = 2 ** torch.arange(bits - 1, -1, -1, device=device) mask = rearrange(mask, "d -> d 1 1") x = rearrange(x, "b c h w -> b c 1 h w") bits = ((x & mask) != 0).float() bits = rearrange(bits, "b c d h w -> b (c d) h w") bits = bits * 2 - 1 return bits def bits_to_decimal(x, bits=BITS): """expects bits from -1 to 1, outputs image tensor from 0 to 1""" device = x.device x = (x > 0).int() mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32) mask = rearrange(mask, "d -> d 1 1") x = rearrange(x, "b (c d) h w -> b c d h w", d=8) dec = reduce(x * mask, "b c d h w -> b c h w", "sum") return (dec / 255).clamp(0.0, 1.0) # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale def ddim_bit_scheduler_step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True, ) -> Union[DDIMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.Tensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): TODO generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://huggingface.co/papers/2010.02502 # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502 pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" scale = self.bit_scale if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -scale, scale) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = self._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output # 7. compute x_t without "random noise" of formula (12) from https://huggingface.co/papers/2010.02502 prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 device = model_output.device if torch.is_tensor(model_output) else "cpu" noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device) variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise prev_sample = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def ddpm_bit_scheduler_step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, prediction_type="epsilon", generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.Tensor`): current instance of sample being created by diffusion process. prediction_type (`str`, default `epsilon`): indicates whether the model predicts the noise (epsilon), or the samples (`sample`). generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://huggingface.co/papers/2006.11239 if prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif prediction_type == "sample": pred_original_sample = model_output else: raise ValueError(f"Unsupported prediction_type {prediction_type}.") # 3. Clip "predicted x_0" scale = self.bit_scale if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -scale, scale) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://huggingface.co/papers/2006.11239 pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://huggingface.co/papers/2006.11239 pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise variance = 0 if t > 0: noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) class BitDiffusion(DiffusionPipeline): def __init__( self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0, ): super().__init__() self.bit_scale = bit_scale self.scheduler.step = ( ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step ) self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]: latents = torch.randn( (batch_size, self.unet.config.in_channels, height, width), generator=generator, ) latents = decimal_to_bits(latents) * self.bit_scale latents = latents.to(self.device) self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # predict the noise residual noise_pred = self.unet(latents, t).sample # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents).prev_sample image = bits_to_decimal(latents) if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
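# Small self-contained sanity check of the bit conversion helpers above: decimal_to_bits /
# bits_to_decimal should round-trip an image tensor up to 8-bit quantization error.
if __name__ == "__main__":
    x = torch.rand(1, 3, 8, 8)      # image tensor in [0, 1]
    bits = decimal_to_bits(x)       # -> (1, 24, 8, 8), values in {-1.0, +1.0}
    recon = bits_to_decimal(bits)   # -> (1, 3, 8, 8), back in [0, 1]
    print(bits.unique())            # tensor([-1., 1.])
    print((x - recon).abs().max())  # bounded by ~1/255 (8-bit quantization)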
diffusers/examples/community/bit_diffusion.py/0
{ "file_path": "diffusers/examples/community/bit_diffusion.py", "repo_id": "diffusers", "token_count": 4347 }
133
import re from copy import deepcopy from dataclasses import asdict, dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import torch from numpy import exp, pi, sqrt from torchvision.transforms.functional import resize from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler def preprocess_image(image): from PIL import Image """Preprocess an input image Same as https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44 """ w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h), resample=Image.LANCZOS) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image) return 2.0 * image - 1.0 @dataclass class CanvasRegion: """Class defining a rectangular region in the canvas""" row_init: int # Region starting row in pixel space (included) row_end: int # Region end row in pixel space (not included) col_init: int # Region starting column in pixel space (included) col_end: int # Region end column in pixel space (not included) region_seed: int = None # Seed for random operations in this region noise_eps: float = 0.0 # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents def __post_init__(self): # Initialize arguments if not specified if self.region_seed is None: self.region_seed = np.random.randint(9999999999) # Check coordinates are non-negative for coord in [self.row_init, self.row_end, self.col_init, self.col_end]: if coord < 0: raise ValueError( f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})" ) # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space for coord in [self.row_init, self.row_end, self.col_init, self.col_end]: if coord // 8 != coord / 8: raise ValueError( f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})" ) # Check noise eps is non-negative if self.noise_eps < 0: raise ValueError(f"A CanvasRegion must be defined noises eps non-negative, found {self.noise_eps}") # Compute coordinates for this region in latent space self.latent_row_init = self.row_init // 8 self.latent_row_end = self.row_end // 8 self.latent_col_init = self.col_init // 8 self.latent_col_end = self.col_end // 8 @property def width(self): return self.col_end - self.col_init @property def height(self): return self.row_end - self.row_init def get_region_generator(self, device="cpu"): """Creates a torch.Generator based on the random seed of this region""" # Initialize region generator return torch.Generator(device).manual_seed(self.region_seed) @property def __dict__(self): return asdict(self) class MaskModes(Enum): """Modes in which the influence of diffuser is masked""" CONSTANT = "constant" GAUSSIAN = "gaussian" QUARTIC = "quartic" # See https://en.wikipedia.org/wiki/Kernel_(statistics) @dataclass class 
DiffusionRegion(CanvasRegion): """Abstract class defining a region where some class of diffusion process is acting""" pass @dataclass class Text2ImageRegion(DiffusionRegion): """Class defining a region where a text guided diffusion process is acting""" prompt: str = "" # Text prompt guiding the diffuser in this region guidance_scale: float = 7.5 # Guidance scale of the diffuser in this region. If None, randomize mask_type: MaskModes = MaskModes.GAUSSIAN.value # Kind of weight mask applied to this region mask_weight: float = 1.0 # Global weights multiplier of the mask tokenized_prompt = None # Tokenized prompt encoded_prompt = None # Encoded prompt def __post_init__(self): super().__post_init__() # Mask weight cannot be negative if self.mask_weight < 0: raise ValueError( f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}" ) # Mask type must be an actual known mask if self.mask_type not in [e.value for e in MaskModes]: raise ValueError( f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})" ) # Randomize arguments if given as None if self.guidance_scale is None: self.guidance_scale = np.random.randint(5, 30) # Clean prompt self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ") def tokenize_prompt(self, tokenizer): """Tokenizes the prompt for this diffusion region using a given tokenizer""" self.tokenized_prompt = tokenizer( self.prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) def encode_prompt(self, text_encoder, device): """Encodes the previously tokenized prompt for this diffusion region using a given encoder""" assert self.tokenized_prompt is not None, ValueError( "Prompt in diffusion region must be tokenized before encoding" ) self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0] @dataclass class Image2ImageRegion(DiffusionRegion): """Class defining a region where an image guided diffusion process is acting""" reference_image: torch.Tensor = None strength: float = 0.8 # Strength of the image def __post_init__(self): super().__post_init__() if self.reference_image is None: raise ValueError("Must provide a reference image when creating an Image2ImageRegion") if self.strength < 0 or self.strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {self.strength}") # Rescale image to region shape self.reference_image = resize(self.reference_image, size=[self.height, self.width]) def encode_reference_image(self, encoder, device, generator, cpu_vae=False): """Encodes the reference image for this Image2Image region into the latent space""" # Place encoder in CPU or not following the parameter cpu_vae if cpu_vae: # Note here we use mean instead of sample, to avoid moving also generator to CPU, which is troublesome self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device) else: self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample( generator=generator ) self.reference_latents = 0.18215 * self.reference_latents @property def __dict__(self): # This class requires special casting to dict because of the reference_image tensor. 
Otherwise it cannot be casted to JSON # Get all basic fields from parent class super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()} # Pack other fields return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength} class RerollModes(Enum): """Modes in which the reroll regions operate""" RESET = "reset" # Completely reset the random noise in the region EPSILON = "epsilon" # Alter slightly the latents in the region @dataclass class RerollRegion(CanvasRegion): """Class defining a rectangular canvas region in which initial latent noise will be rerolled""" reroll_mode: RerollModes = RerollModes.RESET.value @dataclass class MaskWeightsBuilder: """Auxiliary class to compute a tensor of weights for a given diffusion region""" latent_space_dim: int # Size of the U-net latent space nbatch: int = 1 # Batch size in the U-net def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor: """Computes a tensor of weights for a given diffusion region""" MASK_BUILDERS = { MaskModes.CONSTANT.value: self._constant_weights, MaskModes.GAUSSIAN.value: self._gaussian_weights, MaskModes.QUARTIC.value: self._quartic_weights, } return MASK_BUILDERS[region.mask_type](region) def _constant_weights(self, region: DiffusionRegion) -> torch.tensor: """Computes a tensor of constant for a given diffusion region""" latent_width = region.latent_col_end - region.latent_col_init latent_height = region.latent_row_end - region.latent_row_init return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor: """Generates a gaussian mask of weights for tile contributions""" latent_width = region.latent_col_end - region.latent_col_init latent_height = region.latent_row_end - region.latent_row_init var = 0.01 midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1 x_probs = [ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var) for x in range(latent_width) ] midpoint = (latent_height - 1) / 2 y_probs = [ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var) for y in range(latent_height) ] weights = np.outer(y_probs, x_probs) * region.mask_weight return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1)) def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor: """Generates a quartic mask of weights for tile contributions The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits. 
""" quartic_constant = 15.0 / 16.0 support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / ( region.latent_col_end - region.latent_col_init - 1 ) * 1.99 - (1.99 / 2.0) x_probs = quartic_constant * np.square(1 - np.square(support)) support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / ( region.latent_row_end - region.latent_row_init - 1 ) * 1.99 - (1.99 / 2.0) y_probs = quartic_constant * np.square(1 - np.square(support)) weights = np.outer(y_probs, x_probs) * region.mask_weight return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1)) class StableDiffusionCanvasPipeline(DiffusionPipeline, StableDiffusionMixin): """Stable Diffusion pipeline that mixes several diffusers in the same canvas""" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) def decode_latents(self, latents, cpu_vae=False): """Decodes a given array of latents into pixel space""" # scale and decode the image latents with vae if cpu_vae: lat = deepcopy(latents).cpu() vae = deepcopy(self.vae).cpu() else: lat = latents vae = self.vae lat = 1 / 0.18215 * lat image = vae.decode(lat).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() return self.numpy_to_pil(image) def get_latest_timestep_img2img(self, num_inference_steps, strength): """Finds the latest timesteps where an img2img strength does not impose latents anymore""" # get the original timestep using init_timestep offset = self.scheduler.config.get("steps_offset", 0) init_timestep = int(num_inference_steps * (1 - strength)) + offset init_timestep = min(init_timestep, num_inference_steps) t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1) latest_timestep = self.scheduler.timesteps[t_start] return latest_timestep @torch.no_grad() def __call__( self, canvas_height: int, canvas_width: int, regions: List[DiffusionRegion], num_inference_steps: Optional[int] = 50, seed: Optional[int] = 12345, reroll_regions: Optional[List[RerollRegion]] = None, cpu_vae: Optional[bool] = False, decode_steps: Optional[bool] = False, ): if reroll_regions is None: reroll_regions = [] batch_size = 1 if decode_steps: steps_images = [] # Prepare scheduler self.scheduler.set_timesteps(num_inference_steps, device=self.device) # Split diffusion regions by their kind text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)] image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)] # Prepare text embeddings for region in text2image_regions: region.tokenize_prompt(self.tokenizer) region.encode_prompt(self.text_encoder, self.device) # Create original noisy latents using the timesteps latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8) generator = torch.Generator(self.device).manual_seed(seed) init_noise = torch.randn(latents_shape, generator=generator, device=self.device) # Reset latents in seed reroll regions, if requested for region in reroll_regions: if region.reroll_mode 
== RerollModes.RESET.value: region_shape = ( latents_shape[0], latents_shape[1], region.latent_row_end - region.latent_row_init, region.latent_col_end - region.latent_col_init, ) init_noise[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device) # Apply epsilon noise to regions: first diffusion regions, then reroll regions all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value] for region in all_eps_rerolls: if region.noise_eps > 0: region_noise = init_noise[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] eps_noise = ( torch.randn( region_noise.shape, generator=region.get_region_generator(self.device), device=self.device ) * region.noise_eps ) init_noise[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] += eps_noise # scale the initial noise by the standard deviation required by the scheduler latents = init_noise * self.scheduler.init_noise_sigma # Get unconditional embeddings for classifier free guidance in text2image regions for region in text2image_regions: max_length = region.tokenized_prompt.input_ids.shape[-1] uncond_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt]) # Prepare image latents for region in image2image_regions: region.encode_reference_image(self.vae, device=self.device, generator=generator) # Prepare mask of weights for each region mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size) mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions] # Diffusion timesteps for i, t in tqdm(enumerate(self.scheduler.timesteps)): # Diffuse each region noise_preds_regions = [] # text2image regions for region in text2image_regions: region_latents = latents[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([region_latents] * 2) # scale model input following scheduler rules latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"] # perform guidance noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond) noise_preds_regions.append(noise_pred_region) # Merge noise predictions for all tiles noise_pred = torch.zeros(latents.shape, device=self.device) contributors = torch.zeros(latents.shape, device=self.device) # Add each tile contribution to overall latents for region, noise_pred_region, mask_weights_region in zip( text2image_regions, noise_preds_regions, mask_weights ): noise_pred[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] += noise_pred_region * 
mask_weights_region contributors[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] += mask_weights_region # Average overlapping areas with more than 1 contributor noise_pred /= contributors noise_pred = torch.nan_to_num( noise_pred ) # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents).prev_sample # Image2Image regions: override latents generated by the scheduler for region in image2image_regions: influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength) # Only override in the timesteps before the last influence step of the image (given by its strength) if t > influence_step: timestep = t.repeat(batch_size) region_init_noise = init_noise[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep) latents[ :, :, region.latent_row_init : region.latent_row_end, region.latent_col_init : region.latent_col_end, ] = region_latents if decode_steps: steps_images.append(self.decode_latents(latents, cpu_vae)) # scale and decode the image latents with vae image = self.decode_latents(latents, cpu_vae) output = {"images": image} if decode_steps: output = {**output, "steps_images": steps_images} return output
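# ---------------------------------------------------------------------------
# Minimal usage sketch for the canvas pipeline above. Assumptions: the
# CanvasRegion base class defined earlier in this file exposes its pixel
# bounds as `row_init`, `row_end`, `col_init` and `col_end` (consistent with
# the latent slicing in `__call__`), and the standard Stable Diffusion v1.4
# checkpoint provides the components expected by `__init__`. Adjust prompts,
# bounds and the model id to your own setup.
if __name__ == "__main__":
    pipe = StableDiffusionCanvasPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")

    # Two text-guided regions on a 512x768 canvas; their gaussian masks blend
    # the band where the regions overlap (columns 320-448).
    regions = [
        Text2ImageRegion(
            row_init=0, row_end=512, col_init=0, col_end=448,
            prompt="A charcoal drawing of a lighthouse on a cliff",
        ),
        Text2ImageRegion(
            row_init=0, row_end=512, col_init=320, col_end=768,
            prompt="A stormy sea at night, oil painting",
        ),
    ]
    output = pipe(canvas_height=512, canvas_width=768, regions=regions, num_inference_steps=50, seed=7)
    output["images"][0].save("mixture_canvas_example.png")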
diffusers/examples/community/mixture_canvas.py/0
{ "file_path": "diffusers/examples/community/mixture_canvas.py", "repo_id": "diffusers", "token_count": 9675 }
134
# Copyright 2025 TencentARC and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import is_compiled_module, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler >>> from diffusers.utils import load_image >>> from controlnet_aux.midas import MidasDetector >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" >>> image = load_image(img_url).resize((1024, 1024)) >>> mask_image = load_image(mask_url).resize((1024, 1024)) >>> midas_depth = MidasDetector.from_pretrained( ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large" ... ).to("cuda") >>> depth_image = midas_depth( ... image, detect_resolution=512, image_resolution=1024 ... ) >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" >>> adapter = T2IAdapter.from_pretrained( ... "Adapter/t2iadapter", ... subfolder="sketch_sdxl_1.0", ... torch_dtype=torch.float16, ... adapter_type="full_adapter_xl", ... ) >>> controlnet = ControlNetModel.from_pretrained( ... "diffusers/controlnet-depth-sdxl-1.0", ... torch_dtype=torch.float16, ... variant="fp16", ... use_safetensors=True ... ).to("cuda") >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( ... model_id, ... adapter=adapter, ... controlnet=controlnet, ... torch_dtype=torch.float16, ... variant="fp16", ... scheduler=scheduler ... 
).to("cuda") >>> strength = 0.5 >>> generator = torch.manual_seed(42) >>> sketch_image_out = pipe( ... prompt="a photo of a tiger sitting on a park bench", ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", ... adapter_image=depth_image, ... control_image=mask_image, ... adapter_conditioning_scale=strength, ... controlnet_conditioning_scale=strength, ... generator=generator, ... guidance_scale=7.5, ... ).images[0] ``` """ def _preprocess_adapter_image(image, height, width): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] image = [ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image ] # expand [h, w] or [h, w, c] to [b, h, w, c] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): if image[0].ndim == 3: image = torch.stack(image, dim=0) elif image[0].ndim == 4: image = torch.cat(image, dim=0) else: raise ValueError( f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but receive: {image[0].ndim}" ) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class StableDiffusionXLControlNetAdapterPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter https://huggingface.co/papers/2302.08453 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a list, the outputs from each Adapter are added together to create one combined additional conditioning. adapter_weights (`List[float]`, *optional*, defaults to None): List of floats representing the weight which will be multiply to each adapter's output before adding them together. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], controlnet: Union[ControlNetModel, MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, ): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, adapter=adapter, controlnet=controlnet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = 
pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def check_conditions( self, prompt, prompt_embeds, adapter_image, control_image, adapter_conditioning_scale, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ): # controlnet checks if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Check controlnet `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(control_image, prompt, prompt_embeds) elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(control_image, list): raise TypeError("For multiple controlnets: `control_image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in control_image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(control_image) != len(self.controlnet.nets): raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets." 
) for image_ in control_image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False # adapter checks if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter): self.check_image(adapter_image, prompt, prompt_embeds) elif ( isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter) ): if not isinstance(adapter_image, list): raise TypeError("For multiple adapters: `adapter_image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in adapter_image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(adapter_image) != len(self.adapter.adapters): raise ValueError( f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapter.adapters)} Adapters." ) for image_ in adapter_image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `adapter_conditioning_scale` if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter): if not isinstance(adapter_conditioning_scale, float): raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.") elif ( isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter) ): if isinstance(adapter_conditioning_scale, list): if any(isinstance(i, list) for i in adapter_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len( self.adapter.adapters ): raise ValueError( "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have" " the same length as the number of adapters" ) else: assert False # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}.
Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width def _default_height_width(self, height, width, image): # NOTE: It is possible that a list of images have different # dimensions for each image, so just checking the first image # is not _exactly_ correct, but it is simple. 
while isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[-2] # round down to nearest multiple of `self.adapter.downscale_factor` height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[-1] # round down to nearest multiple of `self.adapter.downscale_factor` width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor return height, width def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, adapter_image: PipelineImageInput = None, control_image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, adapter_conditioning_scale: Union[float, List[float]] = 1.0, adapter_conditioning_factor: float = 1.0, clip_skip: Optional[int] = None, controlnet_conditioning_scale=1.0, guess_mode: bool = False, control_guidance_start: float = 0.0, control_guidance_end: float = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is used in both text-encoders adapter_image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the type is specified as `torch.Tensor`, it is passed to Adapter as is. PIL.Image.Image` can also be accepted as an image. The control image is automatically resized to fit the output image. control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). 
Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be the same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. If multiple ControlNets are specified in init, you can set the corresponding scale as a list. adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the residual in the original unet. If multiple adapters are specified in init, you can set the corresponding scale as a list. adapter_conditioning_factor (`float`, *optional*, defaults to 1.0): The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is `0.0`, adapter is not applied at all.
If `adapter_conditioning_factor` is `1.0`, adapter is applied for all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter # 0. Default height and width to unet height, width = self._default_height_width(height, width, adapter_image) device = self._execution_device if isinstance(adapter, MultiAdapter): adapter_input = [] for one_image in adapter_image: one_image = _preprocess_adapter_image(one_image, height, width) one_image = one_image.to(device=device, dtype=adapter.dtype) adapter_input.append(one_image) else: adapter_input = _preprocess_adapter_image(adapter_image, height, width) adapter_input = adapter_input.to(device=device, dtype=adapter.dtype) original_size = original_size or (height, width) target_size = target_size or (height, width) # 0.1 align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float): adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, callback_steps, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) self.check_conditions( prompt, prompt_embeds, adapter_image, control_image, adapter_conditioning_scale, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . 
`guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, clip_skip=clip_skip, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Prepare added time ids & embeddings & adapter features if isinstance(adapter, MultiAdapter): adapter_state = adapter(adapter_input, adapter_conditioning_scale) for k, v in enumerate(adapter_state): adapter_state[k] = v else: adapter_state = adapter(adapter_input) for k, v in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: for k, v in enumerate(adapter_state): adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) if do_classifier_free_guidance: for k, v in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) # 7.2 Prepare control images if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: raise ValueError(f"{controlnet.__class__} is not supported.") # 8.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] if isinstance(self.controlnet, MultiControlNetModel): controlnet_keep.append(keeps) else: controlnet_keep.append(keeps[0]) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and 
negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if i < int(num_inference_steps * adapter_conditioning_factor): down_intrablock_additional_residuals = [state.clone() for state in adapter_state] else: down_intrablock_additional_residuals = None # ----------- ControlNet # expand the latents if we are doing classifier free guidance latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t) # controlnet(s) inference if guess_mode and do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. 
control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input_controlnet controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter down_block_additional_residuals=down_block_res_samples, # controlnet mid_block_additional_residual=mid_block_res_sample, # controlnet )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
diffusers/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py", "repo_id": "diffusers", "token_count": 33021 }
135
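The entry above defines a combined ControlNet + T2I-Adapter SDXL community pipeline but does not show how it would be loaded. The following is a minimal, hedged usage sketch: the `custom_pipeline` identifier, the ControlNet and T2I-Adapter repo ids, and the conditioning-image URL are assumptions for illustration only; the call arguments (`adapter_image`, `control_image`, the two conditioning scales) mirror the `__call__` signature used in the file.

# Hypothetical usage sketch for the ControlNet + T2I-Adapter SDXL community pipeline above.
# Repo ids, the custom_pipeline id, and the image URL are placeholders, not values from the file.
import torch
from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",  # assumed example checkpoint
    torch_dtype=torch.float16,
)
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-canny-sdxl-1.0",  # assumed example checkpoint
    torch_dtype=torch.float16,
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    adapter=adapter,
    torch_dtype=torch.float16,
    custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter",  # assumed community-pipeline id
).to("cuda")

# Placeholder conditioning image; in practice this would be e.g. a Canny edge map.
canny_image = load_image("https://example.com/canny_edges.png")

image = pipe(
    prompt="a photo of a futuristic city at dusk",
    adapter_image=canny_image,            # consumed by the T2I-Adapter branch
    control_image=canny_image,            # consumed by the ControlNet branch
    num_inference_steps=30,
    adapter_conditioning_scale=0.8,
    controlnet_conditioning_scale=0.5,
).images[0]
image.save("controlnet_adapter_sdxl.png")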
import argparse import inspect import os import time import warnings from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import CLIPTokenizer from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> # !pip install opencv-python transformers accelerate >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> import numpy as np >>> import torch >>> import cv2 >>> from PIL import Image >>> # download an image >>> image = load_image( ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ... ) >>> np_image = np.array(image) >>> # get canny image >>> np_image = cv2.Canny(np_image, 100, 200) >>> np_image = np_image[:, :, None] >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2) >>> canny_image = Image.fromarray(np_image) >>> # load control net and stable diffusion v1-5 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 ... ) >>> # speed up diffusion process with faster scheduler and memory optimization >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) >>> pipe.enable_model_cpu_offload() >>> # generate image >>> generator = torch.manual_seed(0) >>> image = pipe( ... "futuristic-looking woman", ... num_inference_steps=20, ... generator=generator, ... image=image, ... control_image=canny_image, ... 
).images[0] ``` """ def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline): vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: KarrasDiffusionSchedulers def __init__( self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (4 - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) def _encode_prompt( self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`np.ndarray`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="np", ) negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): warnings.warn( "The decode_latents method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor instead", FutureWarning, ) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, num_controlnet, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Check `image` if num_controlnet == 1: self.check_image(image, prompt, prompt_embeds) elif num_controlnet > 1: if not isinstance(image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(image) != num_controlnet: raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets." 
) for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if num_controlnet == 1: if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif num_controlnet > 1: if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif ( isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != num_controlnet ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if num_controlnet > 1: if len(control_guidance_start) != num_controlnet: raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: _image = image.cpu().detach().numpy() init_latents = self.vae_encoder(sample=_image)[0] init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype) init_latents = 0.18215 * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, num_controlnet: int, fp16: bool = True, prompt: Union[str, List[str]] = None, image: Union[ torch.Tensor, PIL.Image.Image, np.ndarray, List[torch.Tensor], List[PIL.Image.Image], List[np.ndarray], ] = None, control_image: Union[ torch.Tensor, PIL.Image.Image, np.ndarray, List[torch.Tensor], List[PIL.Image.Image], List[np.ndarray], ] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 0.8, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The initial image will be used as the starting point for the image generation process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in init, images must be passed as a list such that each element of the list can be correctly batched for input to a single controlnet. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. If multiple ControlNets are specified in init, you can set the corresponding scale as a list. 
Note that by default, we use a smaller conditioning scale for inpainting than for [`~StableDiffusionControlNetPipeline.__call__`]. guess_mode (`bool`, *optional*, defaults to `False`): In this mode, the ControlNet encoder will try best to recognize the content of the input image even if you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the controlnet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the controlnet stops applying. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ if fp16: torch_dtype = torch.float16 np_dtype = np.float16 else: torch_dtype = torch.float32 np_dtype = np.float32 # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = num_controlnet control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( num_controlnet, prompt, control_image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare image image = self.image_processor.preprocess(image).to(dtype=torch.float32) # 5. 
Prepare controlnet_conditioning_image if num_controlnet == 1: control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=torch_dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) elif num_controlnet > 1: control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=torch_dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, torch_dtype, device, generator, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # predict the noise residual _latent_model_input = latent_model_input.cpu().detach().numpy() _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype) _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype) if num_controlnet == 1: control_images = np.array([control_image], dtype=np_dtype) else: control_images = [] for _control_img in control_image: _control_img = _control_img.cpu().detach().numpy() control_images.append(_control_img) control_images = np.array(control_images, dtype=np_dtype) control_scales = np.array(cond_scale, dtype=np_dtype) control_scales = np.resize(control_scales, (num_controlnet, 1)) noise_pred = self.unet( sample=_latent_model_input, timestep=_t, encoder_hidden_states=_prompt_embeds, controlnet_conds=control_images, conditioning_scales=control_scales, )[0] noise_pred = torch.from_numpy(noise_pred).to(device) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous 
noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": _latents = latents.cpu().detach().numpy() / 0.18215 _latents = np.array(_latents, dtype=np_dtype) image = self.vae_decoder(latent_sample=_latents)[0] image = torch.from_numpy(image).to(device, dtype=torch.float32) has_nsfw_concept = None else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--sd_model", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument( "--onnx_model_dir", type=str, required=True, help="Path to the ONNX directory", ) parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image") args = parser.parse_args() qr_image = Image.open(args.qr_img_path) qr_image = qr_image.resize((512, 512)) # init stable diffusion pipeline pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model) pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config) provider = ["CUDAExecutionProvider", "CPUExecutionProvider"] onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider ), vae_decoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider ), text_encoder=OnnxRuntimeModel.from_pretrained( os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider ), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider), scheduler=pipeline.scheduler, ) onnx_pipeline = onnx_pipeline.to("cuda") prompt = "a cute cat fly to the moon" negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect" for i in range(10): start_time = time.time() image = onnx_pipeline( num_controlnet=2, prompt=prompt, 
negative_prompt=negative_prompt, image=qr_image, control_image=[qr_image, qr_image], width=512, height=512, strength=0.75, num_inference_steps=20, num_images_per_prompt=1, controlnet_conditioning_scale=[0.8, 0.8], control_guidance_start=[0.3, 0.3], control_guidance_end=[0.9, 0.9], ).images[0] print(time.time() - start_time) image.save("output_qr_code.png")
diffusers/examples/community/run_onnx_controlnet.py/0
{ "file_path": "diffusers/examples/community/run_onnx_controlnet.py", "repo_id": "diffusers", "token_count": 19727 }
136
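Both of the preceding entries build a per-step `controlnet_keep` mask from `control_guidance_start` / `control_guidance_end`, scaling each ControlNet to zero outside its guidance window. The self-contained sketch below isolates that schedule so it can be inspected on its own; the step count and window values are arbitrary illustrative numbers, not values taken from either file.

# Standalone sketch of the controlnet_keep schedule used in the two pipelines above:
# at denoising step i, a ControlNet is kept (factor 1.0) only while the normalized
# progress i/num_steps lies inside its [start, end) guidance window.
def controlnet_keep_schedule(num_steps, control_guidance_start, control_guidance_end):
    schedule = []
    for i in range(num_steps):
        keeps = [
            1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
            for s, e in zip(control_guidance_start, control_guidance_end)
        ]
        # Single-ControlNet pipelines store the scalar, multi-ControlNet ones the list.
        schedule.append(keeps[0] if len(keeps) == 1 else keeps)
    return schedule


if __name__ == "__main__":
    # Two ControlNets with the same illustrative windows as the example __main__ above:
    # the first is active from 30% to 90% of the steps, the second for all steps.
    print(controlnet_keep_schedule(10, [0.3, 0.0], [0.9, 1.0]))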
# # Copyright 2025 The HuggingFace Inc. team. # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os from collections import OrderedDict from typing import List, Optional, Tuple, Union import numpy as np import onnx import onnx_graphsurgeon as gs import PIL.Image import tensorrt as trt import torch from cuda import cudart from huggingface_hub import snapshot_download from huggingface_hub.utils import validate_hf_hub_args from onnx import shape_inference from packaging import version from polygraphy import cuda from polygraphy.backend.common import bytes_from_path from polygraphy.backend.onnx.loader import fold_constants from polygraphy.backend.trt import ( CreateConfig, Profile, engine_from_bytes, engine_from_network, network_from_onnx_path, save_engine, ) from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict, deprecate from diffusers.image_processor import VaeImageProcessor from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import ( StableDiffusionPipelineOutput, StableDiffusionSafetyChecker, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents from diffusers.schedulers import DDIMScheduler from diffusers.utils import logging """ Installation instructions python3 -m pip install --upgrade transformers diffusers>=0.16.0 python3 -m pip install --upgrade tensorrt~=10.2.0 python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com python3 -m pip install onnxruntime """ TRT_LOGGER = trt.Logger(trt.Logger.ERROR) logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Map of numpy dtype -> torch dtype numpy_to_torch_dtype_dict = { np.uint8: torch.uint8, np.int8: torch.int8, np.int16: torch.int16, np.int32: torch.int32, np.int64: torch.int64, np.float16: torch.float16, np.float32: torch.float32, np.float64: torch.float64, np.complex64: torch.complex64, np.complex128: torch.complex128, } if np.version.full_version >= "1.24.0": numpy_to_torch_dtype_dict[np.bool_] = torch.bool else: numpy_to_torch_dtype_dict[np.bool] = torch.bool # Map of torch dtype -> numpy dtype torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()} def preprocess_image(image): """ image: torch.Tensor """ w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h)) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).contiguous() return 2.0 * image - 1.0 class Engine: def __init__(self, engine_path): self.engine_path = engine_path self.engine = None self.context = None self.buffers = OrderedDict() self.tensors = 
OrderedDict() def __del__(self): [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)] del self.engine del self.context del self.buffers del self.tensors def build( self, onnx_path, fp16, input_profile=None, enable_all_tactics=False, timing_cache=None, ): logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}") p = Profile() if input_profile: for name, dims in input_profile.items(): assert len(dims) == 3 p.add(name, min=dims[0], opt=dims[1], max=dims[2]) extra_build_args = {} if not enable_all_tactics: extra_build_args["tactic_sources"] = [] engine = engine_from_network( network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]), config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **extra_build_args), save_timing_cache=timing_cache, ) save_engine(engine, path=self.engine_path) def load(self): logger.warning(f"Loading TensorRT engine: {self.engine_path}") self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) def activate(self): self.context = self.engine.create_execution_context() def allocate_buffers(self, shape_dict=None, device="cuda"): for binding in range(self.engine.num_io_tensors): name = self.engine.get_tensor_name(binding) if shape_dict and name in shape_dict: shape = shape_dict[name] else: shape = self.engine.get_tensor_shape(name) dtype = trt.nptype(self.engine.get_tensor_dtype(name)) if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT: self.context.set_input_shape(name, shape) tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) self.tensors[name] = tensor def infer(self, feed_dict, stream): for name, buf in feed_dict.items(): self.tensors[name].copy_(buf) for name, tensor in self.tensors.items(): self.context.set_tensor_address(name, tensor.data_ptr()) noerror = self.context.execute_async_v3(stream) if not noerror: raise ValueError("ERROR: inference failed.") return self.tensors class Optimizer: def __init__(self, onnx_graph): self.graph = gs.import_onnx(onnx_graph) def cleanup(self, return_onnx=False): self.graph.cleanup().toposort() if return_onnx: return gs.export_onnx(self.graph) def select_outputs(self, keep, names=None): self.graph.outputs = [self.graph.outputs[o] for o in keep] if names: for i, name in enumerate(names): self.graph.outputs[i].name = name def fold_constants(self, return_onnx=False): onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True) self.graph = gs.import_onnx(onnx_graph) if return_onnx: return onnx_graph def infer_shapes(self, return_onnx=False): onnx_graph = gs.export_onnx(self.graph) if onnx_graph.ByteSize() > 2147483648: raise TypeError("ERROR: model size exceeds supported 2GB limit") else: onnx_graph = shape_inference.infer_shapes(onnx_graph) self.graph = gs.import_onnx(onnx_graph) if return_onnx: return onnx_graph class BaseModel: def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77): self.model = model self.name = "SD Model" self.fp16 = fp16 self.device = device self.min_batch = 1 self.max_batch = max_batch_size self.min_image_shape = 256 # min image resolution: 256x256 self.max_image_shape = 1024 # max image resolution: 1024x1024 self.min_latent_shape = self.min_image_shape // 8 self.max_latent_shape = self.max_image_shape // 8 self.embedding_dim = embedding_dim self.text_maxlen = text_maxlen def get_model(self): return self.model def get_input_names(self): pass def get_output_names(self): 
pass def get_dynamic_axes(self): return None def get_sample_input(self, batch_size, image_height, image_width): pass def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): return None def get_shape_dict(self, batch_size, image_height, image_width): return None def optimize(self, onnx_graph): opt = Optimizer(onnx_graph) opt.cleanup() opt.fold_constants() opt.infer_shapes() onnx_opt_graph = opt.cleanup(return_onnx=True) return onnx_opt_graph def check_dims(self, batch_size, image_height, image_width): assert batch_size >= self.min_batch and batch_size <= self.max_batch assert image_height % 8 == 0 or image_width % 8 == 0 latent_height = image_height // 8 latent_width = image_width // 8 assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape return (latent_height, latent_width) def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape): min_batch = batch_size if static_batch else self.min_batch max_batch = batch_size if static_batch else self.max_batch latent_height = image_height // 8 latent_width = image_width // 8 min_image_height = image_height if static_shape else self.min_image_shape max_image_height = image_height if static_shape else self.max_image_shape min_image_width = image_width if static_shape else self.min_image_shape max_image_width = image_width if static_shape else self.max_image_shape min_latent_height = latent_height if static_shape else self.min_latent_shape max_latent_height = latent_height if static_shape else self.max_latent_shape min_latent_width = latent_width if static_shape else self.min_latent_shape max_latent_width = latent_width if static_shape else self.max_latent_shape return ( min_batch, max_batch, min_image_height, max_image_height, min_image_width, max_image_width, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) def getOnnxPath(model_name, onnx_dir, opt=True): return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx") def getEnginePath(model_name, engine_dir): return os.path.join(engine_dir, model_name + ".plan") def build_engines( models: dict, engine_dir, onnx_dir, onnx_opset, opt_image_height, opt_image_width, opt_batch_size=1, force_engine_rebuild=False, static_batch=False, static_shape=True, enable_all_tactics=False, timing_cache=None, ): built_engines = {} if not os.path.isdir(onnx_dir): os.makedirs(onnx_dir) if not os.path.isdir(engine_dir): os.makedirs(engine_dir) # Export models to ONNX for model_name, model_obj in models.items(): engine_path = getEnginePath(model_name, engine_dir) if force_engine_rebuild or not os.path.exists(engine_path): logger.warning("Building Engines...") logger.warning("Engine build can take a while to complete") onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) onnx_opt_path = getOnnxPath(model_name, onnx_dir) if force_engine_rebuild or not os.path.exists(onnx_opt_path): if force_engine_rebuild or not os.path.exists(onnx_path): logger.warning(f"Exporting model: {onnx_path}") model = model_obj.get_model() with torch.inference_mode(), torch.autocast("cuda"): inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width) torch.onnx.export( model, inputs, onnx_path, export_params=True, opset_version=onnx_opset, do_constant_folding=True, input_names=model_obj.get_input_names(), output_names=model_obj.get_output_names(), 
dynamic_axes=model_obj.get_dynamic_axes(), ) del model torch.cuda.empty_cache() gc.collect() else: logger.warning(f"Found cached model: {onnx_path}") # Optimize onnx if force_engine_rebuild or not os.path.exists(onnx_opt_path): logger.warning(f"Generating optimizing model: {onnx_opt_path}") onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path)) onnx.save(onnx_opt_graph, onnx_opt_path) else: logger.warning(f"Found cached optimized model: {onnx_opt_path} ") # Build TensorRT engines for model_name, model_obj in models.items(): engine_path = getEnginePath(model_name, engine_dir) engine = Engine(engine_path) onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) onnx_opt_path = getOnnxPath(model_name, onnx_dir) if force_engine_rebuild or not os.path.exists(engine.engine_path): engine.build( onnx_opt_path, fp16=True, input_profile=model_obj.get_input_profile( opt_batch_size, opt_image_height, opt_image_width, static_batch=static_batch, static_shape=static_shape, ), timing_cache=timing_cache, ) built_engines[model_name] = engine # Load and activate TensorRT engines for model_name, model_obj in models.items(): engine = built_engines[model_name] engine.load() engine.activate() return built_engines def runEngine(engine, feed_dict, stream): return engine.infer(feed_dict, stream) class CLIP(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(CLIP, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "CLIP" def get_input_names(self): return ["input_ids"] def get_output_names(self): return ["text_embeddings", "pooler_output"] def get_dynamic_axes(self): return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): self.check_dims(batch_size, image_height, image_width) min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims( batch_size, image_height, image_width, static_batch, static_shape ) return { "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)] } def get_shape_dict(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return { "input_ids": (batch_size, self.text_maxlen), "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim), } def get_sample_input(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device) def optimize(self, onnx_graph): opt = Optimizer(onnx_graph) opt.select_outputs([0]) # delete graph output#1 opt.cleanup() opt.fold_constants() opt.infer_shapes() opt.select_outputs([0], names=["text_embeddings"]) # rename network output opt_onnx_graph = opt.cleanup(return_onnx=True) return opt_onnx_graph def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False): return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class UNet(BaseModel): def __init__( self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4 ): super(UNet, self).__init__( model=model, fp16=fp16, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, text_maxlen=text_maxlen, ) self.unet_dim = unet_dim self.name = "UNet" def get_input_names(self): return ["sample", "timestep", "encoder_hidden_states"] def get_output_names(self): return ["latent"] def 
get_dynamic_axes(self): return { "sample": {0: "2B", 2: "H", 3: "W"}, "encoder_hidden_states": {0: "2B"}, "latent": {0: "2B", 2: "H", 3: "W"}, } def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, _, _, _, _, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "sample": [ (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width), (2 * batch_size, self.unet_dim, latent_height, latent_width), (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width), ], "encoder_hidden_states": [ (2 * min_batch, self.text_maxlen, self.embedding_dim), (2 * batch_size, self.text_maxlen, self.embedding_dim), (2 * max_batch, self.text_maxlen, self.embedding_dim), ], } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width), "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim), "latent": (2 * batch_size, 4, latent_height, latent_width), } def get_sample_input(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) dtype = torch.float16 if self.fp16 else torch.float32 return ( torch.randn( 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device ), torch.tensor([1.0], dtype=torch.float32, device=self.device), torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device), ) def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False): return UNet( model, fp16=True, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, unet_dim=(9 if inpaint else 4), ) class VAE(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(VAE, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "VAE decoder" def get_input_names(self): return ["latent"] def get_output_names(self): return ["images"] def get_dynamic_axes(self): return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, _, _, _, _, min_latent_height, max_latent_height, min_latent_width, max_latent_width, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "latent": [ (min_batch, 4, min_latent_height, min_latent_width), (batch_size, 4, latent_height, latent_width), (max_batch, 4, max_latent_height, max_latent_width), ] } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "latent": (batch_size, 4, latent_height, latent_width), "images": (batch_size, 3, image_height, image_width), } def get_sample_input(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device) 
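# The dynamic axes declared on the VAE decoder above encode the fixed 8x VAE scale factor:
# a latent of shape (B, 4, H, W) is decoded to an image of shape (B, 3, 8H, 8W), which is why
# get_shape_dict() pairs (batch_size, 4, latent_height, latent_width) with
# (batch_size, 3, image_height, image_width).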
def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False): return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class TorchVAEEncoder(torch.nn.Module): def __init__(self, model): super().__init__() self.vae_encoder = model def forward(self, x): return retrieve_latents(self.vae_encoder.encode(x)) class VAEEncoder(BaseModel): def __init__(self, model, device, max_batch_size, embedding_dim): super(VAEEncoder, self).__init__( model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim ) self.name = "VAE encoder" def get_model(self): vae_encoder = TorchVAEEncoder(self.model) return vae_encoder def get_input_names(self): return ["images"] def get_output_names(self): return ["latent"] def get_dynamic_axes(self): return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}} def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): assert batch_size >= self.min_batch and batch_size <= self.max_batch min_batch = batch_size if static_batch else self.min_batch max_batch = batch_size if static_batch else self.max_batch self.check_dims(batch_size, image_height, image_width) ( min_batch, max_batch, min_image_height, max_image_height, min_image_width, max_image_width, _, _, _, _, ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) return { "images": [ (min_batch, 3, min_image_height, min_image_width), (batch_size, 3, image_height, image_width), (max_batch, 3, max_image_height, max_image_width), ] } def get_shape_dict(self, batch_size, image_height, image_width): latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) return { "images": (batch_size, 3, image_height, image_width), "latent": (batch_size, 4, latent_height, latent_width), } def get_sample_input(self, batch_size, image_height, image_width): self.check_dims(batch_size, image_height, image_width) return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device) def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False): return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline): r""" Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, stages=["clip", "unet", "vae", "vae_encoder"], image_height: int = 512, image_width: int = 512, max_batch_size: int = 16, # ONNX export parameters onnx_opset: int = 17, onnx_dir: str = "onnx", # TensorRT engine build parameters engine_dir: str = "engine", force_engine_rebuild: bool = False, timing_cache: str = "timing_cache", ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.stages = stages self.image_height, self.image_width = image_height, image_width self.inpaint = False self.onnx_opset = onnx_opset self.onnx_dir = onnx_dir self.engine_dir = engine_dir self.force_engine_rebuild = force_engine_rebuild self.timing_cache = timing_cache self.build_static_batch = False self.build_dynamic_shape = False self.max_batch_size = max_batch_size # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation. 
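        # The cap applied below feeds into get_minmax_dims(), so the TensorRT optimization
        # profiles stay bounded when dynamic shapes or resolutions above 512x512 are requested.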
if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512: self.max_batch_size = 4 self.stream = None # loaded in loadResources() self.models = {} # loaded in __loadModels() self.engine = {} # loaded in build_engines() self.vae.forward = self.vae.decode self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def __loadModels(self): # Load pipeline models self.embedding_dim = self.text_encoder.config.hidden_size models_args = { "device": self.torch_device, "max_batch_size": self.max_batch_size, "embedding_dim": self.embedding_dim, "inpaint": self.inpaint, } if "clip" in self.stages: self.models["clip"] = make_CLIP(self.text_encoder, **models_args) if "unet" in self.stages: self.models["unet"] = make_UNet(self.unet, **models_args) if "vae" in self.stages: self.models["vae"] = make_VAE(self.vae, **models_args) if "vae_encoder" in self.stages: self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker( self, image: Union[torch.Tensor, PIL.Image.Image], device: torch.device, dtype: torch.dtype ) -> Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: r""" Runs the safety checker on the given image. Args: image (Union[torch.Tensor, PIL.Image.Image]): The input image to be checked. device (torch.device): The device to run the safety checker on. dtype (torch.dtype): The data type of the input image. Returns: (image, has_nsfw_concept) Tuple[Union[torch.Tensor, PIL.Image.Image], Optional[bool]]: A tuple containing the processed image and a boolean indicating whether the image has a NSFW (Not Safe for Work) concept. 
""" if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept @classmethod @validate_hf_hub_args def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) cls.cached_folder = ( pretrained_model_name_or_path if os.path.isdir(pretrained_model_name_or_path) else snapshot_download( pretrained_model_name_or_path, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, ) ) def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False): super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings) self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir) self.engine_dir = os.path.join(self.cached_folder, self.engine_dir) self.timing_cache = os.path.join(self.cached_folder, self.timing_cache) # set device self.torch_device = self._execution_device logger.warning(f"Running inference on device: {self.torch_device}") # load models self.__loadModels() # build engines self.engine = build_engines( self.models, self.engine_dir, self.onnx_dir, self.onnx_opset, opt_image_height=self.image_height, opt_image_width=self.image_width, force_engine_rebuild=self.force_engine_rebuild, static_batch=self.build_static_batch, static_shape=not self.build_dynamic_shape, timing_cache=self.timing_cache, ) return self def __initialize_timesteps(self, timesteps, strength): self.scheduler.set_timesteps(timesteps) offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0 init_timestep = int(timesteps * strength) + offset init_timestep = min(init_timestep, timesteps) t_start = max(timesteps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device) return timesteps, t_start def __preprocess_images(self, batch_size, images=()): init_images = [] for image in images: image = image.to(self.torch_device).float() image = image.repeat(batch_size, 1, 1, 1) init_images.append(image) return tuple(init_images) def __encode_image(self, init_image): init_latents = runEngine(self.engine["vae_encoder"], {"images": init_image}, self.stream)["latent"] init_latents = 0.18215 * init_latents return init_latents def __encode_prompt(self, prompt, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" # Tokenize prompt text_input_ids = ( self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) .input_ids.type(torch.int32) .to(self.torch_device) ) # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids}, self.stream)[ "text_embeddings" ].clone() # Tokenize negative prompt uncond_input_ids = ( self.tokenizer( negative_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) .input_ids.type(torch.int32) .to(self.torch_device) ) uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids}, self.stream)[ "text_embeddings" ] # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16) return text_embeddings def __denoise_latent( self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None ): if not isinstance(timesteps, torch.Tensor): timesteps = self.scheduler.timesteps for step_index, timestep in enumerate(timesteps): # Expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep) if isinstance(mask, torch.Tensor): latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # Predict the noise residual timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep noise_pred = runEngine( self.engine["unet"], {"sample": latent_model_input, "timestep": timestep_float, "encoder_hidden_states": text_embeddings}, self.stream, )["latent"] # Perform guidance noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample latents = 1.0 / 0.18215 * latents return latents def __decode_latent(self, latents): images = runEngine(self.engine["vae"], {"latent": latents}, self.stream)["images"] images = (images / 2 + 0.5).clamp(0, 1) return images.cpu().permute(0, 2, 3, 1).float().numpy() def __loadResources(self, image_height, image_width, batch_size): self.stream = cudart.cudaStreamCreate()[1] # Allocate buffers for TensorRT engine bindings for model_name, obj in self.models.items(): self.engine[model_name].allocate_buffers( shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. 
strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. """ self.generator = generator self.denoising_steps = num_inference_steps self._guidance_scale = guidance_scale # Pre-compute latent input scales and linear multistep coefficients self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device) # Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 prompt = [prompt] elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}") if negative_prompt is None: negative_prompt = [""] * batch_size if negative_prompt is not None and isinstance(negative_prompt, str): negative_prompt = [negative_prompt] assert len(prompt) == len(negative_prompt) if batch_size > self.max_batch_size: raise ValueError( f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. 
If dynamic shape is used, then maximum batch size is 4" ) # load resources self.__loadResources(self.image_height, self.image_width, batch_size) with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER): # Initialize timesteps timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength) latent_timestep = timesteps[:1].repeat(batch_size) # Pre-process input image if isinstance(image, PIL.Image.Image): image = preprocess_image(image) init_image = self.__preprocess_images(batch_size, (image,))[0] # VAE encode init image init_latents = self.__encode_image(init_image) # Add noise to latents using timesteps noise = torch.randn( init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32 ) latents = self.scheduler.add_noise(init_latents, noise, latent_timestep) # CLIP text encoder text_embeddings = self.__encode_prompt(prompt, negative_prompt) # UNet denoiser latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start) # VAE decode latent images = self.__decode_latent(latents) images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype) images = self.numpy_to_pil(images) return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
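
# A minimal usage sketch (an illustration, not part of the upstream example): it assumes a CUDA
# machine with TensorRT and the matching `tensorrt`/`onnx` Python packages installed, plus access
# to the runwayml/stable-diffusion-v1-5 checkpoint; the checkpoint id, prompt, and image URL are
# placeholders. The first run exports the ONNX graphs and builds the engines, which may take
# several minutes; both are cached under the downloaded model folder.
if __name__ == "__main__":
    from io import BytesIO

    import requests

    model_id = "runwayml/stable-diffusion-v1-5"  # placeholder checkpoint id
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = TensorRTStableDiffusionImg2ImgPipeline.from_pretrained(
        model_id, scheduler=scheduler, torch_dtype=torch.float16
    )
    # Point the pipeline at the snapshot folder so ONNX exports and engines are cached beside it.
    pipe.set_cached_folder(model_id)
    pipe = pipe.to("cuda")

    url = (
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/"
        "stable-samples/img2img/sketch-mountains-input.jpg"
    )
    init_image = PIL.Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((512, 512))
    result = pipe("A fantasy landscape, trending on artstation", image=init_image, strength=0.75)
    result.images[0].save("fantasy_landscape.png")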
diffusers/examples/community/stable_diffusion_tensorrt_img2img.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_tensorrt_img2img.py", "repo_id": "diffusers", "token_count": 21855 }
137
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import functools import gc import itertools import json import logging import math import os import random import shutil from contextlib import nullcontext from pathlib import Path from typing import List, Union import accelerate import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms.functional as TF import transformers import webdataset as wds from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from braceexpand import braceexpand from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig, get_peft_model, get_peft_model_state_dict from torch.utils.data import default_collate from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, CLIPTextModel, PretrainedConfig from webdataset.tariterators import ( base_plus_ext, tar_file_expander, url_opener, valid_sample, ) import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, LCMScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import resolve_interpolation_mode from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available MAX_SEQ_LENGTH = 77 if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.36.0.dev0") logger = get_logger(__name__) def get_module_kohya_state_dict(module, prefix: str, dtype: torch.dtype, adapter_name: str = "default"): kohya_ss_state_dict = {} for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items(): kohya_key = peft_key.replace("base_model.model", prefix) kohya_key = kohya_key.replace("lora_A", "lora_down") kohya_key = kohya_key.replace("lora_B", "lora_up") kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) kohya_ss_state_dict[kohya_key] = weight.to(dtype) # Set alpha parameter if "lora_down" in kohya_key: alpha_key = f"{kohya_key.split('.')[0]}.alpha" kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype) return kohya_ss_state_dict def filter_keys(key_set): def _f(dictionary): return {k: v for k, v in dictionary.items() if k in key_set} return _f def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): """Return function over iterator that groups key, value pairs into samples. 
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to lower case (Default value = True) """ current_sample = None for filesample in data: assert isinstance(filesample, dict) fname, value = filesample["fname"], filesample["data"] prefix, suffix = keys(fname) if prefix is None: continue if lcase: suffix = suffix.lower() # FIXME webdataset version throws if suffix in current_sample, but we have a potential for # this happening in the current LAION400m dataset if a tar ends with same prefix as the next # begins, rare, but can happen since prefix aren't unique across tar files in that dataset if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample: if valid_sample(current_sample): yield current_sample current_sample = {"__key__": prefix, "__url__": filesample["__url__"]} if suffixes is None or suffix in suffixes: current_sample[suffix] = value if valid_sample(current_sample): yield current_sample def tarfile_to_samples_nothrow(src, handler=wds.warn_and_continue): # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw streams = url_opener(src, handler=handler) files = tar_file_expander(streams, handler=handler) samples = group_by_keys_nothrow(files, handler=handler) return samples class WebdatasetFilter: def __init__(self, min_size=1024, max_pwatermark=0.5): self.min_size = min_size self.max_pwatermark = max_pwatermark def __call__(self, x): try: if "json" in x: x_json = json.loads(x["json"]) filter_size = (x_json.get("original_width", 0.0) or 0.0) >= self.min_size and x_json.get( "original_height", 0 ) >= self.min_size filter_watermark = (x_json.get("pwatermark", 1.0) or 1.0) <= self.max_pwatermark return filter_size and filter_watermark else: return False except Exception: return False class SDText2ImageDataset: def __init__( self, train_shards_path_or_url: Union[str, List[str]], num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int = 512, interpolation_type: str = "bilinear", shuffle_buffer_size: int = 1000, pin_memory: bool = False, persistent_workers: bool = False, ): if not isinstance(train_shards_path_or_url, str): train_shards_path_or_url = [list(braceexpand(urls)) for urls in train_shards_path_or_url] # flatten list using itertools train_shards_path_or_url = list(itertools.chain.from_iterable(train_shards_path_or_url)) interpolation_mode = resolve_interpolation_mode(interpolation_type) def transform(example): # resize image image = example["image"] image = TF.resize(image, resolution, interpolation=interpolation_mode) # get crop coordinates and crop image c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = TF.crop(image, c_top, c_left, resolution, resolution) image = TF.to_tensor(image) image = TF.normalize(image, [0.5], [0.5]) example["image"] = image return example processing_pipeline = [ wds.decode("pil", handler=wds.ignore_and_continue), wds.rename(image="jpg;png;jpeg;webp", text="text;txt;caption", handler=wds.warn_and_continue), wds.map(filter_keys({"image", "text"})), wds.map(transform), wds.to_tuple("image", "text"), ] # Create train dataset and loader pipeline = [ wds.ResampledShards(train_shards_path_or_url), tarfile_to_samples_nothrow, wds.shuffle(shuffle_buffer_size), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate), ] num_worker_batches = math.ceil(num_train_examples / (global_batch_size * 
num_workers)) # per dataloader worker num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size # each worker is iterating over this self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches) self._train_dataloader = wds.WebLoader( self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, ) # add meta-data to dataloader instance for convenience self._train_dataloader.num_batches = num_batches self._train_dataloader.num_samples = num_samples @property def train_dataset(self): return self._train_dataset @property def train_dataloader(self): return self._train_dataloader def log_validation(vae, unet, args, accelerator, weight_dtype, step): logger.info("Running validation... ") if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type, dtype=weight_dtype) unet = accelerator.unwrap_model(unet) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_teacher_model, vae=vae, scheduler=LCMScheduler.from_pretrained(args.pretrained_teacher_model, subfolder="scheduler"), revision=args.revision, torch_dtype=weight_dtype, safety_checker=None, ) pipeline.set_progress_bar_config(disable=True) lora_state_dict = get_module_kohya_state_dict(unet, "lora_unet", weight_dtype) pipeline.load_lora_weights(lora_state_dict) pipeline.fuse_lora() pipeline = pipeline.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) validation_prompts = [ "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography", "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k", "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece", ] image_logs = [] for _, prompt in enumerate(validation_prompts): images = [] with autocast_ctx: images = pipeline( prompt=prompt, num_inference_steps=4, num_images_per_prompt=4, generator=generator, guidance_scale=1.0, ).images image_logs.append({"validation_prompt": prompt, "images": images}) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] formatted_images = [] for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) tracker.log({"validation": formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs # From LatentConsistencyModel.get_guidance_scale_embedding def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32): """ See 
https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): scaled_timestep = timestep_scaling * timestep c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 return c_skip, c_out # Compare LCMScheduler.step, Step 4 def get_predicted_original_sample(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_x_0 = (sample - sigmas * model_output) / alphas elif prediction_type == "sample": pred_x_0 = model_output elif prediction_type == "v_prediction": pred_x_0 = alphas * sample - sigmas * model_output else: raise ValueError( f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." ) return pred_x_0 # Based on step 4 in DDIMScheduler.step def get_predicted_noise(model_output, timesteps, sample, prediction_type, alphas, sigmas): alphas = extract_into_tensor(alphas, timesteps, sample.shape) sigmas = extract_into_tensor(sigmas, timesteps, sample.shape) if prediction_type == "epsilon": pred_epsilon = model_output elif prediction_type == "sample": pred_epsilon = (sample - alphas * model_output) / sigmas elif prediction_type == "v_prediction": pred_epsilon = alphas * model_output + sigmas * sample else: raise ValueError( f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`" f" are supported." 
) return pred_epsilon def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def update_ema(target_params, source_params, rate=0.99): """ Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). """ for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src, alpha=1 - rate) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") # ----------Model Checkpoint Loading Arguments---------- parser.add_argument( "--pretrained_teacher_model", type=str, default=None, required=True, help="Path to pretrained LDM teacher model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. 
More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--teacher_revision", type=str, default=None, required=False, help="Revision of pretrained LDM teacher model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained LDM model identifier from huggingface.co/models.", ) # ----------Training Arguments---------- # ----General Training Arguments---- parser.add_argument( "--output_dir", type=str, default="lcm-xl-distilled", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Logging---- parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) # ----Checkpointing---- parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) # ----Image Processing---- parser.add_argument( "--train_shards_path_or_url", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--interpolation_type", type=str, default="bilinear", help=( "The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`," " `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) # ----Dataloader---- parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) # ----Batch Size and Training Steps---- parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) # ----Learning Rate---- parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) # ----Optimizer (Adam)---- parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") # ----Diffusion Training Arguments---- parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) # ----Latent Consistency Distillation (LCD) Specific Arguments---- parser.add_argument( "--w_min", type=float, default=5.0, required=False, help=( "The minimum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--w_max", type=float, default=15.0, required=False, help=( "The maximum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG" " formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as" " compared to the original paper." ), ) parser.add_argument( "--num_ddim_timesteps", type=int, default=50, help="The number of timesteps to use for DDIM sampling.", ) parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber"], help="The type of loss to use for the LCD loss.", ) parser.add_argument( "--huber_c", type=float, default=0.001, help="The huber loss parameter. 
Only used if `--loss_type=huber`.", ) parser.add_argument( "--lora_rank", type=int, default=64, help="The rank of the LoRA projection matrix.", ) parser.add_argument( "--lora_alpha", type=int, default=64, help=( "The value of the LoRA alpha parameter, which controls the scaling factor in front of the LoRA weight" " update delta_W. No scaling will be performed if this value is equal to `lora_rank`." ), ) parser.add_argument( "--lora_dropout", type=float, default=0.0, help="The dropout probability for the dropout layer added before applying the LoRA to each layer input.", ) parser.add_argument( "--lora_target_modules", type=str, default=None, help=( "A comma-separated string of target module keys to add LoRA to. If not set, a default list of modules will" " be used. By default, LoRA will be applied to all conv and linear layers." ), ) parser.add_argument( "--vae_encode_batch_size", type=int, default=32, required=False, help=( "The batch size used when encoding (and decoding) images to latents (and vice versa) using the VAE." " Encoding or decoding the whole batch at once may run into OOM issues." ), ) parser.add_argument( "--timestep_scaling_factor", type=float, default=10.0, help=( "The multiplicative timestep scaling factor used when calculating the boundary scalings for LCM. The" " higher the scaling is, the lower the approximation error, but the default value of 10.0 should typically" " suffice." ), ) # ----Mixed Precision---- parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--cast_teacher_unet", action="store_true", help="Whether to cast the teacher U-Net to the precision specified by `--mixed_precision`.", ) # ----Training Optimizations---- parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) # ----Distributed Training---- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") # ----------Validation Arguments---------- parser.add_argument( "--validation_steps", type=int, default=200, help="Run validation every X steps.", ) # ----------Huggingface Hub Arguments----------- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) # ----------Accelerate Arguments---------- parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") return args # Adapted from pipelines.StableDiffusionPipeline.encode_prompt def encode_prompt(prompt_batch, text_encoder, tokenizer, proportion_empty_prompts, is_train=True): captions = [] for caption in prompt_batch: if random.random() < proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(text_encoder.device))[0] return prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, split_batches=True, # It's important to set this to True when using webdataset to get the right number of steps for lr scheduling. If set to False, the number of steps will be divided by the number of processes assuming batches are multiplied by the number of processes ) # Make one log on every process with the configuration for debugging. 
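    # (accelerate's get_logger(), used for `logger` above, layers the `main_process_only`
    # switch seen below on top of this standard logging configuration.)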
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token, private=True, ).repo_id # 1. Create the noise scheduler and the desired noise schedule. noise_scheduler = DDPMScheduler.from_pretrained( args.pretrained_teacher_model, subfolder="scheduler", revision=args.teacher_revision ) # DDPMScheduler calculates the alpha and sigma noise schedules (based on the alpha bars) for us alpha_schedule = torch.sqrt(noise_scheduler.alphas_cumprod) sigma_schedule = torch.sqrt(1 - noise_scheduler.alphas_cumprod) # Initialize the DDIM ODE solver for distillation. solver = DDIMSolver( noise_scheduler.alphas_cumprod.numpy(), timesteps=noise_scheduler.config.num_train_timesteps, ddim_timesteps=args.num_ddim_timesteps, ) # 2. Load tokenizers from SD 1.X/2.X checkpoint. tokenizer = AutoTokenizer.from_pretrained( args.pretrained_teacher_model, subfolder="tokenizer", revision=args.teacher_revision, use_fast=False ) # 3. Load text encoders from SD 1.X/2.X checkpoint. # import correct text encoder classes text_encoder = CLIPTextModel.from_pretrained( args.pretrained_teacher_model, subfolder="text_encoder", revision=args.teacher_revision ) # 4. Load VAE from SD 1.X/2.X checkpoint vae = AutoencoderKL.from_pretrained( args.pretrained_teacher_model, subfolder="vae", revision=args.teacher_revision, ) # 5. Load teacher U-Net from SD 1.X/2.X checkpoint teacher_unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) # 6. Freeze teacher vae, text_encoder, and teacher_unet vae.requires_grad_(False) text_encoder.requires_grad_(False) teacher_unet.requires_grad_(False) # 7. Create online student U-Net. unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) unet.train() # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if accelerator.unwrap_model(unet).dtype != torch.float32: raise ValueError( f"Controlnet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}" ) # 8. Add LoRA to the student U-Net, only the LoRA projection matrix will be updated by the optimizer. 
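    # get_peft_model() below injects trainable low-rank A/B matrices into the listed target
    # modules and freezes the base weights, so only the LoRA parameters of the student U-Net
    # receive gradient updates from the optimizer created in step 12.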
if args.lora_target_modules is not None: lora_target_modules = [module_key.strip() for module_key in args.lora_target_modules.split(",")] else: lora_target_modules = [ "to_q", "to_k", "to_v", "to_out.0", "proj_in", "proj_out", "ff.net.0.proj", "ff.net.2", "conv1", "conv2", "conv_shortcut", "downsamplers.0.conv", "upsamplers.0.conv", "time_emb_proj", ] lora_config = LoraConfig( r=args.lora_rank, target_modules=lora_target_modules, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, ) unet = get_peft_model(unet, lora_config) # 9. Handle mixed precision and device placement # For mixed precision training we cast all non-trainable weights to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. vae.to(accelerator.device) if args.pretrained_vae_model_name_or_path is not None: vae.to(dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) # Move teacher_unet to device, optionally cast to weight_dtype teacher_unet.to(accelerator.device) if args.cast_teacher_unet: teacher_unet.to(dtype=weight_dtype) # Also move the alpha and sigma noise schedules to accelerator.device. alpha_schedule = alpha_schedule.to(accelerator.device) sigma_schedule = sigma_schedule.to(accelerator.device) # Move the ODE solver to accelerator.device. solver = solver.to(accelerator.device) # 10. Handle saving and loading of checkpoints # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: unet_ = accelerator.unwrap_model(unet) lora_state_dict = get_peft_model_state_dict(unet_, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(output_dir, "unet_lora"), lora_state_dict) # save weights in peft format to be able to load them back unet_.save_pretrained(output_dir) for _, model in enumerate(models): # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): # load the LoRA into the model unet_ = accelerator.unwrap_model(unet) unet_.load_adapter(input_dir, "default", is_trainable=True) for _ in range(len(models)): # pop models so that they are not loaded again models.pop() accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # 11. Enable optimizations if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() teacher_unet.enable_xformers_memory_efficient_attention() # target_unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # 12. Optimizer creation optimizer = optimizer_class( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # 13. Dataset creation and data processing # Here, we compute not just the text embeddings but also the additional embeddings # needed for the SD XL UNet to operate. def compute_embeddings(prompt_batch, proportion_empty_prompts, text_encoder, tokenizer, is_train=True): prompt_embeds = encode_prompt(prompt_batch, text_encoder, tokenizer, proportion_empty_prompts, is_train) return {"prompt_embeds": prompt_embeds} dataset = SDText2ImageDataset( train_shards_path_or_url=args.train_shards_path_or_url, num_train_examples=args.max_train_samples, per_gpu_batch_size=args.train_batch_size, global_batch_size=args.train_batch_size * accelerator.num_processes, num_workers=args.dataloader_num_workers, resolution=args.resolution, interpolation_type=args.interpolation_type, shuffle_buffer_size=1000, pin_memory=True, persistent_workers=True, ) train_dataloader = dataset.train_dataloader compute_embeddings_fn = functools.partial( compute_embeddings, proportion_empty_prompts=0, text_encoder=text_encoder, tokenizer=tokenizer, ) # 14. LR Scheduler creation # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) # 15. Prepare for training # Prepare everything with our `accelerator`. unet, optimizer, lr_scheduler = accelerator.prepare(unet, optimizer, lr_scheduler) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) uncond_input_ids = tokenizer( [""] * args.train_batch_size, return_tensors="pt", padding="max_length", max_length=77 ).input_ids.to(accelerator.device) uncond_prompt_embeds = text_encoder(uncond_input_ids)[0] if torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type) # 16. Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num batches each epoch = {train_dataloader.num_batches}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # 1. Load and process the image and text conditioning image, text = batch image = image.to(accelerator.device, non_blocking=True) encoded_text = compute_embeddings_fn(text) pixel_values = image.to(dtype=weight_dtype) if vae.dtype != weight_dtype: vae.to(dtype=weight_dtype) # encode pixel values with batch size of at most args.vae_encode_batch_size latents = [] for i in range(0, pixel_values.shape[0], args.vae_encode_batch_size): latents.append(vae.encode(pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()) latents = torch.cat(latents, dim=0) latents = latents * vae.config.scaling_factor latents = latents.to(weight_dtype) bsz = latents.shape[0] # 2. Sample a random timestep for each image t_n from the ODE solver timesteps without bias. # For the DDIM solver, the timestep schedule is [T - 1, T - k - 1, T - 2 * k - 1, ...] topk = noise_scheduler.config.num_train_timesteps // args.num_ddim_timesteps index = torch.randint(0, args.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # 3. 
Get boundary scalings for start_timesteps and (end) timesteps. c_skip_start, c_out_start = scalings_for_boundary_conditions( start_timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions( timesteps, timestep_scaling=args.timestep_scaling_factor ) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # 4. Sample noise from the prior and add it to the latents according to the noise magnitude at each # timestep (this is the forward diffusion process) [z_{t_{n + k}} in Algorithm 1] noise = torch.randn_like(latents) noisy_model_input = noise_scheduler.add_noise(latents, noise, start_timesteps) # 5. Sample a random guidance scale w from U[w_min, w_max] # Note that for LCM-LoRA distillation it is not necessary to use a guidance scale embedding w = (args.w_max - args.w_min) * torch.rand((bsz,)) + args.w_min w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # 6. Prepare prompt embeds and unet_added_conditions prompt_embeds = encoded_text.pop("prompt_embeds") # 7. Get online LCM prediction on z_{t_{n + k}} (noisy_model_input), w, c, t_{n + k} (start_timesteps) noise_pred = unet( noisy_model_input, start_timesteps, timestep_cond=None, encoder_hidden_states=prompt_embeds.float(), added_cond_kwargs=encoded_text, ).sample pred_x_0 = get_predicted_original_sample( noise_pred, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # 8. Compute the conditional and unconditional teacher model predictions to get CFG estimates of the # predicted noise eps_0 and predicted original sample x_0, then run the ODE solver using these # estimates to predict the data point in the augmented PF-ODE trajectory corresponding to the next ODE # solver timestep. with torch.no_grad(): with autocast_ctx: # 1. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and conditional embedding c cond_teacher_output = teacher_unet( noisy_model_input.to(weight_dtype), start_timesteps, encoder_hidden_states=prompt_embeds.to(weight_dtype), ).sample cond_pred_x0 = get_predicted_original_sample( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) cond_pred_noise = get_predicted_noise( cond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 2. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and unconditional embedding 0 uncond_teacher_output = teacher_unet( noisy_model_input.to(weight_dtype), start_timesteps, encoder_hidden_states=uncond_prompt_embeds.to(weight_dtype), ).sample uncond_pred_x0 = get_predicted_original_sample( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) uncond_pred_noise = get_predicted_noise( uncond_teacher_output, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) # 3. Calculate the CFG estimate of x_0 (pred_x0) and eps_0 (pred_noise) # Note that this uses the LCM paper's CFG formulation rather than the Imagen CFG formulation pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_pred_noise + w * (cond_pred_noise - uncond_pred_noise) # 4. 
Run one step of the ODE solver to estimate the next point x_prev on the # augmented PF-ODE trajectory (solving backward in time) # Note that the DDIM step depends on both the predicted x_0 and source noise eps_0. x_prev = solver.ddim_step(pred_x0, pred_noise, index) # 9. Get target LCM prediction on x_prev, w, c, t_n (timesteps) # Note that we do not use a separate target network for LCM-LoRA distillation. with torch.no_grad(): with autocast_ctx: target_noise_pred = unet( x_prev.float(), timesteps, timestep_cond=None, encoder_hidden_states=prompt_embeds.float(), ).sample pred_x_0 = get_predicted_original_sample( target_noise_pred, timesteps, x_prev, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) target = c_skip * x_prev + c_out * pred_x_0 # 10. Calculate loss if args.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif args.loss_type == "huber": loss = torch.mean( torch.sqrt((model_pred.float() - target.float()) ** 2 + args.huber_c**2) - args.huber_c ) # 11. Backpropagate on the online student model (`unet`) accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % args.validation_steps == 0: log_validation(vae, unet, args, accelerator, weight_dtype, global_step) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) unet.save_pretrained(args.output_dir) lora_state_dict = get_peft_model_state_dict(unet, adapter_name="default") StableDiffusionPipeline.save_lora_weights(os.path.join(args.output_dir, "unet_lora"), lora_state_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py/0
{ "file_path": "diffusers/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py", "repo_id": "diffusers", "token_count": 27024 }
138
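For readers skimming the `train_lcm_distill_lora_sd_wds.py` script above, the consistency-model boundary scalings and the optional pseudo-Huber loss it relies on reduce to a few lines of arithmetic. The sketch below is a minimal illustration, not the script itself; the constants (`sigma_data = 0.5`, `timestep_scaling = 10.0`, `huber_c = 0.001`) are assumed defaults and may differ from a given training configuration.

```python
import torch


def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    # Consistency-model boundary conditions: the student prediction is
    # model_pred = c_skip * x_t + c_out * x0_pred, and at t = 0 we need the
    # identity map, i.e. c_skip -> 1 and c_out -> 0.
    scaled_t = timestep_scaling * timestep
    c_skip = sigma_data**2 / (scaled_t**2 + sigma_data**2)
    c_out = scaled_t / (scaled_t**2 + sigma_data**2) ** 0.5
    return c_skip, c_out


def pseudo_huber_loss(model_pred, target, huber_c=0.001):
    # Smooth, L1-like loss corresponding to the --loss_type huber option.
    return torch.mean(torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)


# Tiny usage example with placeholder timesteps.
timesteps = torch.tensor([0.0, 250.0, 999.0])
c_skip, c_out = scalings_for_boundary_conditions(timesteps)
print(c_skip, c_out)  # c_skip is 1 at t = 0 and decays toward 0 for large t.
```

At `t = 0` these scalings give `c_skip = 1` and `c_out = 0`, which is exactly the identity boundary condition the distillation objective needs.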
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random import time from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import torch import torch.utils.checkpoint import transformers from datasets import load_dataset, load_from_disk from flax import jax_utils from flax.core.frozen_dict import unfreeze from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder from PIL import Image, PngImagePlugin from torch.utils.data import IterableDataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxControlNetModel, FlaxDDPMScheduler, FlaxStableDiffusionControlNetPipeline, FlaxUNet2DConditionModel, ) from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card # To prevent an error that occurs when there are abnormally large compressed data chunk in the png image # see more https://github.com/python-pillow/Pillow/issues/5610 LARGE_ENOUGH_NUMBER = 100 PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2) if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.36.0.dev0") logger = logging.getLogger(__name__) def log_validation(pipeline, pipeline_params, controlnet_params, tokenizer, args, rng, weight_dtype): logger.info("Running validation...") pipeline_params = pipeline_params.copy() pipeline_params["controlnet"] = controlnet_params num_samples = jax.device_count() prng_seed = jax.random.split(rng, jax.device_count()) if len(args.validation_image) == len(args.validation_prompt): validation_images = args.validation_image validation_prompts = args.validation_prompt elif len(args.validation_image) == 1: validation_images = args.validation_image * len(args.validation_prompt) validation_prompts = args.validation_prompt elif len(args.validation_prompt) == 1: validation_images = args.validation_image validation_prompts = args.validation_prompt * len(args.validation_image) else: raise ValueError( "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`" ) image_logs = [] for validation_prompt, validation_image in zip(validation_prompts, validation_images): prompts = num_samples * [validation_prompt] prompt_ids = pipeline.prepare_text_inputs(prompts) prompt_ids = shard(prompt_ids) validation_image = Image.open(validation_image).convert("RGB") processed_image = pipeline.prepare_image_inputs(num_samples * [validation_image]) processed_image = shard(processed_image) images = pipeline( prompt_ids=prompt_ids, image=processed_image, params=pipeline_params, prng_seed=prng_seed, num_inference_steps=50, jit=True, ).images images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) image_logs.append( {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt} ) if args.report_to == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning")) for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) wandb.log({"validation": formatted_images}) else: logger.warning(f"image logging not implemented for {args.report_to}") return image_logs def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None): img_str = "" if image_logs is not None: for i, log in enumerate(image_logs): images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] validation_image.save(os.path.join(repo_folder, "image_control.png")) img_str += f"prompt: {validation_prompt}\n" images = [validation_image] + images make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png")) img_str += f"![images_{i})](./images_{i}.png)\n" model_description = f""" # controlnet- {repo_id} These are controlnet weights trained on {base_model} with new type of conditioning. You can find some example images in the following. 
\n {img_str} """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=base_model, model_description=model_description, inference=True, ) tags = [ "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "controlnet", "jax-diffusers-event", "diffusers-training", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--controlnet_model_name_or_path", type=str, default=None, help="Path to pretrained controlnet model or model identifier from huggingface.co/models." " If not specified controlnet weights are initialized from unet.", ) parser.add_argument( "--revision", type=str, default=None, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--from_pt", action="store_true", help="Load the pretrained model from a PyTorch checkpoint.", ) parser.add_argument( "--controlnet_revision", type=str, default=None, help="Revision of controlnet model identifier from huggingface.co/models.", ) parser.add_argument( "--profile_steps", type=int, default=0, help="How many training steps to profile in the beginning.", ) parser.add_argument( "--profile_validation", action="store_true", help="Whether to profile the (last) validation.", ) parser.add_argument( "--profile_memory", action="store_true", help="Whether to dump an initial (before training loop) and a final (at program end) memory profile.", ) parser.add_argument( "--ccache", type=str, default=None, help="Enables compilation cache.", ) parser.add_argument( "--controlnet_from_pt", action="store_true", help="Load the controlnet model from a PyTorch checkpoint.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--output_dir", type=str, default="runs/{timestamp}", help="The output directory where the model predictions and checkpoints will be written. " "Can contain placeholders: {timestamp}.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform.", ) parser.add_argument( "--checkpointing_steps", type=int, default=5000, help=("Save a checkpoint of the training state every X updates."), ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://huggingface.co/papers/2303.09556.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_steps", type=int, default=100, help=("log training metric every X steps to `--report_t`"), ) parser.add_argument( "--report_to", type=str, default="wandb", help=('The integration to report the results and logs to. Currently only supported platforms are `"wandb"`'), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument("--streaming", action="store_true", help="To stream a large dataset from Hub.") parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training dataset. By default it will use `load_dataset` method to load a custom dataset from the folder." 
"Folder must contain a dataset script as described here https://huggingface.co/docs/datasets/dataset_script) ." "If `--load_from_disk` flag is passed, it will use `load_from_disk` method instead. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--load_from_disk", action="store_true", help=( "If True, will load a dataset that was previously saved using `save_to_disk` from `--train_data_dir`" "See more https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.load_from_disk" ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." ) parser.add_argument( "--conditioning_image_column", type=str, default="conditioning_image", help="The column of the dataset containing the controlnet conditioning image.", ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set. Needed if `streaming` is set to True." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--validation_prompt", type=str, default=None, nargs="+", help=( "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." " Provide either a matching number of `--validation_image`s, a single `--validation_image`" " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." ), ) parser.add_argument( "--validation_image", type=str, default=None, nargs="+", help=( "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`" " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" " `--validation_image` that will be used with all `--validation_prompt`s." ), ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` and logging the images." 
), ) parser.add_argument("--wandb_entity", type=str, default=None, help=("The wandb entity to use (for teams).")) parser.add_argument( "--tracker_project_name", type=str, default="train_controlnet_flax", help=("The `project` argument passed to wandb"), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of steps to accumulate gradients over" ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() args.output_dir = args.output_dir.replace("{timestamp}", time.strftime("%Y%m%d_%H%M%S")) env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") if args.dataset_name is not None and args.train_data_dir is not None: raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") if args.validation_prompt is not None and args.validation_image is None: raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") if args.validation_prompt is None and args.validation_image is not None: raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") if ( args.validation_image is not None and args.validation_prompt is not None and len(args.validation_image) != 1 and len(args.validation_prompt) != 1 and len(args.validation_image) != len(args.validation_prompt) ): raise ValueError( "Must provide either 1 `--validation_image`, 1 `--validation_prompt`," " or the same number of `--validation_prompt`s and `--validation_image`s" ) # This idea comes from # https://github.com/borisdayma/dalle-mini/blob/d2be512d4a6a9cda2d63ba04afc33038f98f705f/src/dalle_mini/data.py#L370 if args.streaming and args.max_train_samples is None: raise ValueError("You must specify `max_train_samples` when using dataset streaming.") return args def make_train_dataset(args, tokenizer, batch_size=None): # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, streaming=args.streaming, ) else: if args.train_data_dir is not None: if args.load_from_disk: dataset = load_from_disk( args.train_data_dir, ) else: dataset = load_dataset( args.train_data_dir, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script # Preprocessing the datasets. # We need to tokenize inputs and targets. if isinstance(dataset["train"], IterableDataset): column_names = next(iter(dataset["train"])).keys() else: column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
if args.image_column is None: image_column = column_names[0] logger.info(f"image column defaulting to {image_column}") else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = column_names[1] logger.info(f"caption column defaulting to {caption_column}") else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.conditioning_image_column is None: conditioning_image_column = column_names[2] logger.info(f"conditioning image column defaulting to {caption_column}") else: conditioning_image_column = args.conditioning_image_column if conditioning_image_column not in column_names: raise ValueError( f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if random.random() < args.proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids image_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) conditioning_image_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution), transforms.ToTensor(), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] images = [image_transforms(image) for image in images] conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]] conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images] examples["pixel_values"] = images examples["conditioning_pixel_values"] = conditioning_images examples["input_ids"] = tokenize_captions(examples) return examples if jax.process_index() == 0: if args.max_train_samples is not None: if args.streaming: dataset["train"] = dataset["train"].shuffle(seed=args.seed).take(args.max_train_samples) else: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms if args.streaming: train_dataset = dataset["train"].map( preprocess_train, batched=True, batch_size=batch_size, remove_columns=list(dataset["train"].features.keys()), ) else: train_dataset = dataset["train"].with_transform(preprocess_train) return train_dataset def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() conditioning_pixel_values = 
torch.stack([example["conditioning_pixel_values"] for example in examples]) conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) batch = { "pixel_values": pixel_values, "conditioning_pixel_values": conditioning_pixel_values, "input_ids": input_ids, } batch = {k: v.numpy() for k, v in batch.items()} return batch def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # wandb init if jax.process_index() == 0 and args.report_to == "wandb": wandb.init( entity=args.wandb_entity, project=args.tracker_project_name, job_type="train", config=args, ) if args.seed is not None: set_seed(args.seed) rng = jax.random.PRNGKey(0) # Handle the repository creation if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) else: raise NotImplementedError("No tokenizer specified!") # Get the datasets: you can either provide your own training and evaluation files (see below) total_train_batch_size = args.train_batch_size * jax.local_device_count() * args.gradient_accumulation_steps train_dataset = make_train_dataset(args, tokenizer, batch_size=total_train_batch_size) train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=not args.streaming, collate_fn=collate_fn, batch_size=total_train_batch_size, num_workers=args.dataloader_num_workers, drop_last=True, ) weight_dtype = jnp.float32 if args.mixed_precision == "fp16": weight_dtype = jnp.float16 elif args.mixed_precision == "bf16": weight_dtype = jnp.bfloat16 # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, subfolder="vae", dtype=weight_dtype, from_pt=args.from_pt, ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) if args.controlnet_model_name_or_path: logger.info("Loading existing controlnet weights") controlnet, controlnet_params = 
FlaxControlNetModel.from_pretrained( args.controlnet_model_name_or_path, revision=args.controlnet_revision, from_pt=args.controlnet_from_pt, dtype=jnp.float32, ) else: logger.info("Initializing controlnet weights from unet") rng, rng_params = jax.random.split(rng) controlnet = FlaxControlNetModel( in_channels=unet.config.in_channels, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, attention_head_dim=unet.config.attention_head_dim, cross_attention_dim=unet.config.cross_attention_dim, use_linear_projection=unet.config.use_linear_projection, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, ) controlnet_params = controlnet.init_weights(rng=rng_params) controlnet_params = unfreeze(controlnet_params) for key in [ "conv_in", "time_embedding", "down_blocks_0", "down_blocks_1", "down_blocks_2", "down_blocks_3", "mid_block", ]: controlnet_params[key] = unet_params[key] pipeline, pipeline_params = FlaxStableDiffusionControlNetPipeline.from_pretrained( args.pretrained_model_name_or_path, tokenizer=tokenizer, controlnet=controlnet, safety_checker=None, dtype=weight_dtype, revision=args.revision, from_pt=args.from_pt, ) pipeline_params = jax_utils.replicate(pipeline_params) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) adamw = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) optimizer = optax.chain( optax.clip_by_global_norm(args.max_grad_norm), adamw, ) state = train_state.TrainState.create(apply_fn=controlnet.__call__, params=controlnet_params, tx=optimizer) noise_scheduler, noise_scheduler_state = FlaxDDPMScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler" ) # Initialize our training validation_rng, train_rngs = jax.random.split(rng) train_rngs = jax.random.split(train_rngs, jax.local_device_count()) def compute_snr(timesteps): """ Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 """ alphas_cumprod = noise_scheduler_state.common.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod**0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 alpha = sqrt_alphas_cumprod[timesteps] sigma = sqrt_one_minus_alphas_cumprod[timesteps] # Compute SNR. 
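        # (Equivalently, SNR(t) = alphas_cumprod[t] / (1 - alphas_cumprod[t]).)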
snr = (alpha / sigma) ** 2 return snr def train_step(state, unet_params, text_encoder_params, vae_params, batch, train_rng): # reshape batch, add grad_step_dim if gradient_accumulation_steps > 1 if args.gradient_accumulation_steps > 1: grad_steps = args.gradient_accumulation_steps batch = jax.tree_map(lambda x: x.reshape((grad_steps, x.shape[0] // grad_steps) + x.shape[1:]), batch) def compute_loss(params, minibatch, sample_rng): # Convert images to latent space vae_outputs = vae.apply( {"params": vae_params}, minibatch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) # Sample a random timestep for each image bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder( minibatch["input_ids"], params=text_encoder_params, train=False, )[0] controlnet_cond = minibatch["conditioning_pixel_values"] # Predict the noise residual and compute loss down_block_res_samples, mid_block_res_sample = controlnet.apply( {"params": params}, noisy_latents, timesteps, encoder_hidden_states, controlnet_cond, train=True, return_dict=False, ) model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 if args.snr_gamma is not None: snr = jnp.array(compute_snr(timesteps)) snr_loss_weights = jnp.where(snr < args.snr_gamma, snr, jnp.ones_like(snr) * args.snr_gamma) if noise_scheduler.config.prediction_type == "epsilon": snr_loss_weights = snr_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": snr_loss_weights = snr_loss_weights / (snr + 1) loss = loss * snr_loss_weights loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) # get a minibatch (one gradient accumulation slice) def get_minibatch(batch, grad_idx): return jax.tree_util.tree_map( lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False), batch, ) def loss_and_grad(grad_idx, train_rng): # create minibatch for the grad step minibatch = get_minibatch(batch, grad_idx) if grad_idx is not None else batch sample_rng, train_rng = jax.random.split(train_rng, 2) loss, grad = grad_fn(state.params, minibatch, sample_rng) return loss, grad, train_rng if args.gradient_accumulation_steps == 1: loss, grad, new_train_rng = loss_and_grad(None, train_rng) else: init_loss_grad_rng = ( 0.0, # initial value for cumul_loss jax.tree_map(jnp.zeros_like, state.params), # initial value for cumul_grad 
train_rng, # initial value for train_rng ) def cumul_grad_step(grad_idx, loss_grad_rng): cumul_loss, cumul_grad, train_rng = loss_grad_rng loss, grad, new_train_rng = loss_and_grad(grad_idx, train_rng) cumul_loss, cumul_grad = jax.tree_map(jnp.add, (cumul_loss, cumul_grad), (loss, grad)) return cumul_loss, cumul_grad, new_train_rng loss, grad, new_train_rng = jax.lax.fori_loop( 0, args.gradient_accumulation_steps, cumul_grad_step, init_loss_grad_rng, ) loss, grad = jax.tree_map(lambda x: x / args.gradient_accumulation_steps, (loss, grad)) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") def l2(xs): return jnp.sqrt(sum([jnp.vdot(x, x) for x in jax.tree_util.tree_leaves(xs)])) metrics["l2_grads"] = l2(jax.tree_util.tree_leaves(grad)) return new_state, metrics, new_train_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) unet_params = jax_utils.replicate(unet_params) text_encoder_params = jax_utils.replicate(text_encoder.params) vae_params = jax_utils.replicate(vae_params) # Train! if args.streaming: dataset_length = args.max_train_samples else: dataset_length = len(train_dataloader) num_update_steps_per_epoch = math.ceil(dataset_length / args.gradient_accumulation_steps) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {args.max_train_samples if args.streaming else len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.num_train_epochs * num_update_steps_per_epoch}") if jax.process_index() == 0 and args.report_to == "wandb": wandb.define_metric("*", step_metric="train/step") wandb.define_metric("train/step", step_metric="walltime") wandb.config.update( { "num_train_examples": args.max_train_samples if args.streaming else len(train_dataset), "total_train_batch_size": total_train_batch_size, "total_optimization_step": args.num_train_epochs * num_update_steps_per_epoch, "num_devices": jax.device_count(), "controlnet_params": sum(np.prod(x.shape) for x in jax.tree_util.tree_leaves(state.params)), } ) global_step = step0 = 0 epochs = tqdm( range(args.num_train_epochs), desc="Epoch ... 
", position=0, disable=jax.process_index() > 0, ) if args.profile_memory: jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_initial.prof")) t00 = t0 = time.monotonic() for epoch in epochs: # ======================== Training ================================ train_metrics = [] train_metric = None steps_per_epoch = ( args.max_train_samples // total_train_batch_size if args.streaming or args.max_train_samples else len(train_dataset) // total_train_batch_size ) train_step_progress_bar = tqdm( total=steps_per_epoch, desc="Training...", position=1, leave=False, disable=jax.process_index() > 0, ) # train for batch in train_dataloader: if args.profile_steps and global_step == 1: train_metric["loss"].block_until_ready() jax.profiler.start_trace(args.output_dir) if args.profile_steps and global_step == 1 + args.profile_steps: train_metric["loss"].block_until_ready() jax.profiler.stop_trace() batch = shard(batch) with jax.profiler.StepTraceAnnotation("train", step_num=global_step): state, train_metric, train_rngs = p_train_step( state, unet_params, text_encoder_params, vae_params, batch, train_rngs ) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break if ( args.validation_prompt is not None and global_step % args.validation_steps == 0 and jax.process_index() == 0 ): _ = log_validation( pipeline, pipeline_params, state.params, tokenizer, args, validation_rng, weight_dtype ) if global_step % args.logging_steps == 0 and jax.process_index() == 0: if args.report_to == "wandb": train_metrics = jax_utils.unreplicate(train_metrics) train_metrics = jax.tree_util.tree_map(lambda *m: jnp.array(m).mean(), *train_metrics) wandb.log( { "walltime": time.monotonic() - t00, "train/step": global_step, "train/epoch": global_step / dataset_length, "train/steps_per_sec": (global_step - step0) / (time.monotonic() - t0), **{f"train/{k}": v for k, v in train_metrics.items()}, } ) t0, step0 = time.monotonic(), global_step train_metrics = [] if global_step % args.checkpointing_steps == 0 and jax.process_index() == 0: controlnet.save_pretrained( f"{args.output_dir}/{global_step}", params=get_params_to_save(state.params), ) train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Final validation & store model. if jax.process_index() == 0: if args.validation_prompt is not None: if args.profile_validation: jax.profiler.start_trace(args.output_dir) image_logs = log_validation( pipeline, pipeline_params, state.params, tokenizer, args, validation_rng, weight_dtype ) if args.profile_validation: jax.profiler.stop_trace() else: image_logs = None controlnet.save_pretrained( args.output_dir, params=get_params_to_save(state.params), ) if args.push_to_hub: save_model_card( repo_id, image_logs=image_logs, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if args.profile_memory: jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_final.prof")) logger.info("Finished training.") if __name__ == "__main__": main()
diffusers/examples/controlnet/train_controlnet_flax.py/0
{ "file_path": "diffusers/examples/controlnet/train_controlnet_flax.py", "repo_id": "diffusers", "token_count": 20119 }
139
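The Min-SNR-gamma loss reweighting used by `train_controlnet_flax.py` above (the `--snr_gamma` option) can also be stated on its own. The sketch below is an illustration only: the linear-beta schedule and the example timesteps are placeholders, not values from a real checkpoint.

```python
import jax.numpy as jnp


def min_snr_loss_weights(timesteps, alphas_cumprod, snr_gamma, prediction_type="epsilon"):
    # SNR of the noised sample at each timestep: (alpha_t / sigma_t) ** 2.
    alpha = alphas_cumprod[timesteps] ** 0.5
    sigma = (1.0 - alphas_cumprod[timesteps]) ** 0.5
    snr = (alpha / sigma) ** 2
    # Min-SNR-gamma: clip the SNR at gamma, then normalize depending on the target.
    weights = jnp.where(snr < snr_gamma, snr, jnp.full_like(snr, snr_gamma))
    if prediction_type == "epsilon":
        return weights / snr
    if prediction_type == "v_prediction":
        return weights / (snr + 1.0)
    raise ValueError(f"Unknown prediction type {prediction_type}")


# Placeholder linear-beta schedule, just to exercise the function.
betas = jnp.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = jnp.cumprod(1.0 - betas)
print(min_snr_loss_weights(jnp.array([10, 500, 990]), alphas_cumprod, snr_gamma=5.0))
```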
# DreamBooth training example for Stable Diffusion XL (SDXL)

[DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject.

The `train_dreambooth_lora_sdxl.py` script shows how to implement the training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952).

> 💡 **Note**: For now, we only allow DreamBooth fine-tuning of the SDXL UNet via LoRA. LoRA is a parameter-efficient fine-tuning technique introduced in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/dreambooth` folder and run

```bash
pip install -r requirements_sdxl.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook)

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

When running `accelerate config`, setting the torch compile mode to True can give dramatic speedups. Note also that we use the PEFT library as the backend for LoRA training, so make sure `peft>=0.6.0` is installed in your environment.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```

This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.
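Pushing to the Hub requires being authenticated. If you have not logged in yet, one minimal way to do it from Python (assuming you already have a Hub token with write access) is:

```python
from huggingface_hub import login

# Prompts for (or accepts) a Hub token with write access; needed for --push_to_hub below.
login()
```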
Now, we can launch training using:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="lora-trained-xl"
export VAE_PATH="madebyollin/sdxl-vae-fp16-fix"

accelerate launch train_dreambooth_lora_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --pretrained_vae_model_name_or_path=$VAE_PATH \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on [Weights and Biases](https://wandb.ai/site). To use it, be sure to install `wandb` with `pip install wandb`. Don't forget to call `wandb login <your_api_key>` before training if you haven't done it before.
* `validation_prompt` and `validation_epochs` allow the script to do a few validation inference runs. This lets us qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 40GB A100 GPU.

### Dog toy example with < 16GB VRAM

By making use of [`gradient_checkpointing`](https://pytorch.org/docs/stable/checkpoint.html) (which is natively supported in Diffusers), [`xformers`](https://github.com/facebookresearch/xformers), and [`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) libraries, you can train SDXL LoRAs with less than 16GB of VRAM by adding the following flags to your accelerate launch command:

```diff
+  --enable_xformers_memory_efficient_attention \
+  --gradient_checkpointing \
+  --use_8bit_adam \
+  --mixed_precision="fp16" \
```

and making sure that you have the following libraries installed:

```
bitsandbytes>=0.40.0
xformers>=0.0.20
```

### Inference

Once training is done, we can perform inference like so:

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)
image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
image.save("sks_dog.png")
```

We can further refine the outputs with the [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0):

```python
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline
import torch

lora_model_id = <"lora-sdxl-dreambooth-id">
card = RepoCard.load(lora_model_id)
base_model_id = card.data.to_dict()["base_model"]

# Load the base pipeline and load the LoRA parameters into it.
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.load_lora_weights(lora_model_id)

# Load the refiner.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
refiner.to("cuda")

prompt = "A picture of a sks dog in a bucket"
generator = torch.Generator("cuda").manual_seed(0)

# Run inference.
image = pipe(prompt=prompt, output_type="latent", generator=generator).images[0]
image = refiner(prompt=prompt, image=image[None, :], generator=generator).images[0]
image.save("refined_sks_dog.png")
```

Here's a side-by-side comparison of the outputs with and without the Refiner:

| Without Refiner | With Refiner |
|---|---|
| ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/sks_dog.png) | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_sks_dog.png) |

### Training with text encoder(s)

Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind:

* SDXL has two text encoders. So, we fine-tune both using LoRA.
* When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory.

### Specifying a better VAE

SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument, namely `--pretrained_vae_model_name_or_path`, that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).

## Notes

In our experiments, we found that SDXL yields good initial results without extensive hyperparameter tuning. For example, without fine-tuning the text encoders and without using prior-preservation, we observed decent results. We didn't explore further hyperparameter tuning experiments, but we do encourage the community to explore this avenue further and share their results with us 🤗

## Results

You can explore the results from a couple of our internal experiments by checking out this link: [https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl](https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl). Specifically, we used the same script with the exact same hyperparameters on the following datasets:

* [Dogs](https://huggingface.co/datasets/diffusers/dog-example)
* [Starbucks logo](https://huggingface.co/datasets/diffusers/starbucks-example)
* [Mr. Potato Head](https://huggingface.co/datasets/diffusers/potato-head-example)
* [Keramer face](https://huggingface.co/datasets/diffusers/keramer-face-example)

## Running on a free-tier Colab Notebook

Check out [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_DreamBooth_LoRA_.ipynb).

## Conducting EDM-style training

It's now possible to perform EDM-style training as proposed in [Elucidating the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364).

For the SDXL model, simply set:

```diff
+  --do_edm_style_training \
```

Other SDXL-like models that use the EDM formulation, such as [playgroundai/playground-v2.5-1024px-aesthetic](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic), can also be DreamBooth'd with the script.
Below is an example command:

```bash
accelerate launch train_dreambooth_lora_sdxl.py \
  --pretrained_model_name_or_path="playgroundai/playground-v2.5-1024px-aesthetic" \
  --instance_data_dir="dog" \
  --output_dir="dog-playground-lora" \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-4 \
  --use_8bit_adam \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

> [!CAUTION]
> Min-SNR gamma is not supported with the EDM-style training yet.

When training with the PlaygroundAI model, it's recommended not to pass any "variant".

### DoRA training

The script now supports DoRA training too!

> Proposed in [DoRA: Weight-Decomposed Low-Rank Adaptation](https://huggingface.co/papers/2402.09353),
> **DoRA** is very similar to LoRA, except it decomposes the pre-trained weight into two components, **magnitude** and **direction**, and employs LoRA for _directional_ updates to efficiently minimize the number of trainable parameters.
> The authors found that by using DoRA, both the learning capacity and training stability of LoRA are enhanced without any additional overhead during inference.

> [!NOTE]
> 💡 DoRA training is still _experimental_
> and is likely to require different hyperparameter values to perform best compared to a LoRA.
> Specifically, we've noticed 2 differences to take into account in your training:
> 1. **LoRA seems to converge faster than DoRA** (so a set of parameters that may lead to overfitting when training a LoRA may be working well for a DoRA).
> 2. **DoRA quality is superior to LoRA, especially at lower ranks.** The difference in quality between a DoRA of rank 8 and a LoRA of rank 8 appears to be more significant than when training at ranks of 32 or 64, for example.
> This is also aligned with some of the quantitative analysis shown in the paper.

**Usage**

1. To use DoRA, you need to upgrade your installation of `peft`:

```bash
pip install -U peft
```

2. Enable DoRA training by adding this flag:

```bash
--use_dora
```

**Inference**

The inference is the same as if you had trained a regular LoRA 🤗

## Format compatibility

You can pass `--output_kohya_format` to additionally generate a state dictionary which should be compatible with other platforms and tools such as Automatic 1111, Comfy, Kohya, etc. The `output_dir` will contain a file named "pytorch_lora_weights_kohya.safetensors".
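If you already have the diffusers-format LoRA file and want to produce a Kohya-style copy after the fact, the sketch below shows one way this conversion could be done by hand, using the same helpers the training script imports from `diffusers.utils`. The output directory and file names here are just examples (they match the defaults used elsewhere in this README), so adjust them to your setup:

```python
from safetensors.torch import load_file, save_file

from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya

# Example paths; "lora-trained-xl" is the output directory used earlier in this README,
# and "pytorch_lora_weights.safetensors" is the default file name written by save_lora_weights.
output_dir = "lora-trained-xl"
lora_state_dict = load_file(f"{output_dir}/pytorch_lora_weights.safetensors")

# Convert diffusers-style LoRA keys to PEFT-style keys, then to the Kohya naming scheme.
peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)

save_file(kohya_state_dict, f"{output_dir}/pytorch_lora_weights_kohya.safetensors")
```

The resulting file can then be used with tools that expect Kohya-style LoRA weights.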
diffusers/examples/dreambooth/README_sdxl.md/0
{ "file_path": "diffusers/examples/dreambooth/README_sdxl.md", "repo_id": "diffusers", "token_count": 3776 }
140
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import itertools import json import logging import math import os import random import shutil import warnings from contextlib import nullcontext from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed from huggingface_hub import create_repo, hf_hub_download, upload_folder from huggingface_hub.utils import insecure_hashlib from packaging import version from peft import LoraConfig, set_peft_model_state_dict from peft.utils import get_peft_model_state_dict from PIL import Image from PIL.ImageOps import exif_transpose from safetensors.torch import load_file, save_file from torch.utils.data import Dataset from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, EDMEulerScheduler, EulerDiscreteScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, ) from diffusers.loaders import StableDiffusionLoraLoaderMixin from diffusers.optimization import get_scheduler from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr from diffusers.utils import ( check_min_version, convert_all_state_dict_to_peft, convert_state_dict_to_diffusers, convert_state_dict_to_kohya, convert_unet_state_dict_to_peft, is_peft_version, is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.36.0.dev0") logger = get_logger(__name__) def determine_scheduler_type(pretrained_model_name_or_path, revision): model_index_filename = "model_index.json" if os.path.isdir(pretrained_model_name_or_path): model_index = os.path.join(pretrained_model_name_or_path, model_index_filename) else: model_index = hf_hub_download( repo_id=pretrained_model_name_or_path, filename=model_index_filename, revision=revision ) with open(model_index, "r") as f: scheduler_type = json.load(f)["scheduler"][1] return scheduler_type def save_model_card( repo_id: str, use_dora: bool, images=None, base_model: str = None, train_text_encoder=False, instance_prompt=None, validation_prompt=None, repo_folder=None, vae_path=None, ): widget_dict = [] if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) widget_dict.append( {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} ) model_description = f""" # {"SDXL" if "playground" not in base_model else "Playground"} LoRA DreamBooth - {repo_id} <Gallery /> ## Model description These are {repo_id} LoRA adaption weights for {base_model}. The weights were trained using [DreamBooth](https://dreambooth.github.io/). LoRA for the text encoder was enabled: {train_text_encoder}. Special VAE used for training: {vae_path}. ## Trigger words You should use {instance_prompt} to trigger the image generation. ## Download model Weights for this model are available in Safetensors format. [Download]({repo_id}/tree/main) them in the Files & versions tab. """ if "playground" in base_model: model_description += """\n ## License Please adhere to the licensing terms as described [here](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md). """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="openrail++" if "playground" not in base_model else "playground-v2dot5-community", base_model=base_model, prompt=instance_prompt, model_description=model_description, widget=widget_dict, ) tags = [ "text-to-image", "text-to-image", "diffusers-training", "diffusers", "lora" if not use_dora else "dora", "template:sd-lora", ] if "playground" in base_model: tags.extend(["playground", "playground-diffusers"]) else: tags.extend(["stable-diffusion-xl", "stable-diffusion-xl-diffusers"]) model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( pipeline, args, accelerator, pipeline_args, epoch, torch_dtype, is_final_validation=False, ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the scheduler to ignore it scheduler_args = {} if not args.do_edm_style_training: if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None # Currently the context determination is a bit hand-wavy. We can improve it in the future if there's a better # way to condition it. Reference: https://github.com/huggingface/diffusers/pull/7126#issuecomment-1968523051 if torch.backends.mps.is_available() or "playground" in args.pretrained_model_name_or_path: autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext() with autocast_ctx: images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)] for tracker in accelerator.trackers: phase_name = "test" if is_final_validation else "validation" if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { phase_name: [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() return images def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private," " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--instance_data_dir", type=str, default=None, help=("A folder containing the training data. "), ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image. By " "default, the standard Image Dataset maps out 'file_name' " "to 'image'.", ) parser.add_argument( "--caption_column", type=str, default=None, help="The column of the dataset containing the instance prompt for each image", ) parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.") parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=50, help=( "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--do_edm_style_training", default=False, action="store_true", help="Flag to conduct training using the EDM formulation as introduced in https://huggingface.co/papers/2206.00364.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." 
), ) parser.add_argument( "--output_dir", type=str, default="lora-dreambooth-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--output_kohya_format", action="store_true", help="Flag to additionally generate final state dict in the Kohya format so that it becomes compatible with A111, Comfy, Kohya, etc.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_text_encoder", action="store_true", help="Whether to train the text encoder. If set, the text encoder should be float32 precision.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--text_encoder_lr", type=float, default=5e-6, help="Text encoder learning rate to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. 
" "More details here: https://huggingface.co/papers/2303.09556.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--optimizer", type=str, default="AdamW", help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers." ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers." ) parser.add_argument( "--prodigy_beta3", type=float, default=None, help="coefficients for computing the Prodigy stepsize using running averages. If set to None, " "uses the value of square root of beta2. Ignored if optimizer is adamW", ) parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay") parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params") parser.add_argument( "--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder" ) parser.add_argument( "--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer and Prodigy optimizers.", ) parser.add_argument( "--prodigy_use_bias_correction", type=bool, default=True, help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW", ) parser.add_argument( "--prodigy_safeguard_warmup", type=bool, default=True, help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. " "Ignored if optimizer is adamW", ) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--rank", type=int, default=4, help=("The dimension of the LoRA update matrices."), ) parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers") parser.add_argument( "--use_dora", action="store_true", default=False, help=( "Whether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://huggingface.co/papers/2402.09353. " "Note: to use DoRA you need to install peft from main, `pip install git+https://github.com/huggingface/peft.git`" ), ) parser.add_argument( "--image_interpolation_mode", type=str, default="lanczos", choices=[ f.lower() for f in dir(transforms.InterpolationMode) if not f.startswith("__") and not f.endswith("__") ], help="The image interpolation method to use for resizing images.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() if args.dataset_name is None and args.instance_data_dir is None: raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`") if args.dataset_name is not None and args.instance_data_dir is not None: raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`") env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images. 
""" def __init__( self, instance_data_root, instance_prompt, class_prompt, class_data_root=None, class_num=None, size=1024, repeats=1, center_crop=False, ): self.size = size self.center_crop = center_crop self.instance_prompt = instance_prompt self.custom_instance_prompts = None self.class_prompt = class_prompt # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory, # we load the training data using load_dataset if args.dataset_name is not None: try: from datasets import load_dataset except ImportError: raise ImportError( "You are trying to load your data using the datasets library. If you wish to train using custom " "captions please install the datasets library: `pip install datasets`. If you wish to load a " "local folder containing images only, specify --instance_data_dir instead." ) # Downloading and loading a dataset from the hub. # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) # Preprocessing the datasets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. if args.image_column is None: image_column = column_names[0] logger.info(f"image column defaulting to {image_column}") else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) instance_images = dataset["train"][image_column] if args.caption_column is None: logger.info( "No caption column provided, defaulting to instance_prompt for all images. If your dataset " "contains captions/prompts for the images, make sure to specify the " "column as --caption_column" ) self.custom_instance_prompts = None else: if args.caption_column not in column_names: raise ValueError( f"`--caption_column` value '{args.caption_column}' not found in dataset columns. 
Dataset columns are: {', '.join(column_names)}" ) custom_instance_prompts = dataset["train"][args.caption_column] # create final list of captions according to --repeats self.custom_instance_prompts = [] for caption in custom_instance_prompts: self.custom_instance_prompts.extend(itertools.repeat(caption, repeats)) else: self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())] self.custom_instance_prompts = None self.instance_images = [] for img in instance_images: self.instance_images.extend(itertools.repeat(img, repeats)) # image processing to prepare for using SD-XL micro-conditioning self.original_sizes = [] self.crop_top_lefts = [] self.pixel_values = [] interpolation = getattr(transforms.InterpolationMode, args.image_interpolation_mode.upper(), None) if interpolation is None: raise ValueError(f"Unsupported interpolation mode {interpolation=}.") train_resize = transforms.Resize(size, interpolation=interpolation) train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size) train_flip = transforms.RandomHorizontalFlip(p=1.0) train_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) for image in self.instance_images: image = exif_transpose(image) if not image.mode == "RGB": image = image.convert("RGB") self.original_sizes.append((image.height, image.width)) image = train_resize(image) if args.random_flip and random.random() < 0.5: # flip image = train_flip(image) if args.center_crop: y1 = max(0, int(round((image.height - args.resolution) / 2.0))) x1 = max(0, int(round((image.width - args.resolution) / 2.0))) image = train_crop(image) else: y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution)) image = crop(image, y1, x1, h, w) crop_top_left = (y1, x1) self.crop_top_lefts.append(crop_top_left) image = train_transforms(image) self.pixel_values.append(image) self.num_instance_images = len(self.instance_images) self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) if class_num is not None: self.num_class_images = min(len(self.class_images_path), class_num) else: self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=interpolation), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = self.pixel_values[index % self.num_instance_images] original_size = self.original_sizes[index % self.num_instance_images] crop_top_left = self.crop_top_lefts[index % self.num_instance_images] example["instance_images"] = instance_image example["original_size"] = original_size example["crop_top_left"] = crop_top_left if self.custom_instance_prompts: caption = self.custom_instance_prompts[index % self.num_instance_images] if caption: example["instance_prompt"] = caption else: example["instance_prompt"] = self.instance_prompt else: # custom prompts were provided, but length does not match size of image 
dataset example["instance_prompt"] = self.instance_prompt if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) class_image = exif_transpose(class_image) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt"] = self.class_prompt return example def collate_fn(examples, with_prior_preservation=False): pixel_values = [example["instance_images"] for example in examples] prompts = [example["instance_prompt"] for example in examples] original_sizes = [example["original_size"] for example in examples] crop_top_lefts = [example["crop_top_left"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: pixel_values += [example["class_images"] for example in examples] prompts += [example["class_prompt"] for example in examples] original_sizes += [example["original_size"] for example in examples] crop_top_lefts += [example["crop_top_left"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() batch = { "pixel_values": pixel_values, "prompts": prompts, "original_sizes": original_sizes, "crop_top_lefts": crop_top_lefts, } return batch class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def tokenize_prompt(tokenizer, prompt): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids return text_input_ids # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None): prompt_embeds_list = [] for i, text_encoder in enumerate(text_encoders): if tokenizers is not None: tokenizer = tokenizers[i] text_input_ids = tokenize_prompt(tokenizer, prompt) else: assert text_input_ids_list is not None text_input_ids = text_input_ids_list[i] prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, return_dict=False ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds[-1][-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) if args.do_edm_style_training and args.snr_gamma is not None: raise ValueError("Min-SNR formulation is not supported when conducting EDM-style training.") if torch.backends.mps.is_available() and args.mixed_precision == "bf16": # due to pytorch#99272, MPS does not yet support bfloat16. 
raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs], ) # Disable AMP for MPS. if torch.backends.mps.is_available(): accelerator.native_amp = False if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: has_supported_fp16_accelerator = torch.cuda.is_available() or torch.backends.mps.is_available() torch_dtype = torch.float16 if has_supported_fp16_accelerator else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, revision=args.revision, variant=args.variant, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizers tokenizer_one = AutoTokenizer.from_pretrained( 
args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models scheduler_type = determine_scheduler_type(args.pretrained_model_name_or_path, args.revision) if "EDM" in scheduler_type: args.do_edm_style_training = True noise_scheduler = EDMEulerScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") logger.info("Performing EDM-style training!") elif args.do_edm_style_training: noise_scheduler = EulerDiscreteScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler" ) logger.info("Performing EDM-style training!") else: noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) latents_mean = latents_std = None if hasattr(vae.config, "latents_mean") and vae.config.latents_mean is not None: latents_mean = torch.tensor(vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(vae.config, "latents_std") and vae.config.latents_std is not None: latents_std = torch.tensor(vae.config.latents_std).view(1, 4, 1, 1) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # We only train the additional adapter LoRA layers vae.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) unet.requires_grad_(False) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) # Move unet, vae and text_encoder to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) # The VAE is always in float32 to avoid NaN losses. 
vae.to(accelerator.device, dtype=torch.float32) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, " "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder_one.gradient_checkpointing_enable() text_encoder_two.gradient_checkpointing_enable() def get_lora_config(rank, dropout, use_dora, target_modules): base_config = { "r": rank, "lora_alpha": rank, "lora_dropout": dropout, "init_lora_weights": "gaussian", "target_modules": target_modules, } if use_dora: if is_peft_version("<", "0.9.0"): raise ValueError( "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." ) else: base_config["use_dora"] = True return LoraConfig(**base_config) # now we will add new LoRA weights to the attention layers unet_target_modules = ["to_k", "to_q", "to_v", "to_out.0"] unet_lora_config = get_lora_config( rank=args.rank, dropout=args.lora_dropout, use_dora=args.use_dora, target_modules=unet_target_modules, ) unet.add_adapter(unet_lora_config) # The text encoder comes from 🤗 transformers, so we cannot directly modify it. # So, instead, we monkey-patch the forward calls of its attention-blocks. if args.train_text_encoder: text_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"] text_lora_config = get_lora_config( rank=args.rank, dropout=args.lora_dropout, use_dora=args.use_dora, target_modules=text_target_modules, ) text_encoder_one.add_adapter(text_lora_config) text_encoder_two.add_adapter(text_lora_config) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: # there are only two options here. 
Either are just the unet attn processor layers # or there are the unet and text encoder atten layers unet_lora_layers_to_save = None text_encoder_one_lora_layers_to_save = None text_encoder_two_lora_layers_to_save = None for model in models: if isinstance(model, type(unwrap_model(unet))): unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model)) elif isinstance(model, type(unwrap_model(text_encoder_one))): text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers( get_peft_model_state_dict(model) ) elif isinstance(model, type(unwrap_model(text_encoder_two))): text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers( get_peft_model_state_dict(model) ) else: raise ValueError(f"unexpected save model: {model.__class__}") # make sure to pop weight so that corresponding model is not saved again weights.pop() StableDiffusionXLPipeline.save_lora_weights( output_dir, unet_lora_layers=unet_lora_layers_to_save, text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save, ) def load_model_hook(models, input_dir): unet_ = None text_encoder_one_ = None text_encoder_two_ = None while len(models) > 0: model = models.pop() if isinstance(model, type(unwrap_model(unet))): unet_ = model elif isinstance(model, type(unwrap_model(text_encoder_one))): text_encoder_one_ = model elif isinstance(model, type(unwrap_model(text_encoder_two))): text_encoder_two_ = model else: raise ValueError(f"unexpected save model: {model.__class__}") lora_state_dict, network_alphas = StableDiffusionLoraLoaderMixin.lora_state_dict(input_dir) unet_state_dict = {f"{k.replace('unet.', '')}": v for k, v in lora_state_dict.items() if k.startswith("unet.")} unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict) incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) if args.train_text_encoder: # Do we need to call `scale_lora_layers()` here? _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_) _set_state_dict_into_text_encoder( lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_two_ ) # Make sure the trainable params are in float32. This is again needed since the base models # are in `weight_dtype`. More details: # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 if args.mixed_precision == "fp16": models = [unet_] if args.train_text_encoder: models.extend([text_encoder_one_, text_encoder_two_]) # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Make sure the trainable params are in float32. 
if args.mixed_precision == "fp16": models = [unet] if args.train_text_encoder: models.extend([text_encoder_one, text_encoder_two]) # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models, dtype=torch.float32) unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters())) if args.train_text_encoder: text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters())) text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters())) # Optimization parameters unet_lora_parameters_with_lr = {"params": unet_lora_parameters, "lr": args.learning_rate} if args.train_text_encoder: # different learning rate for text encoder and unet text_lora_parameters_one_with_lr = { "params": text_lora_parameters_one, "weight_decay": args.adam_weight_decay_text_encoder, "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate, } text_lora_parameters_two_with_lr = { "params": text_lora_parameters_two, "weight_decay": args.adam_weight_decay_text_encoder, "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate, } params_to_optimize = [ unet_lora_parameters_with_lr, text_lora_parameters_one_with_lr, text_lora_parameters_two_with_lr, ] else: params_to_optimize = [unet_lora_parameters_with_lr] # Optimizer creation if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): logger.warning( f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." "Defaulting to adamW" ) args.optimizer = "adamw" if args.use_8bit_adam and not args.optimizer.lower() == "adamw": logger.warning( f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " f"set to {args.optimizer.lower()}" ) if args.optimizer.lower() == "adamw": if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) if args.optimizer.lower() == "prodigy": try: import prodigyopt except ImportError: raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") optimizer_class = prodigyopt.Prodigy if args.learning_rate <= 0.1: logger.warning( "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" ) if args.train_text_encoder and args.text_encoder_lr: logger.warning( f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:" f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. " f"When using prodigy only learning_rate is used as the initial learning rate." 
) # changes the learning rate of text_encoder_parameters_one and text_encoder_parameters_two to be # --learning_rate params_to_optimize[1]["lr"] = args.learning_rate params_to_optimize[2]["lr"] = args.learning_rate optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), beta3=args.prodigy_beta3, weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, decouple=args.prodigy_decouple, use_bias_correction=args.prodigy_use_bias_correction, safeguard_warmup=args.prodigy_safeguard_warmup, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_prompt=args.class_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_num=args.num_class_images, size=args.resolution, repeats=args.repeats, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers, ) # Computes additional embeddings/ids required by the SDXL UNet. # regular text embeddings (when `train_text_encoder` is not True) # pooled text embeddings # time ids def compute_time_ids(original_size, crops_coords_top_left): # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids]) add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids if not args.train_text_encoder: tokenizers = [tokenizer_one, tokenizer_two] text_encoders = [text_encoder_one, text_encoder_two] def compute_text_embeddings(prompt, text_encoders, tokenizers): with torch.no_grad(): prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt) prompt_embeds = prompt_embeds.to(accelerator.device) pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device) return prompt_embeds, pooled_prompt_embeds # If no type of tuning is done on the text_encoder and custom instance prompts are NOT # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid # the redundant encoding. if not args.train_text_encoder and not train_dataset.custom_instance_prompts: instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings( args.instance_prompt, text_encoders, tokenizers ) # Handle class prompt for prior-preservation. if args.with_prior_preservation: if not args.train_text_encoder: class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings( args.class_prompt, text_encoders, tokenizers ) # Clear the memory here if not args.train_text_encoder and not train_dataset.custom_instance_prompts: del tokenizers, text_encoders gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images), # pack the statically computed variables appropriately here. This is so that we don't # have to pass them to the dataloader. 
if not train_dataset.custom_instance_prompts: if not args.train_text_encoder: prompt_embeds = instance_prompt_hidden_states unet_add_text_embeds = instance_pooled_prompt_embeds if args.with_prior_preservation: prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0) unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0) # if we're optimizing the text encoder (both if instance prompt is used for all images or custom prompts) we need to tokenize and encode the # batch prompts on all training steps else: tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt) tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt) if args.with_prior_preservation: class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt) class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt) tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0) tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0) # Scheduler and math around the number of training steps. # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation. num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes if args.max_train_steps is None: len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes) num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps) num_training_steps_for_scheduler = ( args.num_train_epochs * accelerator.num_processes * num_update_steps_per_epoch ) else: num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=num_warmup_steps_for_scheduler, num_training_steps=num_training_steps_for_scheduler, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch if num_training_steps_for_scheduler != args.max_train_steps: logger.warning( f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match " f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. " f"This inconsistency may result in the learning rate scheduler not functioning properly." ) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: tracker_name = ( "dreambooth-lora-sd-xl" if "playground" not in args.pretrained_model_name_or_path else "dreambooth-lora-playground" ) accelerator.init_trackers(tracker_name, config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype) schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device) timesteps = timesteps.to(accelerator.device) step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder_one.train() text_encoder_two.train() # set top parameter requires_grad = True for gradient checkpointing works accelerator.unwrap_model(text_encoder_one).text_model.embeddings.requires_grad_(True) accelerator.unwrap_model(text_encoder_two).text_model.embeddings.requires_grad_(True) for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): pixel_values = batch["pixel_values"].to(dtype=vae.dtype) prompts = batch["prompts"] # encode batch prompts when custom prompts are provided for each image - if train_dataset.custom_instance_prompts: if not args.train_text_encoder: prompt_embeds, unet_add_text_embeds = compute_text_embeddings( prompts, text_encoders, tokenizers ) else: tokens_one = tokenize_prompt(tokenizer_one, prompts) tokens_two = tokenize_prompt(tokenizer_two, prompts) # Convert images to latent space model_input = vae.encode(pixel_values).latent_dist.sample() if latents_mean is None and latents_std is None: model_input = model_input * vae.config.scaling_factor if args.pretrained_vae_model_name_or_path is None: model_input = model_input.to(weight_dtype) else: latents_mean = latents_mean.to(device=model_input.device, dtype=model_input.dtype) latents_std = latents_std.to(device=model_input.device, dtype=model_input.dtype) model_input = (model_input - latents_mean) * vae.config.scaling_factor / latents_std model_input = model_input.to(dtype=weight_dtype) # Sample noise that we'll add to the latents noise = torch.randn_like(model_input) bsz = model_input.shape[0] # Sample a random timestep for each image if not args.do_edm_style_training: timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device ) timesteps = timesteps.long() else: # in EDM formulation, the model is conditioned on the pre-conditioned noise levels # instead of discrete timesteps, so here we sample indices to get the noise levels # from `scheduler.timesteps` indices = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,)) timesteps = noise_scheduler.timesteps[indices].to(device=model_input.device) # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) # For EDM-style training, we first obtain the sigmas based on the continuous timesteps. # We then precondition the final model inputs based on these sigmas instead of the timesteps. # Follow: Section 5 of https://huggingface.co/papers/2206.00364. 
if args.do_edm_style_training: sigmas = get_sigmas(timesteps, len(noisy_model_input.shape), noisy_model_input.dtype) if "EDM" in scheduler_type: inp_noisy_latents = noise_scheduler.precondition_inputs(noisy_model_input, sigmas) else: inp_noisy_latents = noisy_model_input / ((sigmas**2 + 1) ** 0.5) # time ids add_time_ids = torch.cat( [ compute_time_ids(original_size=s, crops_coords_top_left=c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"]) ] ) # Calculate the elements to repeat depending on the use of prior-preservation and custom captions. if not train_dataset.custom_instance_prompts: elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz else: elems_to_repeat_text_embeds = 1 # Predict the noise residual if not args.train_text_encoder: unet_added_conditions = { "time_ids": add_time_ids, "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat_text_embeds, 1), } prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1) model_pred = unet( inp_noisy_latents if args.do_edm_style_training else noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions, return_dict=False, )[0] else: unet_added_conditions = {"time_ids": add_time_ids} prompt_embeds, pooled_prompt_embeds = encode_prompt( text_encoders=[text_encoder_one, text_encoder_two], tokenizers=None, prompt=None, text_input_ids_list=[tokens_one, tokens_two], ) unet_added_conditions.update( {"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat_text_embeds, 1)} ) prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1) model_pred = unet( inp_noisy_latents if args.do_edm_style_training else noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions, return_dict=False, )[0] weighting = None if args.do_edm_style_training: # Similar to the input preconditioning, the model predictions are also preconditioned # on noised model inputs (before preconditioning) and the sigmas. # Follow: Section 5 of https://huggingface.co/papers/2206.00364. if "EDM" in scheduler_type: model_pred = noise_scheduler.precondition_outputs(noisy_model_input, model_pred, sigmas) else: if noise_scheduler.config.prediction_type == "epsilon": model_pred = model_pred * (-sigmas) + noisy_model_input elif noise_scheduler.config.prediction_type == "v_prediction": model_pred = model_pred * (-sigmas / (sigmas**2 + 1) ** 0.5) + ( noisy_model_input / (sigmas**2 + 1) ) # We are not doing weighting here because it tends result in numerical problems. # See: https://github.com/huggingface/diffusers/pull/7126#issuecomment-1968523051 # There might be other alternatives for weighting as well: # https://github.com/huggingface/diffusers/pull/7126#discussion_r1505404686 if "EDM" not in scheduler_type: weighting = (sigmas**-2.0).float() # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = model_input if args.do_edm_style_training else noise elif noise_scheduler.config.prediction_type == "v_prediction": target = ( model_input if args.do_edm_style_training else noise_scheduler.get_velocity(model_input, noise, timesteps) ) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. 
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute prior loss if weighting is not None: prior_loss = torch.mean( (weighting.float() * (model_pred_prior.float() - target_prior.float()) ** 2).reshape( target_prior.shape[0], -1 ), 1, ) prior_loss = prior_loss.mean() else: prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") if args.snr_gamma is None: if weighting is not None: loss = torch.mean( (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape( target.shape[0], -1 ), 1, ) loss = loss.mean() else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://huggingface.co/papers/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) base_weight = ( torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr ) if noise_scheduler.config.prediction_type == "v_prediction": # Velocity objective needs to be floored to an SNR weight of one. mse_loss_weights = base_weight + 1 else: # Epsilon and sample both use the same loss weights. mse_loss_weights = base_weight loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() if args.with_prior_preservation: # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two) if args.train_text_encoder else unet_lora_parameters ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if 
args.validation_prompt is not None and epoch % args.validation_epochs == 0: # create pipeline if not args.train_text_encoder: text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant, ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, text_encoder=accelerator.unwrap_model(text_encoder_one), text_encoder_2=accelerator.unwrap_model(text_encoder_two), unet=accelerator.unwrap_model(unet), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline_args = {"prompt": args.validation_prompt} images = log_validation( pipeline, args, accelerator, pipeline_args, epoch, torch_dtype=weight_dtype, ) # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) unet = unet.to(torch.float32) unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet)) if args.train_text_encoder: text_encoder_one = unwrap_model(text_encoder_one) text_encoder_lora_layers = convert_state_dict_to_diffusers( get_peft_model_state_dict(text_encoder_one.to(torch.float32)) ) text_encoder_two = unwrap_model(text_encoder_two) text_encoder_2_lora_layers = convert_state_dict_to_diffusers( get_peft_model_state_dict(text_encoder_two.to(torch.float32)) ) else: text_encoder_lora_layers = None text_encoder_2_lora_layers = None StableDiffusionXLPipeline.save_lora_weights( save_directory=args.output_dir, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=text_encoder_lora_layers, text_encoder_2_lora_layers=text_encoder_2_lora_layers, ) if args.output_kohya_format: lora_state_dict = load_file(f"{args.output_dir}/pytorch_lora_weights.safetensors") peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict) kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict) save_file(kohya_state_dict, f"{args.output_dir}/pytorch_lora_weights_kohya.safetensors") # Final inference # Load previous pipeline vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) # load attention processors pipeline.load_lora_weights(args.output_dir) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: pipeline_args = {"prompt": args.validation_prompt, "num_inference_steps": 25} images = log_validation( pipeline, args, accelerator, pipeline_args, epoch, is_final_validation=True, torch_dtype=weight_dtype, ) if args.push_to_hub: save_model_card( repo_id, use_dora=args.use_dora, images=images, base_model=args.pretrained_model_name_or_path, train_text_encoder=args.train_text_encoder, instance_prompt=args.instance_prompt, validation_prompt=args.validation_prompt, repo_folder=args.output_dir, vae_path=args.pretrained_vae_model_name_or_path, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
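# --- Illustrative aside (not part of the original script) -------------------
# The `--with_prior_preservation` branch above splits each batch into an
# instance half and a class (prior) half and sums their losses. A minimal,
# self-contained sketch of that combination, with made-up shapes and a
# hypothetical `prior_loss_weight` value:
import torch
import torch.nn.functional as F


def dreambooth_prior_preservation_loss(model_pred, target, prior_loss_weight=1.0):
    # Batches are stacked as [instance examples ; class examples] along dim 0,
    # mirroring how the collate function arranges prior-preservation batches.
    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
    target, target_prior = torch.chunk(target, 2, dim=0)
    instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
    return instance_loss + prior_loss_weight * prior_loss


# Toy latents: 2 instance + 2 class samples of shape (4, 64, 64).
_pred, _tgt = torch.randn(4, 4, 64, 64), torch.randn(4, 4, 64, 64)
print(dreambooth_prior_preservation_loss(_pred, _tgt, prior_loss_weight=1.0))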
diffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py/0
{ "file_path": "diffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py", "repo_id": "diffusers", "token_count": 39424 }
141
from torch import nn class CTCHead(nn.Module): def __init__( self, in_channels, out_channels=6625, fc_decay=0.0004, mid_channels=None, return_feats=False, **kwargs ): super(CTCHead, self).__init__() if mid_channels is None: self.fc = nn.Linear( in_channels, out_channels, bias=True, ) else: self.fc1 = nn.Linear( in_channels, mid_channels, bias=True, ) self.fc2 = nn.Linear( mid_channels, out_channels, bias=True, ) self.out_channels = out_channels self.mid_channels = mid_channels self.return_feats = return_feats def forward(self, x, labels=None): if self.mid_channels is None: predicts = self.fc(x) else: x = self.fc1(x) predicts = self.fc2(x) if self.return_feats: result = {} result["ctc"] = predicts result["ctc_neck"] = x else: result = predicts return result
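# --- Illustrative aside (not part of the original module) -------------------
# Quick usage sketch of the head defined above; the channel sizes and sequence
# length are made up for illustration, not the values used by the AnyText OCR model.
import torch

_head = CTCHead(in_channels=64, out_channels=6625, mid_channels=96, return_feats=True)
_x = torch.randn(2, 40, 64)  # (batch, sequence length, feature channels)
_out = _head(_x)
print(_out["ctc"].shape)       # torch.Size([2, 40, 6625]) - per-step logits for CTC decoding
print(_out["ctc_neck"].shape)  # torch.Size([2, 40, 96])   - intermediate "neck" features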
diffusers/examples/research_projects/anytext/ocr_recog/RecCTCHead.py/0
{ "file_path": "diffusers/examples/research_projects/anytext/ocr_recog/RecCTCHead.py", "repo_id": "diffusers", "token_count": 689 }
142
compute_environment: LOCAL_MACHINE debug: false deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: false zero_stage: 2 distributed_type: DEEPSPEED downcast_bf16: 'no' enable_cpu_affinity: false machine_rank: 0 main_training_function: main mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
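This configuration is normally consumed via `accelerate launch --config_file ds2.yaml train.py`. As a rough programmatic equivalent (my reading of the YAML above, and DeepSpeed must be installed for it to run), the same settings can be expressed with `accelerate`'s `DeepSpeedPlugin`:

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Mirrors the ZeRO stage 2 + CPU offload settings from the YAML above.
plugin = DeepSpeedPlugin(
    zero_stage=2,
    gradient_accumulation_steps=1,
    gradient_clipping=1.0,
    offload_optimizer_device="cpu",
    offload_param_device="cpu",
)
accelerator = Accelerator(mixed_precision="no", deepspeed_plugin=plugin)
print(accelerator.state)
```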
diffusers/examples/research_projects/flux_lora_quantization/ds2.yaml/0
{ "file_path": "diffusers/examples/research_projects/flux_lora_quantization/ds2.yaml", "repo_id": "diffusers", "token_count": 196 }
143
# Würstchen text-to-image fine-tuning ## Running locally with PyTorch Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd into the example folder and run ```bash cd examples/wuerstchen/text_to_image pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag to the training script. To log in, run: ```bash hf auth login ``` ## Prior training You can fine-tune the Würstchen prior model with the `train_text_to_image_prior.py` script. Note that we currently support `--gradient_checkpointing` for prior model fine-tuning so you can use it for more GPU memory constrained setups. <br> <!-- accelerate_snippet_start --> ```bash export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch train_text_to_image_prior.py \ --mixed_precision="fp16" \ --dataset_name=$DATASET_NAME \ --resolution=768 \ --train_batch_size=4 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --dataloader_num_workers=4 \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --checkpoints_total_limit=3 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --validation_prompts="A robot naruto, 4k photo" \ --report_to="wandb" \ --push_to_hub \ --output_dir="wuerstchen-prior-naruto-model" ``` <!-- accelerate_snippet_end --> ## Training with LoRA Low-Rank Adaption of Large Language Models (or LoRA) was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages: - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114). - Rank-decomposition matrices have significantly fewer parameters than original model, which means that trained LoRA weights are easily portable. - LoRA attention layers allow to control to which extent the model is adapted toward new training images via a `scale` parameter. ### Prior Training First, you need to set up your development environment as explained in the [installation](#Running-locally-with-PyTorch) section. Make sure to set the `DATASET_NAME` environment variable. Here, we will use the [Naruto captions dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions). 
```bash export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch train_text_to_image_lora_prior.py \ --mixed_precision="fp16" \ --dataset_name=$DATASET_NAME --caption_column="text" \ --resolution=768 \ --train_batch_size=8 \ --num_train_epochs=100 --checkpointing_steps=5000 \ --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ --seed=42 \ --rank=4 \ --validation_prompt="cute dragon creature" \ --report_to="wandb" \ --push_to_hub \ --output_dir="wuerstchen-prior-naruto-lora" ```
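
The rank-decomposition idea described above can be illustrated in a few lines of PyTorch. This is only a conceptual sketch, not the adapter implementation used by `peft` or by the training scripts:

```python
import torch
from torch import nn


class LoRALinear(nn.Module):
    """Frozen base layer plus a trainable low-rank update (B @ A), scaled by `scale`."""

    def __init__(self, base: nn.Linear, rank: int = 4, scale: float = 1.0):
        super().__init__()
        self.base = base.requires_grad_(False)  # pretrained weights stay frozen
        self.lora_a = nn.Linear(base.in_features, rank, bias=False)
        self.lora_b = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.lora_b.weight)  # the low-rank update starts at zero
        self.scale = scale

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))


layer = LoRALinear(nn.Linear(320, 320), rank=4)
print(layer(torch.randn(2, 320)).shape)  # torch.Size([2, 320])
```

Only `lora_a` and `lora_b` receive gradients, which is why the saved adapter weights stay small and portable.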
diffusers/examples/research_projects/wuerstchen/text_to_image/README.md/0
{ "file_path": "diffusers/examples/research_projects/wuerstchen/text_to_image/README.md", "repo_id": "diffusers", "token_count": 1206 }
144
# Stable Diffusion text-to-image fine-tuning The `train_text_to_image.py` script shows how to fine-tune stable diffusion model on your own dataset. ___Note___: ___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___ ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment. ### Naruto example You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-4`, so you'll need to visit [its card](https://huggingface.co/CompVis/stable-diffusion-v1-4), read the license and tick the checkbox if you agree. You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens). Run the following command to authenticate your token ```bash hf auth login ``` If you have already cloned the repo, then you won't need to go through these steps. <br> #### Hardware With `gradient_checkpointing` and `mixed_precision` it should be possible to fine tune the model on a single 24GB GPU. For higher `batch_size` and faster training it's better to use GPUs with >30GB memory. **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** <!-- accelerate_snippet_start --> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-naruto-model" ``` <!-- accelerate_snippet_end --> To run on your own training files prepare the dataset according to the format required by `datasets`, you can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata). If you wish to use custom loading logic, you should modify the script, we have left pointers for that in the training script. 
```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export TRAIN_DIR="path_to_your_dataset" accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$TRAIN_DIR \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-naruto-model" ``` Once the training is finished the model will be saved in the `output_dir` specified in the command. In this example it's `sd-naruto-model`. To load the fine-tuned model for inference just pass that path to `StableDiffusionPipeline` ```python import torch from diffusers import StableDiffusionPipeline model_path = "path_to_saved_model" pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16) pipe.to("cuda") image = pipe(prompt="yoda").images[0] image.save("yoda-naruto.png") ``` Checkpoints only save the unet, so to run inference from a checkpoint, just load the unet ```python import torch from diffusers import StableDiffusionPipeline, UNet2DConditionModel model_path = "path_to_saved_model" unet = UNet2DConditionModel.from_pretrained(model_path + "/checkpoint-<N>/unet", torch_dtype=torch.float16) pipe = StableDiffusionPipeline.from_pretrained("<initial model>", unet=unet, torch_dtype=torch.float16) pipe.to("cuda") image = pipe(prompt="yoda").images[0] image.save("yoda-naruto.png") ``` #### Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export DATASET_NAME="lambdalabs/naruto-blip-captions" accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-naruto-model" ``` #### Training with Min-SNR weighting We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://huggingface.co/papers/2303.09556) which helps to achieve faster convergence by rebalancing the loss. In order to use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is 5.0. You can find [this project on Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) that compares the loss surfaces of the following setups: * Training without the Min-SNR weighting strategy * Training with the Min-SNR weighting strategy (`snr_gamma` set to 5.0) * Training with the Min-SNR weighting strategy (`snr_gamma` set to 1.0) For our small Narutos dataset, the effects of Min-SNR weighting strategy might not appear to be pronounced, but for larger datasets, we believe the effects will be more pronounced. Also, note that in this example, we either predict `epsilon` (i.e., the noise) or the `v_prediction`. For both of these cases, the formulation of the Min-SNR weighting strategy that we have used holds. 
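
For reference, the core of the Min-SNR weighting fits in a few lines. The sketch below assumes an epsilon-prediction model and a DDPM-style scheduler exposing `alphas_cumprod`; the schedule used here is a stand-in, not the one from training:

```python
import torch


def min_snr_loss_weights(alphas_cumprod, timesteps, snr_gamma=5.0):
    # SNR(t) = alpha_bar_t / (1 - alpha_bar_t); clamp it at snr_gamma, then divide
    # by SNR (the epsilon-prediction form of the weighting).
    alpha_bar = alphas_cumprod[timesteps]
    snr = alpha_bar / (1.0 - alpha_bar)
    return torch.minimum(snr, torch.full_like(snr, snr_gamma)) / snr


alphas_cumprod = torch.linspace(0.9999, 0.01, 1000)  # stand-in noise schedule
timesteps = torch.randint(0, 1000, (4,))
per_sample_mse = torch.rand(4)
loss = (per_sample_mse * min_snr_loss_weights(alphas_cumprod, timesteps)).mean()
print(loss)
```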
#### Training with EMA weights Through the `EMAModel` class, we support a convenient method of tracking an exponential moving average of model parameters. This helps to smooth out noise in model parameter updates and generally improves model performance. If enabled with the `--use_ema` argument, the final model checkpoint that is saved at the end of training will use the EMA weights. EMA weights require an additional full-precision copy of the model parameters to be stored in memory, but otherwise have very little performance overhead. `--foreach_ema` can be used to further reduce the overhead. If you are short on VRAM and still want to use EMA weights, you can store them in CPU RAM by using the `--offload_ema` argument. This will keep the EMA weights in pinned CPU memory during the training step. Then, once every model parameter update, it will transfer the EMA weights back to the GPU which can then update the parameters on the GPU, before sending them back to the CPU. Both of these transfers are set up as non-blocking, so CUDA devices should be able to overlap this transfer with other computations. With sufficient bandwidth between the host and device and a sufficiently long gap between model parameter updates, storing EMA weights in CPU RAM should have no additional performance overhead, as long as no other calls force synchronization. #### Training with DREAM We support training epsilon (noise) prediction models using the [DREAM (Diffusion Rectification and Estimation-Adaptive Models) strategy](https://huggingface.co/papers/2312.00210). DREAM claims to increase model fidelity for the performance cost of an extra grad-less unet `forward` step in the training loop. You can turn on DREAM training by using the `--dream_training` argument. The `--dream_detail_preservation` argument controls the detail preservation variable p and is the default of 1 from the paper. ## Training with LoRA Low-Rank Adaption of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages: - Previous pretrained weights are kept frozen so that model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114). - Rank-decomposition matrices have significantly fewer parameters than original model, which means that trained LoRA weights are easily portable. - LoRA attention layers allow to control to which extent the model is adapted toward new training images via a `scale` parameter. [cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset on consumer GPUs like Tesla T4, Tesla V100. ### Training First, you need to set up your development environment as is explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. 
Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Narutos dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions). **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** **___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see generating images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___** ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export DATASET_NAME="lambdalabs/naruto-blip-captions" ``` For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag. ```bash hf auth login ``` Now we can start training! ```bash accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME --caption_column="text" \ --resolution=512 --random_flip \ --train_batch_size=1 \ --num_train_epochs=100 --checkpointing_steps=5000 \ --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ --seed=42 \ --output_dir="sd-naruto-model-lora" \ --validation_prompt="cute dragon creature" --report_to="wandb" ``` The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases. **___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` in consumer GPUs like T4 or V100.___** The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitudes smaller than the original model.___** You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw). ### Inference Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights which, in this case, is `sd-naruto-model-lora`. ```python from diffusers import StableDiffusionPipeline import torch model_path = "sayakpaul/sd-model-finetuned-lora-t4" pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.unet.load_attn_procs(model_path) pipe.to("cuda") prompt = "A naruto with green eyes and red legs." 
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0] image.save("naruto.png") ``` If you are loading the LoRA parameters from the Hub and if the Hub repository has a `base_model` tag (such as [this](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/README.md?code=true#L4)), then you can do: ```py from huggingface_hub.repocard import RepoCard lora_model_id = "sayakpaul/sd-model-finetuned-lora-t4" card = RepoCard.load(lora_model_id) base_model_id = card.data.to_dict()["base_model"] pipe = StableDiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) ... ``` ## Training with Flax/JAX For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script. **___Note: The flax example doesn't yet support features like gradient checkpoint, gradient accumulation etc, so to use flax for faster training we will need >30GB cards or TPU v3.___** Before running the scripts, make sure to install the library's training dependencies: ```bash pip install -U -r requirements_flax.txt ``` ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export DATASET_NAME="lambdalabs/naruto-blip-captions" python train_text_to_image_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --mixed_precision="fp16" \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --output_dir="sd-naruto-model" ``` To run on your own training files prepare the dataset according to the format required by `datasets`, you can find the instructions for how to do that in this [document](https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder-with-metadata). If you wish to use custom loading logic, you should modify the script, we have left pointers for that in the training script. ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export TRAIN_DIR="path_to_your_dataset" python train_text_to_image_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$TRAIN_DIR \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --mixed_precision="fp16" \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --output_dir="sd-naruto-model" ``` ### Training with xFormers: You can enable memory efficient attention by [installing xFormers](https://huggingface.co/docs/diffusers/main/en/optimization/xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. xFormers training is not available for Flax/JAX. **Note**: According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training in some GPUs. If you observe that problem, please install a development version as indicated in that comment. ## Stable Diffusion XL * We support fine-tuning the UNet shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) via the `train_text_to_image_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md). * We also support fine-tuning of the UNet and Text Encoder shipped in [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) with LoRA via the `train_text_to_image_lora_sdxl.py` script. Please refer to the docs [here](./README_sdxl.md).
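
As a complement to the `--enable_xformers_memory_efficient_attention` training flag mentioned above, a loaded pipeline can opt into the same memory-efficient attention at inference time (this requires `xformers` to be installed and a CUDA device):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()
image = pipe("A naruto with green eyes and red legs.").images[0]
```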
diffusers/examples/text_to_image/README.md/0
{ "file_path": "diffusers/examples/text_to_image/README.md", "repo_id": "diffusers", "token_count": 5279 }
145
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextualInversion(ExamplesTestsAccelerate): def test_textual_inversion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "learned_embeds.safetensors"))) def test_textual_inversion_checkpointing(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 3 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-3"}, ) def test_textual_inversion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs --learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-1", "checkpoint-2"}, ) resume_run_args = f""" examples/textual_inversion/textual_inversion.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --train_data_dir docs/source/en/imgs 
--learnable_property object --placeholder_token <cat-toy> --initializer_token a --save_steps 1 --num_vectors 2 --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-3"}, )
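# --- Illustrative aside (not part of the original tests) --------------------
# Once a run like the ones exercised above has written `learned_embeds.safetensors`,
# the embedding can be loaded for inference roughly as follows. The model id and
# output path are placeholders, and a CUDA device is assumed.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipe.load_textual_inversion("path/to/output_dir", weight_name="learned_embeds.safetensors")
image = pipe("A <cat-toy> sitting on a beach").images[0]
image.save("cat_toy_beach.png")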
diffusers/examples/textual_inversion/test_textual_inversion.py/0
{ "file_path": "diffusers/examples/textual_inversion/test_textual_inversion.py", "repo_id": "diffusers", "token_count": 2914 }
146
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNet2DConditionModel, UNet2DModel do_only_config = False do_only_weights = True do_only_renaming = False if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--repo_path", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() config_parameters_to_change = { "image_size": "sample_size", "num_res_blocks": "layers_per_block", "block_channels": "block_out_channels", "down_blocks": "down_block_types", "up_blocks": "up_block_types", "downscale_freq_shift": "freq_shift", "resnet_num_groups": "norm_num_groups", "resnet_act_fn": "act_fn", "resnet_eps": "norm_eps", "num_head_channels": "attention_head_dim", } key_parameters_to_change = { "time_steps": "time_proj", "mid": "mid_block", "downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks", } subfolder = "" if has_file(args.repo_path, "config.json") else "unet" with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader: text = reader.read() config = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, "config.json"): model = UNet2DModel(**config) else: class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel model = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) config = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: config[value] = config[key] del config[key] config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]] config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]] if do_only_weights: state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin")) new_state_dict = {} for param_key, param_value in state_dict.items(): if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"): continue has_changed = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(".")[0] == key: new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value has_changed = True if not has_changed: new_state_dict[param_key] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
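# --- Illustrative aside (not part of the original script) -------------------
# The weight renaming applied above only rewrites the first dotted component of
# each parameter name. A tiny, self-contained illustration with hypothetical keys:
_key_map = {
    "time_steps": "time_proj",
    "mid": "mid_block",
    "downsample_blocks": "down_blocks",
    "upsample_blocks": "up_blocks",
}


def _rename_key(param_key):
    head, *rest = param_key.split(".")
    return ".".join([_key_map.get(head, head)] + rest)


print(_rename_key("downsample_blocks.0.resnets.1.conv1.weight"))  # down_blocks.0.resnets.1.conv1.weight
print(_rename_key("time_steps.weight"))                           # time_proj.weight
print(_rename_key("conv_in.weight"))                              # unchanged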
diffusers/scripts/change_naming_configs_and_checkpoints.py/0
{ "file_path": "diffusers/scripts/change_naming_configs_and_checkpoints.py", "repo_id": "diffusers", "token_count": 1631 }
147
#!/usr/bin/env python3 import argparse import math import os from copy import deepcopy import requests import torch from audio_diffusion.models import DiffusionAttnUnet1D from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel from diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT MODELS_MAP = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 48000, "sample_size": 65536, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 48000, "sample_size": 65536, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 48000, "sample_size": 131072, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, } def alpha_sigma_to_t(alpha, sigma): """Returns a timestep, given the scaling factors for the clean image and for the noise.""" return torch.atan2(sigma, alpha) / math.pi * 2 def get_crash_schedule(t): sigma = torch.sin(t * math.pi / 2) ** 2 alpha = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(alpha, sigma) class Object(object): pass class DiffusionUncond(nn.Module): def __init__(self, global_args): super().__init__() self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4) self.diffusion_ema = deepcopy(self.diffusion) self.rng = torch.quasirandom.SobolEngine(1, scramble=True) def download(model_name): url = MODELS_MAP[model_name]["url"] r = requests.get(url, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT) local_filename = f"./{model_name}.ckpt" with open(local_filename, "wb") as fp: for chunk in r.iter_content(chunk_size=8192): fp.write(chunk) return local_filename DOWN_NUM_TO_LAYER = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } UP_NUM_TO_LAYER = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } MID_NUM_TO_LAYER = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } DEPTH_0_TO_LAYER = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } RES_CONV_MAP = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } ATTN_MAP = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def convert_resconv_naming(name): if name.startswith("skip"): return name.replace("skip", RES_CONV_MAP["skip"]) # name has to be of format main.{digit} if not name.startswith("main."): raise ValueError(f"ResConvBlock error with {name}") return name.replace(name[:6], RES_CONV_MAP[name[:6]]) def convert_attn_naming(name): for key, value in ATTN_MAP.items(): if name.startswith(key) and not isinstance(value, list): return name.replace(key, value) elif name.startswith(key): return [name.replace(key, v) for v in value] raise 
ValueError(f"Attn error with {name}") def rename(input_string, max_depth=13): string = input_string if string.split(".")[0] == "timestep_embed": return string.replace("timestep_embed", "time_proj") depth = 0 if string.startswith("net.3."): depth += 1 string = string[6:] elif string.startswith("net."): string = string[4:] while string.startswith("main.7."): depth += 1 string = string[7:] if string.startswith("main."): string = string[5:] # mid block if string[:2].isdigit(): layer_num = string[:2] string_left = string[2:] else: layer_num = string[0] string_left = string[1:] if depth == max_depth: new_layer = MID_NUM_TO_LAYER[layer_num] prefix = "mid_block" elif depth > 0 and int(layer_num) < 7: new_layer = DOWN_NUM_TO_LAYER[layer_num] prefix = f"down_blocks.{depth}" elif depth > 0 and int(layer_num) > 7: new_layer = UP_NUM_TO_LAYER[layer_num] prefix = f"up_blocks.{max_depth - depth - 1}" elif depth == 0: new_layer = DEPTH_0_TO_LAYER[layer_num] prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0" if not string_left.startswith("."): raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.") string_left = string_left[1:] if "resnets" in new_layer: string_left = convert_resconv_naming(string_left) elif "attentions" in new_layer: new_string_left = convert_attn_naming(string_left) string_left = new_string_left if not isinstance(string_left, list): new_string = prefix + "." + new_layer + "." + string_left else: new_string = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def rename_orig_weights(state_dict): new_state_dict = {} for k, v in state_dict.items(): if k.endswith("kernel"): # up- and downsample layers, don't have trainable weights continue new_k = rename(k) # check if we need to transform from Conv => Linear for attention if isinstance(new_k, list): new_state_dict = transform_conv_attns(new_state_dict, new_k, v) else: new_state_dict[new_k] = v return new_state_dict def transform_conv_attns(new_state_dict, new_k, v): if len(new_k) == 1: if len(v.shape) == 3: # weight new_state_dict[new_k[0]] = v[:, :, 0] else: # bias new_state_dict[new_k[0]] = v else: # qkv matrices trippled_shape = v.shape[0] single_shape = trippled_shape // 3 for i in range(3): if len(v.shape) == 3: new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0] else: new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def main(args): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model_name = args.model_path.split("/")[-1].split(".")[0] if not os.path.isfile(args.model_path): assert model_name == args.model_path, ( f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" ) args.model_path = download(model_name) sample_rate = MODELS_MAP[model_name]["sample_rate"] sample_size = MODELS_MAP[model_name]["sample_size"] config = Object() config.sample_size = sample_size config.sample_rate = sample_rate config.latent_dim = 0 diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate) diffusers_state_dict = diffusers_model.state_dict() orig_model = DiffusionUncond(config) orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"]) orig_model = orig_model.diffusion_ema.eval() orig_model_state_dict = orig_model.state_dict() renamed_state_dict = rename_orig_weights(orig_model_state_dict) renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys()) diffusers_minus_renamed = 
set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys()) assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}" assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}" for key, value in renamed_state_dict.items(): assert diffusers_state_dict[key].squeeze().shape == value.squeeze().shape, ( f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" ) if key == "time_proj.weight": value = value.squeeze() diffusers_state_dict[key] = value diffusers_model.load_state_dict(diffusers_state_dict) steps = 100 seed = 33 diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps) generator = torch.manual_seed(seed) noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device) t = torch.linspace(1, 0, steps + 1, device=device)[:-1] step_list = get_crash_schedule(t) pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler) generator = torch.manual_seed(33) audio = pipe(num_inference_steps=steps, generator=generator).audios generated = sampling.iplms_sample(orig_model, noise, step_list, {}) generated = generated.clamp(-1, 1) diff_sum = (generated - audio).abs().sum() diff_max = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path) print("Diff sum", diff_sum) print("Diff max", diff_max) assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/" print(f"Conversion for {model_name} successful!") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() main(args)
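# --- Illustrative aside (not part of the original script) -------------------
# One easily missed detail of the conversion above: the fused `qkv_proj` Conv1d
# weight of shape (3 * C, C, 1) is sliced into separate query/key/value matrices
# and the trailing conv dimension is dropped. A toy version with C = 8:
import torch

_channels = 8
_qkv_weight = torch.randn(3 * _channels, _channels, 1)
_single = _qkv_weight.shape[0] // 3
_query_w, _key_w, _value_w = (_qkv_weight[i * _single : (i + 1) * _single, :, 0] for i in range(3))
print(_query_w.shape, _key_w.shape, _value_w.shape)  # each torch.Size([8, 8])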
diffusers/scripts/convert_dance_diffusion_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_dance_diffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 4672 }
148
import argparse import tempfile import torch from accelerate import load_checkpoint_and_dispatch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from diffusers import UnCLIPPipeline, UNet2DConditionModel, UNet2DModel from diffusers.models.transformers.prior_transformer import PriorTransformer from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.schedulers.scheduling_unclip import UnCLIPScheduler r""" Example - From the diffusers root directory: Download weights: ```sh $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/efdf6206d8ed593961593dc029a8affa/decoder-ckpt-step%3D01000000-of-01000000.ckpt $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/4226b831ae0279020d134281f3c31590/improved-sr-ckpt-step%3D1.2M.ckpt $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/85626483eaca9f581e2a78d31ff905ca/prior-ckpt-step%3D01000000-of-01000000.ckpt $ wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/0b62380a75e56f073e2844ab5199153d/ViT-L-14_stats.th ``` Convert the model: ```sh $ python scripts/convert_kakao_brain_unclip_to_diffusers.py \ --decoder_checkpoint_path ./decoder-ckpt-step\=01000000-of-01000000.ckpt \ --super_res_unet_checkpoint_path ./improved-sr-ckpt-step\=1.2M.ckpt \ --prior_checkpoint_path ./prior-ckpt-step\=01000000-of-01000000.ckpt \ --clip_stat_path ./ViT-L-14_stats.th \ --dump_path <path where to save model> ``` """ # prior PRIOR_ORIGINAL_PREFIX = "model" # Uses default arguments PRIOR_CONFIG = {} def prior_model_from_original_config(): model = PriorTransformer(**PRIOR_CONFIG) return model def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint, clip_stats_checkpoint): diffusers_checkpoint = {} # <original>.time_embed.0 -> <diffusers>.time_embedding.linear_1 diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.bias"], } ) # <original>.clip_img_proj -> <diffusers>.proj_in diffusers_checkpoint.update( { "proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.weight"], "proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.bias"], } ) # <original>.text_emb_proj -> <diffusers>.embedding_proj diffusers_checkpoint.update( { "embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.weight"], "embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.bias"], } ) # <original>.text_enc_proj -> <diffusers>.encoder_hidden_states_proj diffusers_checkpoint.update( { "encoder_hidden_states_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.weight"], "encoder_hidden_states_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.bias"], } ) # <original>.positional_embedding -> <diffusers>.positional_embedding diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.positional_embedding"]}) # <original>.prd_emb -> <diffusers>.prd_embedding diffusers_checkpoint.update({"prd_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.prd_emb"]}) # <original>.time_embed.2 -> <diffusers>.time_embedding.linear_2 diffusers_checkpoint.update( { "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.bias"], } ) # <original>.resblocks.<x> -> 
<diffusers>.transformer_blocks.<x> for idx in range(len(model.transformer_blocks)): diffusers_transformer_prefix = f"transformer_blocks.{idx}" original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.transformer.resblocks.{idx}" # <original>.attn -> <diffusers>.attn1 diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1" original_attention_prefix = f"{original_transformer_prefix}.attn" diffusers_checkpoint.update( prior_attention_to_diffusers( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, original_attention_prefix=original_attention_prefix, attention_head_dim=model.attention_head_dim, ) ) # <original>.mlp -> <diffusers>.ff diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff" original_ff_prefix = f"{original_transformer_prefix}.mlp" diffusers_checkpoint.update( prior_ff_to_diffusers( checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix ) ) # <original>.ln_1 -> <diffusers>.norm1 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[ f"{original_transformer_prefix}.ln_1.weight" ], f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"], } ) # <original>.ln_2 -> <diffusers>.norm3 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[ f"{original_transformer_prefix}.ln_2.weight" ], f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"], } ) # <original>.final_ln -> <diffusers>.norm_out diffusers_checkpoint.update( { "norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.weight"], "norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.bias"], } ) # <original>.out_proj -> <diffusers>.proj_to_clip_embeddings diffusers_checkpoint.update( { "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.weight"], "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.bias"], } ) # clip stats clip_mean, clip_std = clip_stats_checkpoint clip_mean = clip_mean[None, :] clip_std = clip_std[None, :] diffusers_checkpoint.update({"clip_mean": clip_mean, "clip_std": clip_std}) return diffusers_checkpoint def prior_attention_to_diffusers( checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim ): diffusers_checkpoint = {} # <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"], bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"], split=3, chunk_size=attention_head_dim, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.c_proj -> <diffusers>.to_out.0 diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"], } ) return diffusers_checkpoint def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix): diffusers_checkpoint = { # <original>.c_fc -> <diffusers>.net.0.proj f"{diffusers_ff_prefix}.net.{0}.proj.weight": 
checkpoint[f"{original_ff_prefix}.c_fc.weight"], f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"], # <original>.c_proj -> <diffusers>.net.2 f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"], f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"], } return diffusers_checkpoint # done prior # decoder DECODER_ORIGINAL_PREFIX = "model" # We are hardcoding the model configuration for now. If we need to generalize to more model configurations, we can # update then. DECODER_CONFIG = { "sample_size": 64, "layers_per_block": 3, "down_block_types": ( "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", ), "up_block_types": ( "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D", ), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (320, 640, 960, 1280), "in_channels": 3, "out_channels": 6, "cross_attention_dim": 1536, "class_embed_type": "identity", "attention_head_dim": 64, "resnet_time_scale_shift": "scale_shift", } def decoder_model_from_original_config(): model = UNet2DConditionModel(**DECODER_CONFIG) return model def decoder_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} original_unet_prefix = DECODER_ORIGINAL_PREFIX num_head_channels = DECODER_CONFIG["attention_head_dim"] diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=num_head_channels, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.input_blocks -> <diffusers>.down_blocks diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, original_unet_prefix=original_unet_prefix, num_head_channels=num_head_channels, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=num_head_channels, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix)) return diffusers_checkpoint # done decoder # text proj def text_proj_from_original_config(): # From the conditional unet constructor where the dimension of the projected time embeddings is # constructed time_embed_dim = DECODER_CONFIG["block_out_channels"][0] * 4 cross_attention_dim = DECODER_CONFIG["cross_attention_dim"] model = UnCLIPTextProjModel(time_embed_dim=time_embed_dim, cross_attention_dim=cross_attention_dim) 
return model # Note that the input checkpoint is the original decoder checkpoint def text_proj_original_checkpoint_to_diffusers_checkpoint(checkpoint): diffusers_checkpoint = { # <original>.text_seq_proj.0 -> <diffusers>.encoder_hidden_states_proj "encoder_hidden_states_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.0.weight"], "encoder_hidden_states_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.0.bias"], # <original>.text_seq_proj.1 -> <diffusers>.text_encoder_hidden_states_norm "text_encoder_hidden_states_norm.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.1.weight"], "text_encoder_hidden_states_norm.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_seq_proj.1.bias"], # <original>.clip_tok_proj -> <diffusers>.clip_extra_context_tokens_proj "clip_extra_context_tokens_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.clip_tok_proj.weight"], "clip_extra_context_tokens_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.clip_tok_proj.bias"], # <original>.text_feat_proj -> <diffusers>.embedding_proj "embedding_proj.weight": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_feat_proj.weight"], "embedding_proj.bias": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.text_feat_proj.bias"], # <original>.cf_param -> <diffusers>.learned_classifier_free_guidance_embeddings "learned_classifier_free_guidance_embeddings": checkpoint[f"{DECODER_ORIGINAL_PREFIX}.cf_param"], # <original>.clip_emb -> <diffusers>.clip_image_embeddings_project_to_time_embeddings "clip_image_embeddings_project_to_time_embeddings.weight": checkpoint[ f"{DECODER_ORIGINAL_PREFIX}.clip_emb.weight" ], "clip_image_embeddings_project_to_time_embeddings.bias": checkpoint[ f"{DECODER_ORIGINAL_PREFIX}.clip_emb.bias" ], } return diffusers_checkpoint # done text proj # super res unet first steps SUPER_RES_UNET_FIRST_STEPS_PREFIX = "model_first_steps" SUPER_RES_UNET_FIRST_STEPS_CONFIG = { "sample_size": 256, "layers_per_block": 3, "down_block_types": ( "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", ), "up_block_types": ( "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ), "block_out_channels": (320, 640, 960, 1280), "in_channels": 6, "out_channels": 3, "add_attention": False, } def super_res_unet_first_steps_model_from_original_config(): model = UNet2DModel(**SUPER_RES_UNET_FIRST_STEPS_CONFIG) return model def super_res_unet_first_steps_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} original_unet_prefix = SUPER_RES_UNET_FIRST_STEPS_PREFIX diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for 
diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix)) return diffusers_checkpoint # done super res unet first steps # super res unet last step SUPER_RES_UNET_LAST_STEP_PREFIX = "model_last_step" SUPER_RES_UNET_LAST_STEP_CONFIG = { "sample_size": 256, "layers_per_block": 3, "down_block_types": ( "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", ), "up_block_types": ( "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ), "block_out_channels": (320, 640, 960, 1280), "in_channels": 6, "out_channels": 3, "add_attention": False, } def super_res_unet_last_step_model_from_original_config(): model = UNet2DModel(**SUPER_RES_UNET_LAST_STEP_CONFIG) return model def super_res_unet_last_step_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} original_unet_prefix = SUPER_RES_UNET_LAST_STEP_PREFIX diffusers_checkpoint.update(unet_time_embeddings(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_in(checkpoint, original_unet_prefix)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, original_unet_prefix=original_unet_prefix, num_head_channels=None, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint, original_unet_prefix)) diffusers_checkpoint.update(unet_conv_out(checkpoint, original_unet_prefix)) return diffusers_checkpoint # done super res unet last step # unet utils # <original>.time_embed -> <diffusers>.time_embedding def unet_time_embeddings(checkpoint, original_unet_prefix): diffusers_checkpoint = {} diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{original_unet_prefix}.time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint[f"{original_unet_prefix}.time_embed.0.bias"], "time_embedding.linear_2.weight": 
checkpoint[f"{original_unet_prefix}.time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint[f"{original_unet_prefix}.time_embed.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks.0 -> <diffusers>.conv_in def unet_conv_in(checkpoint, original_unet_prefix): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_in.weight": checkpoint[f"{original_unet_prefix}.input_blocks.0.0.weight"], "conv_in.bias": checkpoint[f"{original_unet_prefix}.input_blocks.0.0.bias"], } ) return diffusers_checkpoint # <original>.out.0 -> <diffusers>.conv_norm_out def unet_conv_norm_out(checkpoint, original_unet_prefix): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_norm_out.weight": checkpoint[f"{original_unet_prefix}.out.0.weight"], "conv_norm_out.bias": checkpoint[f"{original_unet_prefix}.out.0.bias"], } ) return diffusers_checkpoint # <original>.out.2 -> <diffusers>.conv_out def unet_conv_out(checkpoint, original_unet_prefix): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_out.weight": checkpoint[f"{original_unet_prefix}.out.2.weight"], "conv_out.bias": checkpoint[f"{original_unet_prefix}.out.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks -> <diffusers>.down_blocks def unet_downblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_down_block_idx, original_down_block_idx, original_unet_prefix, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.resnets" original_down_block_prefix = f"{original_unet_prefix}.input_blocks" down_block = model.down_blocks[diffusers_down_block_idx] num_resnets = len(down_block.resnets) if down_block.downsamplers is None: downsampler = False else: assert len(down_block.downsamplers) == 1 downsampler = True # The downsample block is also a resnet num_resnets += 1 for resnet_idx_inc in range(num_resnets): full_resnet_prefix = f"{original_down_block_prefix}.{original_down_block_idx + resnet_idx_inc}.0" if downsampler and resnet_idx_inc == num_resnets - 1: # this is a downsample block full_diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.downsamplers.0" else: # this is a regular resnet block full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if hasattr(down_block, "attentions"): num_attentions = len(down_block.attentions) diffusers_attention_prefix = f"down_blocks.{diffusers_down_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = f"{original_down_block_prefix}.{original_down_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets return diffusers_checkpoint, num_original_down_blocks # <original>.middle_block -> <diffusers>.mid_block def unet_midblock_to_diffusers_checkpoint(model, checkpoint, *, original_unet_prefix, num_head_channels): diffusers_checkpoint = {} # block 0 original_block_idx = 0 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.0", 
resnet_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}", ) ) original_block_idx += 1 # optional block 1 if hasattr(model.mid_block, "attentions") and model.mid_block.attentions[0] is not None: diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix="mid_block.attentions.0", attention_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}", num_head_channels=num_head_channels, ) ) original_block_idx += 1 # block 1 or block 2 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.1", resnet_prefix=f"{original_unet_prefix}.middle_block.{original_block_idx}", ) ) return diffusers_checkpoint # <original>.output_blocks -> <diffusers>.up_blocks def unet_upblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_up_block_idx, original_up_block_idx, original_unet_prefix, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.resnets" original_up_block_prefix = f"{original_unet_prefix}.output_blocks" up_block = model.up_blocks[diffusers_up_block_idx] num_resnets = len(up_block.resnets) if up_block.upsamplers is None: upsampler = False else: assert len(up_block.upsamplers) == 1 upsampler = True # The upsample block is also a resnet num_resnets += 1 has_attentions = hasattr(up_block, "attentions") for resnet_idx_inc in range(num_resnets): if upsampler and resnet_idx_inc == num_resnets - 1: # this is an upsample block if has_attentions: # There is a middle attention block that we skip original_resnet_block_idx = 2 else: original_resnet_block_idx = 1 # we add the `minus 1` because the last two resnets are stuck together in the same output block full_resnet_prefix = ( f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc - 1}.{original_resnet_block_idx}" ) full_diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.upsamplers.0" else: # this is a regular resnet block full_resnet_prefix = f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc}.0" full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if has_attentions: num_attentions = len(up_block.attentions) diffusers_attention_prefix = f"up_blocks.{diffusers_up_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = f"{original_up_block_prefix}.{original_up_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets - 1 if upsampler else num_resnets return diffusers_checkpoint, num_original_down_blocks def resnet_to_diffusers_checkpoint(checkpoint, *, diffusers_resnet_prefix, resnet_prefix): diffusers_checkpoint = { f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.in_layers.0.weight"], f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.in_layers.0.bias"], f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.in_layers.2.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.in_layers.2.bias"], 
f"{diffusers_resnet_prefix}.time_emb_proj.weight": checkpoint[f"{resnet_prefix}.emb_layers.1.weight"], f"{diffusers_resnet_prefix}.time_emb_proj.bias": checkpoint[f"{resnet_prefix}.emb_layers.1.bias"], f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.out_layers.0.weight"], f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.out_layers.0.bias"], f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.out_layers.3.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.out_layers.3.bias"], } skip_connection_prefix = f"{resnet_prefix}.skip_connection" if f"{skip_connection_prefix}.weight" in checkpoint: diffusers_checkpoint.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{skip_connection_prefix}.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{skip_connection_prefix}.bias"], } ) return diffusers_checkpoint def attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix, num_head_channels): diffusers_checkpoint = {} # <original>.norm -> <diffusers>.group_norm diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], } ) # <original>.qkv -> <diffusers>.{query, key, value} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.qkv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.qkv.bias"], split=3, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.encoder_kv -> <diffusers>.{context_key, context_value} [encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.encoder_kv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.encoder_kv.bias"], split=2, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.add_k_proj.weight": encoder_k_weight, f"{diffusers_attention_prefix}.add_k_proj.bias": encoder_k_bias, f"{diffusers_attention_prefix}.add_v_proj.weight": encoder_v_weight, f"{diffusers_attention_prefix}.add_v_proj.bias": encoder_v_bias, } ) # <original>.proj_out (1d conv) -> <diffusers>.proj_attn (linear) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][ :, :, 0 ], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } ) return diffusers_checkpoint # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?) 
def split_attentions(*, weight, bias, split, chunk_size): weights = [None] * split biases = [None] * split weights_biases_idx = 0 for starting_row_index in range(0, weight.shape[0], chunk_size): row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size) weight_rows = weight[row_indices, :] bias_rows = bias[row_indices] if weights[weights_biases_idx] is None: assert weights[weights_biases_idx] is None weights[weights_biases_idx] = weight_rows biases[weights_biases_idx] = bias_rows else: assert weights[weights_biases_idx] is not None weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows]) biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows]) weights_biases_idx = (weights_biases_idx + 1) % split return weights, biases # done unet utils # Driver functions def text_encoder(): print("loading CLIP text encoder") clip_name = "openai/clip-vit-large-patch14" # sets pad_value to 0 pad_token = "!" tokenizer_model = CLIPTokenizer.from_pretrained(clip_name, pad_token=pad_token, device_map="auto") assert tokenizer_model.convert_tokens_to_ids(pad_token) == 0 text_encoder_model = CLIPTextModelWithProjection.from_pretrained( clip_name, # `CLIPTextModel` does not support device_map="auto" # device_map="auto" ) print("done loading CLIP text encoder") return text_encoder_model, tokenizer_model def prior(*, args, checkpoint_map_location): print("loading prior") prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location) prior_checkpoint = prior_checkpoint["state_dict"] clip_stats_checkpoint = torch.load(args.clip_stat_path, map_location=checkpoint_map_location) prior_model = prior_model_from_original_config() prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint( prior_model, prior_checkpoint, clip_stats_checkpoint ) del prior_checkpoint del clip_stats_checkpoint load_checkpoint_to_model(prior_diffusers_checkpoint, prior_model, strict=True) print("done loading prior") return prior_model def decoder(*, args, checkpoint_map_location): print("loading decoder") decoder_checkpoint = torch.load(args.decoder_checkpoint_path, map_location=checkpoint_map_location) decoder_checkpoint = decoder_checkpoint["state_dict"] decoder_model = decoder_model_from_original_config() decoder_diffusers_checkpoint = decoder_original_checkpoint_to_diffusers_checkpoint( decoder_model, decoder_checkpoint ) # text proj interlude # The original decoder implementation includes a set of parameters that are used # for creating the `encoder_hidden_states` which are what the U-net is conditioned # on. The diffusers conditional unet directly takes the encoder_hidden_states. 
We pull # the parameters into the UnCLIPTextProjModel class text_proj_model = text_proj_from_original_config() text_proj_checkpoint = text_proj_original_checkpoint_to_diffusers_checkpoint(decoder_checkpoint) load_checkpoint_to_model(text_proj_checkpoint, text_proj_model, strict=True) # done text proj interlude del decoder_checkpoint load_checkpoint_to_model(decoder_diffusers_checkpoint, decoder_model, strict=True) print("done loading decoder") return decoder_model, text_proj_model def super_res_unet(*, args, checkpoint_map_location): print("loading super resolution unet") super_res_checkpoint = torch.load(args.super_res_unet_checkpoint_path, map_location=checkpoint_map_location) super_res_checkpoint = super_res_checkpoint["state_dict"] # model_first_steps super_res_first_model = super_res_unet_first_steps_model_from_original_config() super_res_first_steps_checkpoint = super_res_unet_first_steps_original_checkpoint_to_diffusers_checkpoint( super_res_first_model, super_res_checkpoint ) # model_last_step super_res_last_model = super_res_unet_last_step_model_from_original_config() super_res_last_step_checkpoint = super_res_unet_last_step_original_checkpoint_to_diffusers_checkpoint( super_res_last_model, super_res_checkpoint ) del super_res_checkpoint load_checkpoint_to_model(super_res_first_steps_checkpoint, super_res_first_model, strict=True) load_checkpoint_to_model(super_res_last_step_checkpoint, super_res_last_model, strict=True) print("done loading super resolution unet") return super_res_first_model, super_res_last_model def load_checkpoint_to_model(checkpoint, model, strict=False): with tempfile.NamedTemporaryFile() as file: torch.save(checkpoint, file.name) del checkpoint if strict: model.load_state_dict(torch.load(file.name), strict=True) else: load_checkpoint_and_dispatch(model, file.name, device_map="auto") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--prior_checkpoint_path", default=None, type=str, required=True, help="Path to the prior checkpoint to convert.", ) parser.add_argument( "--decoder_checkpoint_path", default=None, type=str, required=True, help="Path to the decoder checkpoint to convert.", ) parser.add_argument( "--super_res_unet_checkpoint_path", default=None, type=str, required=True, help="Path to the super resolution checkpoint to convert.", ) parser.add_argument( "--clip_stat_path", default=None, type=str, required=True, help="Path to the clip stats checkpoint to convert." ) parser.add_argument( "--checkpoint_load_device", default="cpu", type=str, required=False, help="The device passed to `map_location` when loading checkpoints.", ) parser.add_argument( "--debug", default=None, type=str, required=False, help="Only run a specific stage of the convert script. 
Used for debugging", ) args = parser.parse_args() print(f"loading checkpoints to {args.checkpoint_load_device}") checkpoint_map_location = torch.device(args.checkpoint_load_device) if args.debug is not None: print(f"debug: only executing {args.debug}") if args.debug is None: text_encoder_model, tokenizer_model = text_encoder() prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location) decoder_model, text_proj_model = decoder(args=args, checkpoint_map_location=checkpoint_map_location) super_res_first_model, super_res_last_model = super_res_unet( args=args, checkpoint_map_location=checkpoint_map_location ) prior_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample_range=5.0, ) decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) print(f"saving Kakao Brain unCLIP to {args.dump_path}") pipe = UnCLIPPipeline( prior=prior_model, decoder=decoder_model, text_proj=text_proj_model, tokenizer=tokenizer_model, text_encoder=text_encoder_model, super_res_first=super_res_first_model, super_res_last=super_res_last_model, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) pipe.save_pretrained(args.dump_path) print("done writing Kakao Brain unCLIP") elif args.debug == "text_encoder": text_encoder_model, tokenizer_model = text_encoder() elif args.debug == "prior": prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location) elif args.debug == "decoder": decoder_model, text_proj_model = decoder(args=args, checkpoint_map_location=checkpoint_map_location) elif args.debug == "super_res_unet": super_res_first_model, super_res_last_model = super_res_unet( args=args, checkpoint_map_location=checkpoint_map_location ) else: raise ValueError(f"unknown debug value : {args.debug}")
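# A minimal sketch of what `split_attentions` returns, under the row layout the helper
# assumes: the fused projection rows are interleaved per attention head in blocks of
# `chunk_size` rows (q-block, k-block, v-block, next head, ...), so dealing the blocks
# out round-robin recovers contiguous q/k/v matrices. The toy sizes and the function
# name below are assumptions chosen only to keep the example small; this sketch is not
# invoked anywhere in the conversion.
def _sketch_split_attentions(head_dim=2, num_heads=3, model_dim=4):
    q = torch.arange(num_heads * head_dim * model_dim, dtype=torch.float32).reshape(num_heads, head_dim, model_dim)
    k, v = q + 100, q + 200
    # build a fused weight interleaved per head: [q_h0, k_h0, v_h0, q_h1, k_h1, v_h1, ...]
    fused_weight = torch.cat([torch.cat([q[h], k[h], v[h]], dim=0) for h in range(num_heads)], dim=0)
    fused_bias = torch.zeros(fused_weight.shape[0])
    (q_w, k_w, v_w), _ = split_attentions(weight=fused_weight, bias=fused_bias, split=3, chunk_size=head_dim)
    # each output stacks the per-head blocks back into one contiguous projection matrix
    assert torch.equal(q_w, q.reshape(-1, model_dim))
    assert torch.equal(k_w, k.reshape(-1, model_dim))
    assert torch.equal(v_w, v.reshape(-1, model_dim))
    return q_w, k_w, v_w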
diffusers/scripts/convert_kakao_brain_unclip_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_kakao_brain_unclip_to_diffusers.py", "repo_id": "diffusers", "token_count": 18242 }
149
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the MusicLDM checkpoints.""" import argparse import re import torch import yaml from transformers import ( AutoFeatureExtractor, AutoTokenizer, ClapConfig, ClapModel, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, MusicLDMPipeline, PNDMScheduler, UNet2DConditionModel, ) # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.shave_segments def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_resnet_paths def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_resnet_paths def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_attention_paths def renew_attention_paths(old_list): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local 
renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "to_q.weight") new_item = new_item.replace("q.bias", "to_q.bias") new_item = new_item.replace("k.weight", "to_k.weight") new_item = new_item.replace("k.bias", "to_k.bias") new_item = new_item.replace("v.weight", "to_v.weight") new_item = new_item.replace("v.bias", "to_v.bias") new_item = new_item.replace("proj_out.weight", "to_out.0.weight") new_item = new_item.replace("proj_out.bias", "to_out.0.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.assign_to_checkpoint def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["to_q.weight", "to_k.weight", "to_v.weight"] proj_key = "to_out.0.weight" for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys or ".".join(key.split(".")[-3:]) == proj_key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key].squeeze() def create_unet_diffusers_config(original_config, image_size: int): """ Creates a UNet config for diffusers based on the config of the original MusicLDM model. 
""" unet_params = original_config["model"]["params"]["unet_config"]["params"] vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) cross_attention_dim = ( unet_params["cross_attention_dim"] if "cross_attention_dim" in unet_params else block_out_channels ) class_embed_type = "simple_projection" if "extra_film_condition_dim" in unet_params else None projection_class_embeddings_input_dim = ( unet_params["extra_film_condition_dim"] if "extra_film_condition_dim" in unet_params else None ) class_embeddings_concat = unet_params["extra_film_use_concat"] if "extra_film_use_concat" in unet_params else None config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params["in_channels"], "out_channels": unet_params["out_channels"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params["num_res_blocks"], "cross_attention_dim": cross_attention_dim, "class_embed_type": class_embed_type, "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, "class_embeddings_concat": class_embeddings_concat, } return config # Adapted from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_vae_diffusers_config def create_vae_diffusers_config(original_config, checkpoint, image_size: int): """ Creates a VAE config for diffusers based on the config of the original MusicLDM model. Compared to the original Stable Diffusion conversion, this function passes a *learnt* VAE scaling factor to the diffusers VAE. 
""" vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] _ = original_config["model"]["params"]["first_stage_config"]["params"]["embed_dim"] block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) scaling_factor = checkpoint["scale_factor"] if "scale_by_std" in original_config["model"]["params"] else 0.18215 config = { "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], "scaling_factor": float(scaling_factor), } return config # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.create_diffusers_schedular def create_diffusers_schedular(original_config): schedular = DDIMScheduler( num_train_timesteps=original_config["model"]["params"]["timesteps"], beta_start=original_config["model"]["params"]["linear_start"], beta_end=original_config["model"]["params"]["linear_end"], beta_schedule="scaled_linear", ) return schedular def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False): """ Takes a state dict and a config, and returns a converted checkpoint. Compared to the original Stable Diffusion conversion, this function additionally converts the learnt film embedding linear layer. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: print(f"Checkpoint {path} has both EMA and non-EMA weights.") print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] new_checkpoint["class_embedding.weight"] = unet_state_dict["film_emb.weight"] new_checkpoint["class_embedding.bias"] = unet_state_dict["film_emb.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = 
renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.convert_ldm_vae_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} vae_key = "first_stage_model." 
keys = list(checkpoint.keys()) for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in 
range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint CLAP_KEYS_TO_MODIFY_MAPPING = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } CLAP_KEYS_TO_IGNORE = [ "text_transform", "audio_transform", "stft", "logmel_extractor", "tscam_conv", "head", "attn_mask", ] CLAP_EXPECTED_MISSING_KEYS = ["text_model.embeddings.token_type_ids"] def convert_open_clap_checkpoint(checkpoint): """ Takes a state dict and returns a converted CLAP checkpoint. """ # extract state dict for CLAP text embedding model, discarding the audio component model_state_dict = {} model_key = "cond_stage_model.model." keys = list(checkpoint.keys()) for key in keys: if key.startswith(model_key): model_state_dict[key.replace(model_key, "")] = checkpoint.get(key) new_checkpoint = {} sequential_layers_pattern = r".*sequential.(\d+).*" text_projection_pattern = r".*_projection.(\d+).*" for key, value in model_state_dict.items(): # check if key should be ignored in mapping - if so map it to a key name that we'll filter out at the end for key_to_ignore in CLAP_KEYS_TO_IGNORE: if key_to_ignore in key: key = "spectrogram" # check if any key needs to be modified for key_to_modify, new_key in CLAP_KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if re.match(sequential_layers_pattern, key): # replace sequential layers with list sequential_layer = re.match(sequential_layers_pattern, key).group(1) key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.") elif re.match(text_projection_pattern, key): projecton_layer = int(re.match(text_projection_pattern, key).group(1)) # Because in CLAP they use `nn.Sequential`... 
transformers_projection_layer = 1 if projecton_layer == 0 else 2 key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.") if "audio" and "qkv" in key: # split qkv into query key and value mixed_qkv = value qkv_dim = mixed_qkv.size(0) // 3 query_layer = mixed_qkv[:qkv_dim] key_layer = mixed_qkv[qkv_dim : qkv_dim * 2] value_layer = mixed_qkv[qkv_dim * 2 :] new_checkpoint[key.replace("qkv", "query")] = query_layer new_checkpoint[key.replace("qkv", "key")] = key_layer new_checkpoint[key.replace("qkv", "value")] = value_layer elif key != "spectrogram": new_checkpoint[key] = value return new_checkpoint def create_transformers_vocoder_config(original_config): """ Creates a config for transformers SpeechT5HifiGan based on the config of the vocoder model. """ vocoder_params = original_config["model"]["params"]["vocoder_config"]["params"] config = { "model_in_dim": vocoder_params["num_mels"], "sampling_rate": vocoder_params["sampling_rate"], "upsample_initial_channel": vocoder_params["upsample_initial_channel"], "upsample_rates": list(vocoder_params["upsample_rates"]), "upsample_kernel_sizes": list(vocoder_params["upsample_kernel_sizes"]), "resblock_kernel_sizes": list(vocoder_params["resblock_kernel_sizes"]), "resblock_dilation_sizes": [ list(resblock_dilation) for resblock_dilation in vocoder_params["resblock_dilation_sizes"] ], "normalize_before": False, } return config def convert_hifigan_checkpoint(checkpoint, config): """ Takes a state dict and config, and returns a converted HiFiGAN vocoder checkpoint. """ # extract state dict for vocoder vocoder_state_dict = {} vocoder_key = "first_stage_model.vocoder." keys = list(checkpoint.keys()) for key in keys: if key.startswith(vocoder_key): vocoder_state_dict[key.replace(vocoder_key, "")] = checkpoint.get(key) # fix upsampler keys, everything else is correct already for i in range(len(config.upsample_rates)): vocoder_state_dict[f"upsampler.{i}.weight"] = vocoder_state_dict.pop(f"ups.{i}.weight") vocoder_state_dict[f"upsampler.{i}.bias"] = vocoder_state_dict.pop(f"ups.{i}.bias") if not config.normalize_before: # if we don't set normalize_before then these variables are unused, so we set them to their initialised values vocoder_state_dict["mean"] = torch.zeros(config.model_in_dim) vocoder_state_dict["scale"] = torch.ones(config.model_in_dim) return vocoder_state_dict # Adapted from https://huggingface.co/spaces/haoheliu/MusicLDM-text-to-audio-generation/blob/84a0384742a22bd80c44e903e241f0623e874f1d/MusicLDM/utils.py#L72-L73 DEFAULT_CONFIG = { "model": { "params": { "linear_start": 0.0015, "linear_end": 0.0195, "timesteps": 1000, "channels": 8, "scale_by_std": True, "unet_config": { "target": "MusicLDM.latent_diffusion.openaimodel.UNetModel", "params": { "extra_film_condition_dim": 512, "extra_film_use_concat": True, "in_channels": 8, "out_channels": 8, "model_channels": 128, "attention_resolutions": [8, 4, 2], "num_res_blocks": 2, "channel_mult": [1, 2, 3, 5], "num_head_channels": 32, }, }, "first_stage_config": { "target": "MusicLDM.variational_autoencoder.autoencoder.AutoencoderKL", "params": { "embed_dim": 8, "ddconfig": { "z_channels": 8, "resolution": 256, "in_channels": 1, "out_ch": 1, "ch": 128, "ch_mult": [1, 2, 4], "num_res_blocks": 2, }, }, }, "vocoder_config": { "target": "MusicLDM.first_stage_model.vocoder", "params": { "upsample_rates": [5, 4, 2, 2, 2], "upsample_kernel_sizes": [16, 16, 8, 4, 4], "upsample_initial_channel": 1024, "resblock_kernel_sizes": [3, 7, 11], 
"resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], "num_mels": 64, "sampling_rate": 16000, }, }, }, }, } def load_pipeline_from_original_MusicLDM_ckpt( checkpoint_path: str, original_config_file: str = None, image_size: int = 1024, prediction_type: str = None, extract_ema: bool = False, scheduler_type: str = "ddim", num_in_channels: int = None, model_channels: int = None, num_head_channels: int = None, device: str = None, from_safetensors: bool = False, ) -> MusicLDMPipeline: """ Load an MusicLDM pipeline object from a `.ckpt`/`.safetensors` file and (ideally) a `.yaml` config file. Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is recommended that you override the default values and/or supply an `original_config_file` wherever possible. Args: checkpoint_path (`str`): Path to `.ckpt` file. original_config_file (`str`): Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically set to the MusicLDM-s-full-v2 config. image_size (`int`, *optional*, defaults to 1024): The image size that the model was trained on. prediction_type (`str`, *optional*): The prediction type that the model was trained on. If `None`, will be automatically inferred by looking for a key in the config. For the default config, the prediction type is `'epsilon'`. num_in_channels (`int`, *optional*, defaults to None): The number of UNet input channels. If `None`, it will be automatically inferred from the config. model_channels (`int`, *optional*, defaults to None): The number of UNet model channels. If `None`, it will be automatically inferred from the config. Override to 128 for the small checkpoints, 192 for the medium checkpoints and 256 for the large. num_head_channels (`int`, *optional*, defaults to None): The number of UNet head channels. If `None`, it will be automatically inferred from the config. Override to 32 for the small and medium checkpoints, and 64 for the large. scheduler_type (`str`, *optional*, defaults to 'pndm'): Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", "ddim"]`. extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning. device (`str`, *optional*, defaults to `None`): The device to use. Pass `None` to determine automatically. from_safetensors (`str`, *optional*, defaults to `False`): If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch. return: An MusicLDMPipeline object representing the passed-in `.ckpt`/`.safetensors` file. 
""" if from_safetensors: from safetensors import safe_open checkpoint = {} with safe_open(checkpoint_path, framework="pt", device="cpu") as f: for key in f.keys(): checkpoint[key] = f.get_tensor(key) else: if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" checkpoint = torch.load(checkpoint_path, map_location=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) if "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] if original_config_file is None: original_config = DEFAULT_CONFIG else: original_config = yaml.safe_load(original_config_file) if num_in_channels is not None: original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels if model_channels is not None: original_config["model"]["params"]["unet_config"]["params"]["model_channels"] = model_channels if num_head_channels is not None: original_config["model"]["params"]["unet_config"]["params"]["num_head_channels"] = num_head_channels if ( "parameterization" in original_config["model"]["params"] and original_config["model"]["params"]["parameterization"] == "v" ): if prediction_type is None: prediction_type = "v_prediction" else: if prediction_type is None: prediction_type = "epsilon" if image_size is None: image_size = 512 num_train_timesteps = original_config["model"]["params"]["timesteps"] beta_start = original_config["model"]["params"]["linear_start"] beta_end = original_config["model"]["params"]["linear_end"] scheduler = DDIMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, steps_offset=1, clip_sample=False, set_alpha_to_one=False, prediction_type=prediction_type, ) # make sure scheduler works correctly with DDIM scheduler.register_to_config(clip_sample=False) if scheduler_type == "pndm": config = dict(scheduler.config) config["skip_prk_steps"] = True scheduler = PNDMScheduler.from_config(config) elif scheduler_type == "lms": scheduler = LMSDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "heun": scheduler = HeunDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler": scheduler = EulerDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) elif scheduler_type == "ddim": scheduler = scheduler else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") # Convert the UNet2DModel unet_config = create_unet_diffusers_config(original_config, image_size=image_size) unet = UNet2DConditionModel(**unet_config) converted_unet_checkpoint = convert_ldm_unet_checkpoint( checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema ) unet.load_state_dict(converted_unet_checkpoint) # Convert the VAE model vae_config = create_vae_diffusers_config(original_config, checkpoint=checkpoint, image_size=image_size) converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) # Convert the text model # MusicLDM uses the same tokenizer as the original CLAP model, but a slightly different configuration config = ClapConfig.from_pretrained("laion/clap-htsat-unfused") config.audio_config.update( { "patch_embeds_hidden_size": 128, "hidden_size": 1024, "depths": [2, 2, 12, 2], } ) tokenizer = 
AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused") converted_text_model = convert_open_clap_checkpoint(checkpoint) text_model = ClapModel(config) missing_keys, unexpected_keys = text_model.load_state_dict(converted_text_model, strict=False) # we expect not to have token_type_ids in our original state dict so let's ignore them missing_keys = list(set(missing_keys) - set(CLAP_EXPECTED_MISSING_KEYS)) if len(unexpected_keys) > 0: raise ValueError(f"Unexpected keys when loading CLAP model: {unexpected_keys}") if len(missing_keys) > 0: raise ValueError(f"Missing keys when loading CLAP model: {missing_keys}") # Convert the vocoder model vocoder_config = create_transformers_vocoder_config(original_config) vocoder_config = SpeechT5HifiGanConfig(**vocoder_config) converted_vocoder_checkpoint = convert_hifigan_checkpoint(checkpoint, vocoder_config) vocoder = SpeechT5HifiGan(vocoder_config) vocoder.load_state_dict(converted_vocoder_checkpoint) # Instantiate the diffusers pipeline pipe = MusicLDMPipeline( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, vocoder=vocoder, feature_extractor=feature_extractor, ) return pipe if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--model_channels", default=None, type=int, help="The number of UNet model channels. If `None`, it will be automatically inferred from the config. Override" " to 128 for the small checkpoints, 192 for the medium checkpoints and 256 for the large.", ) parser.add_argument( "--num_head_channels", default=None, type=int, help="The number of UNet head channels. If `None`, it will be automatically inferred from the config. Override" " to 32 for the small and medium checkpoints, and 64 for the large.", ) parser.add_argument( "--scheduler_type", default="ddim", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--image_size", default=None, type=int, help=("The image size that the model was trained on."), ) parser.add_argument( "--prediction_type", default=None, type=str, help=("The prediction type that the model was trained on."), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." 
), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") args = parser.parse_args() pipe = load_pipeline_from_original_MusicLDM_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, model_channels=args.model_channels, num_head_channels=args.num_head_channels, from_safetensors=args.from_safetensors, device=args.device, ) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
diffusers/scripts/convert_original_musicldm_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_original_musicldm_to_diffusers.py", "repo_id": "diffusers", "token_count": 19562 }
150
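The fused-qkv split performed inside `convert_open_clap_checkpoint` above is easy to check in isolation. The sketch below reproduces just that slicing logic with random stand-in tensors; the 768-dimensional projection size is an illustrative assumption, not a value taken from any real CLAP checkpoint.

```python
# Stand-alone sketch of the fused-qkv split done in convert_open_clap_checkpoint.
# The projection size (768) is a hypothetical example, not a real checkpoint value.
import torch

mixed_qkv = torch.randn(3 * 768, 768)      # fused q/k/v projection weight
qkv_dim = mixed_qkv.size(0) // 3           # output dim of a single projection

query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]

# each slice now has the shape of one standalone projection weight
assert query_layer.shape == key_layer.shape == value_layer.shape == (768, 768)
```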
import argparse import sys import tensorrt as trt def convert_models(onnx_path: str, num_controlnet: int, output_path: str, fp16: bool = False, sd_xl: bool = False): """ Function to convert models in stable diffusion controlnet pipeline into TensorRT format Example: python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.onnx --output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.engine --fp16 --num_controlnet 2 Example for SD XL: python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.onnx --output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --fp16 --num_controlnet 1 --sd_xl Returns: unet/model.engine run test script in diffusers/examples/community python test_onnx_controlnet.py --sd_model danbrown/RevAnimated-v1-2-2 --onnx_model_dir path-to-models-stable_diffusion/RevAnimated-v1-2-2 --unet_engine_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --qr_img_path path-to-qr-code-image """ # UNET if sd_xl: batch_size = 1 unet_in_channels = 4 unet_sample_size = 64 num_tokens = 77 text_hidden_size = 2048 img_size = 512 text_embeds_shape = (2 * batch_size, 1280) time_ids_shape = (2 * batch_size, 6) else: batch_size = 1 unet_in_channels = 4 unet_sample_size = 64 num_tokens = 77 text_hidden_size = 768 img_size = 512 batch_size = 1 latents_shape = (2 * batch_size, unet_in_channels, unet_sample_size, unet_sample_size) embed_shape = (2 * batch_size, num_tokens, text_hidden_size) controlnet_conds_shape = (num_controlnet, 2 * batch_size, 3, img_size, img_size) TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) TRT_BUILDER = trt.Builder(TRT_LOGGER) TRT_RUNTIME = trt.Runtime(TRT_LOGGER) network = TRT_BUILDER.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) onnx_parser = trt.OnnxParser(network, TRT_LOGGER) parse_success = onnx_parser.parse_from_file(onnx_path) for idx in range(onnx_parser.num_errors): print(onnx_parser.get_error(idx)) if not parse_success: sys.exit("ONNX model parsing failed") print("Load Onnx model done") profile = TRT_BUILDER.create_optimization_profile() profile.set_shape("sample", latents_shape, latents_shape, latents_shape) profile.set_shape("encoder_hidden_states", embed_shape, embed_shape, embed_shape) profile.set_shape("controlnet_conds", controlnet_conds_shape, controlnet_conds_shape, controlnet_conds_shape) if sd_xl: profile.set_shape("text_embeds", text_embeds_shape, text_embeds_shape, text_embeds_shape) profile.set_shape("time_ids", time_ids_shape, time_ids_shape, time_ids_shape) config = TRT_BUILDER.create_builder_config() config.add_optimization_profile(profile) config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True) if fp16: config.set_flag(trt.BuilderFlag.FP16) plan = TRT_BUILDER.build_serialized_network(network, config) if plan is None: sys.exit("Failed building engine") print("Succeeded building engine") engine = TRT_RUNTIME.deserialize_cuda_engine(plan) ## save TRT engine with open(output_path, "wb") as f: f.write(engine.serialize()) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline") parser.add_argument( "--onnx_path", type=str, required=True, help="Path to the onnx checkpoint to convert", ) parser.add_argument("--num_controlnet", type=int) 
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() convert_models(args.onnx_path, args.num_controlnet, args.output_path, args.fp16, args.sd_xl)
diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py/0
{ "file_path": "diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py", "repo_id": "diffusers", "token_count": 1860 }
151
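As a follow-up to the conversion script above, here is a hedged sketch of loading the serialized engine back with the TensorRT runtime. The engine path is a placeholder for whatever was passed as `--output_path`, and running it requires a machine with TensorRT and a compatible GPU.

```python
# Sketch: deserialize the engine written by convert_models(); the path is hypothetical.
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(TRT_LOGGER)

with open("unet/model.engine", "rb") as f:   # file produced via --output_path
    engine = runtime.deserialize_cuda_engine(f.read())

assert engine is not None, "engine failed to deserialize"
```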
from typing import Any, Dict, List from .configuration_utils import ConfigMixin, register_to_config from .utils import CONFIG_NAME class PipelineCallback(ConfigMixin): """ Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing custom callbacks and ensures that all callbacks have a consistent interface. Please implement the following: `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. `callback_fn`: This method defines the core functionality of your callback. """ config_name = CONFIG_NAME @register_to_config def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): super().__init__() if (cutoff_step_ratio is None and cutoff_step_index is None) or ( cutoff_step_ratio is not None and cutoff_step_index is not None ): raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") if cutoff_step_ratio is not None and ( not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) ): raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") @property def tensor_inputs(self) -> List[str]: raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}") def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) class MultiPipelineCallbacks: """ This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and provides a unified interface for calling all of them. """ def __init__(self, callbacks: List[PipelineCallback]): self.callbacks = callbacks @property def tensor_inputs(self) -> List[str]: return [input for callback in self.callbacks for input in callback.tensor_inputs] def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: """ Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. """ for callback in self.callbacks: callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) return callback_kwargs class SDCFGCutoffCallback(PipelineCallback): """ Callback function for Stable Diffusion Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = ["prompt_embeds"] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. 
pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds return callback_kwargs class SDXLCFGCutoffCallback(PipelineCallback): """ Callback function for the base Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = [ "prompt_embeds", "add_text_embeds", "add_time_ids", ] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. add_text_embeds = callback_kwargs[self.tensor_inputs[1]] add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens add_time_ids = callback_kwargs[self.tensor_inputs[2]] add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = add_text_embeds callback_kwargs[self.tensor_inputs[2]] = add_time_ids return callback_kwargs class SDXLControlnetCFGCutoffCallback(PipelineCallback): """ Callback function for the Controlnet Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = [ "prompt_embeds", "add_text_embeds", "add_time_ids", "image", ] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. add_text_embeds = callback_kwargs[self.tensor_inputs[1]] add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens add_time_ids = callback_kwargs[self.tensor_inputs[2]] add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector # For Controlnet image = callback_kwargs[self.tensor_inputs[3]] image = image[-1:] pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = add_text_embeds callback_kwargs[self.tensor_inputs[2]] = add_time_ids callback_kwargs[self.tensor_inputs[3]] = image return callback_kwargs class IPAdapterScaleCutoffCallback(PipelineCallback): """ Callback function for any pipeline that inherits `IPAdapterMixin`. 
After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. """ tensor_inputs = [] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: pipeline.set_ip_adapter_scale(0.0) return callback_kwargs class SD3CFGCutoffCallback(PipelineCallback): """ Callback function for Stable Diffusion 3 Pipelines. After certain number of steps (set by `cutoff_step_ratio` or `cutoff_step_index`), this callback will disable the CFG. Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. """ tensor_inputs = ["prompt_embeds", "pooled_prompt_embeds"] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio cutoff_step = ( cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) ) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. pooled_prompt_embeds = callback_kwargs[self.tensor_inputs[1]] pooled_prompt_embeds = pooled_prompt_embeds[ -1: ] # "-1" denotes the embeddings for conditional pooled text tokens. pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds callback_kwargs[self.tensor_inputs[1]] = pooled_prompt_embeds return callback_kwargs
diffusers/src/diffusers/callbacks.py/0
{ "file_path": "diffusers/src/diffusers/callbacks.py", "repo_id": "diffusers", "token_count": 4021 }
152
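The callbacks above are designed to be handed to a pipeline's `callback_on_step_end` argument, with `callback.tensor_inputs` passed as `callback_on_step_end_tensor_inputs`. A minimal usage sketch follows; the checkpoint id, prompt, and CUDA device are placeholder assumptions rather than requirements of the callback itself.

```python
# Hedged usage sketch for SDCFGCutoffCallback; model id, prompt and device are assumptions.
import torch
from diffusers import StableDiffusionPipeline
from diffusers.callbacks import SDCFGCutoffCallback

callback = SDCFGCutoffCallback(cutoff_step_ratio=0.4)  # disable CFG after 40% of the steps

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse on mars",
    num_inference_steps=30,
    callback_on_step_end=callback,
    callback_on_step_end_tensor_inputs=callback.tensor_inputs,
).images[0]
image.save("cfg_cutoff.png")
```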
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import torch from ..configuration_utils import register_to_config from .guider_utils import BaseGuidance, rescale_noise_cfg if TYPE_CHECKING: from ..modular_pipelines.modular_pipeline import BlockState class ClassifierFreeGuidance(BaseGuidance): """ Classifier-free guidance (CFG): https://huggingface.co/papers/2207.12598 CFG is a technique used to improve generation quality and condition-following in diffusion models. It works by jointly training a model on both conditional and unconditional data, and using a weighted sum of the two during inference. This allows the model to trade off between generation quality and sample diversity. The original paper proposes scaling and shifting the conditional distribution based on the difference between conditional and unconditional predictions. [x_pred = x_cond + scale * (x_cond - x_uncond)] Diffusers implemented the scaling and shifting on the unconditional prediction instead, based on the [Imagen paper](https://huggingface.co/papers/2205.11487), which is equivalent to what the original paper proposed in theory. [x_pred = x_uncond + scale * (x_cond - x_uncond)] The intuition behind the original formulation can be thought of as moving the conditional distribution estimates further away from the unconditional distribution estimates, while the diffusers-native implementation can be thought of as moving the unconditional distribution towards the conditional distribution estimates to get rid of the unconditional predictions (usually negative features like "bad quality, bad anatomy, watermarks", etc.) The `use_original_formulation` argument can be set to `True` to use the original CFG formulation mentioned in the paper. By default, we use the diffusers-native implementation that has been in the codebase for a long time. Args: guidance_scale (`float`, defaults to `7.5`): The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the text prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation and deterioration of image quality. guidance_rescale (`float`, defaults to `0.0`): The rescale factor applied to the noise predictions. This is used to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). use_original_formulation (`bool`, defaults to `False`): Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default, we use the diffusers-native implementation that has been in the codebase for a long time. See [~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details. start (`float`, defaults to `0.0`): The fraction of the total number of denoising steps after which guidance starts.
stop (`float`, defaults to `1.0`): The fraction of the total number of denoising steps after which guidance stops. """ _input_predictions = ["pred_cond", "pred_uncond"] @register_to_config def __init__( self, guidance_scale: float = 7.5, guidance_rescale: float = 0.0, use_original_formulation: bool = False, start: float = 0.0, stop: float = 1.0, ): super().__init__(start, stop) self.guidance_scale = guidance_scale self.guidance_rescale = guidance_rescale self.use_original_formulation = use_original_formulation def prepare_inputs( self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None ) -> List["BlockState"]: if input_fields is None: input_fields = self._input_fields tuple_indices = [0] if self.num_conditions == 1 else [0, 1] data_batches = [] for i in range(self.num_conditions): data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], self._input_predictions[i]) data_batches.append(data_batch) return data_batches def forward(self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None) -> torch.Tensor: pred = None if not self._is_cfg_enabled(): pred = pred_cond else: shift = pred_cond - pred_uncond pred = pred_cond if self.use_original_formulation else pred_uncond pred = pred + self.guidance_scale * shift if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) return pred, {} @property def is_conditional(self) -> bool: return self._count_prepared == 1 @property def num_conditions(self) -> int: num_conditions = 1 if self._is_cfg_enabled(): num_conditions += 1 return num_conditions def _is_cfg_enabled(self) -> bool: if not self._enabled: return False is_within_range = True if self._num_inference_steps is not None: skip_start_step = int(self._start * self._num_inference_steps) skip_stop_step = int(self._stop * self._num_inference_steps) is_within_range = skip_start_step <= self._step < skip_stop_step is_close = False if self.use_original_formulation: is_close = math.isclose(self.guidance_scale, 0.0) else: is_close = math.isclose(self.guidance_scale, 1.0) return is_within_range and not is_close
diffusers/src/diffusers/guiders/classifier_free_guidance.py/0
{ "file_path": "diffusers/src/diffusers/guiders/classifier_free_guidance.py", "repo_id": "diffusers", "token_count": 2248 }
153
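The two CFG formulations described in the docstring differ only in which prediction the guidance shift is added to. The snippet below checks, on random stand-in tensors, that the diffusers-native form at scale `s` equals the original-paper form at scale `s - 1`; the tensor shapes are arbitrary placeholders for a model's noise predictions.

```python
# Self-contained sketch of the two CFG formulations; tensors are random stand-ins.
import torch

guidance_scale = 7.5
pred_cond = torch.randn(2, 4, 64, 64)
pred_uncond = torch.randn(2, 4, 64, 64)
shift = pred_cond - pred_uncond

# diffusers-native formulation (default): start from the unconditional prediction
pred_default = pred_uncond + guidance_scale * shift

# original-paper formulation (use_original_formulation=True): start from the conditional prediction
pred_original = pred_cond + guidance_scale * shift

# the two parameterizations differ by exactly one unit of guidance scale
assert torch.allclose(pred_default, pred_cond + (guidance_scale - 1.0) * shift, atol=1e-5)
```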
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import Optional, Tuple, Type, Union import torch from ..utils import get_logger, is_peft_available, is_peft_version from ._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS from .hooks import HookRegistry, ModelHook logger = get_logger(__name__) # pylint: disable=invalid-name # fmt: off _LAYERWISE_CASTING_HOOK = "layerwise_casting" _PEFT_AUTOCAST_DISABLE_HOOK = "peft_autocast_disable" DEFAULT_SKIP_MODULES_PATTERN = ("pos_embed", "patch_embed", "norm", "^proj_in$", "^proj_out$") # fmt: on _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST = is_peft_available() and is_peft_version(">", "0.14.0") if _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST: from peft.helpers import disable_input_dtype_casting from peft.tuners.tuners_utils import BaseTunerLayer class LayerwiseCastingHook(ModelHook): r""" A hook that casts the weights of a module to a high precision dtype for computation, and to a low precision dtype for storage. This process may lead to quality loss in the output, but can significantly reduce the memory footprint. """ _is_stateful = False def __init__(self, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool) -> None: self.storage_dtype = storage_dtype self.compute_dtype = compute_dtype self.non_blocking = non_blocking def initialize_hook(self, module: torch.nn.Module): module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking) return module def deinitalize_hook(self, module: torch.nn.Module): raise NotImplementedError( "LayerwiseCastingHook does not support deinitialization. A model once enabled with layerwise casting will " "have casted its weights to a lower precision dtype for storage. Casting this back to the original dtype " "will lead to precision loss, which might have an impact on the model's generation quality. The model should " "be re-initialized and loaded in the original dtype." ) def pre_forward(self, module: torch.nn.Module, *args, **kwargs): module.to(dtype=self.compute_dtype, non_blocking=self.non_blocking) return args, kwargs def post_forward(self, module: torch.nn.Module, output): module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking) return output class PeftInputAutocastDisableHook(ModelHook): r""" A hook that disables the casting of inputs to the module weight dtype during the forward pass. By default, PEFT casts the inputs to the weight dtype of the module, which can lead to precision loss. The reasons for needing this are: - If we don't add PEFT layers' weight names to `skip_modules_pattern` when applying layerwise casting, the inputs will be casted to the, possibly lower precision, storage dtype. Reference: https://github.com/huggingface/peft/blob/0facdebf6208139cbd8f3586875acb378813dd97/src/peft/tuners/lora/layer.py#L706 - We can, on our end, use something like accelerate's `send_to_device` but for dtypes. This way, we can ensure that the inputs are casted to the computation dtype correctly always. 
However, there are two goals we are hoping to achieve: 1. Making forward implementations independent of device/dtype casting operations as much as possible. 2. Performing inference without losing information from casting to different precisions. With the current PEFT implementation (as linked in the reference above), and assuming running layerwise casting inference with storage_dtype=torch.float8_e4m3fn and compute_dtype=torch.bfloat16, inputs are cast to torch.float8_e4m3fn in the lora layer. We will then upcast back to torch.bfloat16 when we continue the forward pass in PEFT linear forward or Diffusers layer forward, with a `send_to_dtype` operation from LayerwiseCastingHook. This will be a lossy operation and result in poorer generation quality. """ def new_forward(self, module: torch.nn.Module, *args, **kwargs): with disable_input_dtype_casting(module): return self.fn_ref.original_forward(*args, **kwargs) def apply_layerwise_casting( module: torch.nn.Module, storage_dtype: torch.dtype, compute_dtype: torch.dtype, skip_modules_pattern: Union[str, Tuple[str, ...]] = "auto", skip_modules_classes: Optional[Tuple[Type[torch.nn.Module], ...]] = None, non_blocking: bool = False, ) -> None: r""" Applies layerwise casting to a given module. The module expected here is a Diffusers ModelMixin but it can be any nn.Module using diffusers layers or pytorch primitives. Example: ```python >>> import torch >>> from diffusers import CogVideoXTransformer3DModel >>> transformer = CogVideoXTransformer3DModel.from_pretrained( ... model_id, subfolder="transformer", torch_dtype=torch.bfloat16 ... ) >>> apply_layerwise_casting( ... transformer, ... storage_dtype=torch.float8_e4m3fn, ... compute_dtype=torch.bfloat16, ... skip_modules_pattern=["patch_embed", "norm", "proj_out"], ... non_blocking=True, ... ) ``` Args: module (`torch.nn.Module`): The module whose leaf modules will be cast to a high precision dtype for computation, and to a low precision dtype for storage. storage_dtype (`torch.dtype`): The dtype to cast the module to before/after the forward pass for storage. compute_dtype (`torch.dtype`): The dtype to cast the module to during the forward pass for computation. skip_modules_pattern (`Tuple[str, ...]`, defaults to `"auto"`): A list of patterns to match the names of the modules to skip during the layerwise casting process. If set to `"auto"`, the default patterns are used. If set to `None`, no modules are skipped. If set to `None` alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the module instead of its internal submodules. skip_modules_classes (`Tuple[Type[torch.nn.Module], ...]`, defaults to `None`): A list of module classes to skip during the layerwise casting process. non_blocking (`bool`, defaults to `False`): If `True`, the weight casting operations are non-blocking. 
""" if skip_modules_pattern == "auto": skip_modules_pattern = DEFAULT_SKIP_MODULES_PATTERN if skip_modules_classes is None and skip_modules_pattern is None: apply_layerwise_casting_hook(module, storage_dtype, compute_dtype, non_blocking) return _apply_layerwise_casting( module, storage_dtype, compute_dtype, skip_modules_pattern, skip_modules_classes, non_blocking, ) _disable_peft_input_autocast(module) def _apply_layerwise_casting( module: torch.nn.Module, storage_dtype: torch.dtype, compute_dtype: torch.dtype, skip_modules_pattern: Optional[Tuple[str, ...]] = None, skip_modules_classes: Optional[Tuple[Type[torch.nn.Module], ...]] = None, non_blocking: bool = False, _prefix: str = "", ) -> None: should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or ( skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern) ) if should_skip: logger.debug(f'Skipping layerwise casting for layer "{_prefix}"') return if isinstance(module, _GO_LC_SUPPORTED_PYTORCH_LAYERS): logger.debug(f'Applying layerwise casting to layer "{_prefix}"') apply_layerwise_casting_hook(module, storage_dtype, compute_dtype, non_blocking) return for name, submodule in module.named_children(): layer_name = f"{_prefix}.{name}" if _prefix else name _apply_layerwise_casting( submodule, storage_dtype, compute_dtype, skip_modules_pattern, skip_modules_classes, non_blocking, _prefix=layer_name, ) def apply_layerwise_casting_hook( module: torch.nn.Module, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool ) -> None: r""" Applies a `LayerwiseCastingHook` to a given module. Args: module (`torch.nn.Module`): The module to attach the hook to. storage_dtype (`torch.dtype`): The dtype to cast the module to before the forward pass. compute_dtype (`torch.dtype`): The dtype to cast the module to during the forward pass. non_blocking (`bool`): If `True`, the weight casting operations are non-blocking. """ registry = HookRegistry.check_if_exists_or_initialize(module) hook = LayerwiseCastingHook(storage_dtype, compute_dtype, non_blocking) registry.register_hook(hook, _LAYERWISE_CASTING_HOOK) def _is_layerwise_casting_active(module: torch.nn.Module) -> bool: for submodule in module.modules(): if ( hasattr(submodule, "_diffusers_hook") and submodule._diffusers_hook.get_hook(_LAYERWISE_CASTING_HOOK) is not None ): return True return False def _disable_peft_input_autocast(module: torch.nn.Module) -> None: if not _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST: return for submodule in module.modules(): if isinstance(submodule, BaseTunerLayer) and _is_layerwise_casting_active(submodule): registry = HookRegistry.check_if_exists_or_initialize(submodule) hook = PeftInputAutocastDisableHook() registry.register_hook(hook, _PEFT_AUTOCAST_DISABLE_HOOK)
diffusers/src/diffusers/hooks/layerwise_casting.py/0
{ "file_path": "diffusers/src/diffusers/hooks/layerwise_casting.py", "repo_id": "diffusers", "token_count": 3938 }
154
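Since `apply_layerwise_casting` accepts any module built from supported PyTorch primitives, a toy `nn.Sequential` is enough to see the mechanism. This is only a sketch under assumptions: the fp8 storage dtype needs a recent PyTorch build, and the skip arguments are set to `None` so the hook wraps the whole module rather than individual submodules.

```python
# Toy sketch of layerwise casting on a plain PyTorch module (not a diffusers model).
import torch
import torch.nn as nn
from diffusers.hooks.layerwise_casting import apply_layerwise_casting

model = nn.Sequential(nn.Linear(64, 128), nn.SiLU(), nn.Linear(128, 64))

apply_layerwise_casting(
    model,
    storage_dtype=torch.float8_e4m3fn,  # weights kept in fp8 between forward passes
    compute_dtype=torch.float32,        # weights upcast just-in-time for each forward pass
    skip_modules_pattern=None,          # no skipping: the hook is attached to the whole module
    skip_modules_classes=None,
)

out = model(torch.randn(2, 64))
print(out.dtype)  # computation runs in float32; weights are cast back to fp8 afterwards
```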
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import nullcontext from typing import Dict from ..models.attention_processor import SD3IPAdapterJointAttnProcessor2_0 from ..models.embeddings import IPAdapterTimeImageProjection from ..models.model_loading_utils import load_model_dict_into_meta from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT from ..utils import is_accelerate_available, is_torch_version, logging from ..utils.torch_utils import empty_device_cache logger = logging.get_logger(__name__) class SD3Transformer2DLoadersMixin: """Load IP-Adapters and LoRA layers into a `[SD3Transformer2DModel]`.""" def _convert_ip_adapter_attn_to_diffusers( self, state_dict: Dict, low_cpu_mem_usage: bool = _LOW_CPU_MEM_USAGE_DEFAULT ) -> Dict: if low_cpu_mem_usage: if is_accelerate_available(): from accelerate import init_empty_weights else: low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
) # IP-Adapter cross attention parameters hidden_size = self.config.attention_head_dim * self.config.num_attention_heads ip_hidden_states_dim = self.config.attention_head_dim * self.config.num_attention_heads timesteps_emb_dim = state_dict["0.norm_ip.linear.weight"].shape[1] # Dict where key is transformer layer index, value is attention processor's state dict # ip_adapter state dict keys example: "0.norm_ip.linear.weight" layer_state_dict = {idx: {} for idx in range(len(self.attn_processors))} for key, weights in state_dict.items(): idx, name = key.split(".", maxsplit=1) layer_state_dict[int(idx)][name] = weights # Create IP-Adapter attention processor & load state_dict attn_procs = {} init_context = init_empty_weights if low_cpu_mem_usage else nullcontext for idx, name in enumerate(self.attn_processors.keys()): with init_context(): attn_procs[name] = SD3IPAdapterJointAttnProcessor2_0( hidden_size=hidden_size, ip_hidden_states_dim=ip_hidden_states_dim, head_dim=self.config.attention_head_dim, timesteps_emb_dim=timesteps_emb_dim, ) if not low_cpu_mem_usage: attn_procs[name].load_state_dict(layer_state_dict[idx], strict=True) else: device_map = {"": self.device} load_model_dict_into_meta( attn_procs[name], layer_state_dict[idx], device_map=device_map, dtype=self.dtype ) empty_device_cache() return attn_procs def _convert_ip_adapter_image_proj_to_diffusers( self, state_dict: Dict, low_cpu_mem_usage: bool = _LOW_CPU_MEM_USAGE_DEFAULT ) -> IPAdapterTimeImageProjection: if low_cpu_mem_usage: if is_accelerate_available(): from accelerate import init_empty_weights else: low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
) init_context = init_empty_weights if low_cpu_mem_usage else nullcontext # Convert to diffusers updated_state_dict = {} for key, value in state_dict.items(): # InstantX/SD3.5-Large-IP-Adapter if key.startswith("layers."): idx = key.split(".")[1] key = key.replace(f"layers.{idx}.0.norm1", f"layers.{idx}.ln0") key = key.replace(f"layers.{idx}.0.norm2", f"layers.{idx}.ln1") key = key.replace(f"layers.{idx}.0.to_q", f"layers.{idx}.attn.to_q") key = key.replace(f"layers.{idx}.0.to_kv", f"layers.{idx}.attn.to_kv") key = key.replace(f"layers.{idx}.0.to_out", f"layers.{idx}.attn.to_out.0") key = key.replace(f"layers.{idx}.1.0", f"layers.{idx}.adaln_norm") key = key.replace(f"layers.{idx}.1.1", f"layers.{idx}.ff.net.0.proj") key = key.replace(f"layers.{idx}.1.3", f"layers.{idx}.ff.net.2") key = key.replace(f"layers.{idx}.2.1", f"layers.{idx}.adaln_proj") updated_state_dict[key] = value # Image projection parameters embed_dim = updated_state_dict["proj_in.weight"].shape[1] output_dim = updated_state_dict["proj_out.weight"].shape[0] hidden_dim = updated_state_dict["proj_in.weight"].shape[0] heads = updated_state_dict["layers.0.attn.to_q.weight"].shape[0] // 64 num_queries = updated_state_dict["latents"].shape[1] timestep_in_dim = updated_state_dict["time_embedding.linear_1.weight"].shape[1] # Image projection with init_context(): image_proj = IPAdapterTimeImageProjection( embed_dim=embed_dim, output_dim=output_dim, hidden_dim=hidden_dim, heads=heads, num_queries=num_queries, timestep_in_dim=timestep_in_dim, ) if not low_cpu_mem_usage: image_proj.load_state_dict(updated_state_dict, strict=True) else: device_map = {"": self.device} load_model_dict_into_meta(image_proj, updated_state_dict, device_map=device_map, dtype=self.dtype) empty_device_cache() return image_proj def _load_ip_adapter_weights(self, state_dict: Dict, low_cpu_mem_usage: bool = _LOW_CPU_MEM_USAGE_DEFAULT) -> None: """Sets IP-Adapter attention processors, image projection, and loads state_dict. Args: state_dict (`Dict`): State dict with keys "ip_adapter", which contains parameters for attention processors, and "image_proj", which contains parameters for image projection net. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. """ attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dict["ip_adapter"], low_cpu_mem_usage) self.set_attn_processor(attn_procs) self.image_proj = self._convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"], low_cpu_mem_usage)
diffusers/src/diffusers/loaders/transformer_sd3.py/0
{ "file_path": "diffusers/src/diffusers/loaders/transformer_sd3.py", "repo_id": "diffusers", "token_count": 3903 }
155
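The per-layer bucketing at the top of `_convert_ip_adapter_attn_to_diffusers` is simple to illustrate on its own. The keys and tensor shapes below are made-up stand-ins for an IP-Adapter state dict, chosen only to show how `"<idx>.<name>"` keys are regrouped by transformer-layer index.

```python
# Illustrative sketch of the state-dict regrouping in _convert_ip_adapter_attn_to_diffusers.
# Keys and shapes are hypothetical, not from a real IP-Adapter checkpoint.
import torch

state_dict = {
    "0.norm_ip.linear.weight": torch.randn(8, 4),
    "0.to_k_ip.weight": torch.randn(8, 8),
    "1.norm_ip.linear.weight": torch.randn(8, 4),
    "1.to_k_ip.weight": torch.randn(8, 8),
}

num_layers = 2
layer_state_dict = {idx: {} for idx in range(num_layers)}
for key, weights in state_dict.items():
    idx, name = key.split(".", maxsplit=1)  # "0.norm_ip.linear.weight" -> ("0", "norm_ip.linear.weight")
    layer_state_dict[int(idx)][name] = weights

print(sorted(layer_state_dict[0]))  # ['norm_ip.linear.weight', 'to_k_ip.weight']
```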
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import deprecate from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0, ) from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without losing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix mid_block_add_attention (`bool`, *optional*, default to `True`): If enabled, the mid_block of the Encoder and Decoder will have attention blocks. If set to false, the mid_block will only have resnet blocks """ _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D"] @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, shift_factor: Optional[float] = None, latents_mean: Optional[Tuple[float]] = None, latents_std: Optional[Tuple[float]] = None, force_upcast: bool = True, use_quant_conv: bool = True, use_post_quant_conv: bool = True, mid_block_add_attention: bool = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, mid_block_add_attention=mid_block_add_attention, ) # pass init params to Decoder self.decoder = Decoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, mid_block_add_attention=mid_block_add_attention, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) if use_quant_conv else None self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) if use_post_quant_conv else None self.use_slicing = False self.use_tiling = False # only relevant if vae tiling is enabled self.tile_sample_min_size = self.config.sample_size sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 def enable_tiling(self, use_tiling: bool = True): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.use_tiling = use_tiling def disable_tiling(self): r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.enable_tiling(False) def enable_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self): r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.use_slicing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _encode(self, x: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_size or height > self.tile_sample_min_size): return self._tiled_encode(x) enc = self.encoder(x) if self.quant_conv is not None: enc = self.quant_conv(enc) return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. 
return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(z, return_dict=return_dict) if self.post_quant_conv is not None: z = self.post_quant_conv(z) dec = self.decoder(z) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode( self, z: torch.FloatTensor, return_dict: bool = True, generator=None ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. Returns: `torch.Tensor`: The latent representation of the encoded videos. """ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) enc = torch.cat(result_rows, dim=2) return enc def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ deprecation_message = ( "The tiled_encode implementation supporting the `return_dict` parameter is deprecated. In the future, the " "implementation of this method will be replaced with that of `_tiled_encode` and you will no longer be able " "to pass `return_dict`. You will also have to create a `DiagonalGaussianDistribution()` from the returned value." ) deprecate("tiled_encode", "1.0.0", deprecation_message, standard_warn=False) overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, z.shape[2], overlap_size): row = [] for j in range(0, z.shape[3], overlap_size): tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] if self.config.use_post_quant_conv: tile = self.post_quant_conv(tile) decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) dec = torch.cat(result_rows, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. 
For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py", "repo_id": "diffusers", "token_count": 10722 }
156
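The `AutoencoderKL` record above exposes a small public surface: `encode` returns a `DiagonalGaussianDistribution`, `decode` returns a `DecoderOutput`, and the tiling/slicing toggles switch on the memory-saving paths. The sketch below shows how those pieces are typically wired together; the checkpoint id, dtype, and tensor shapes are illustrative assumptions, not anything mandated by the file itself.

```python
# Hedged usage sketch for the AutoencoderKL record above (checkpoint id is illustrative).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
vae.enable_tiling()   # route large inputs through the tiled encode/decode paths
vae.enable_slicing()  # process the batch one sample at a time

# Placeholder batch of images in [-1, 1]; shapes are arbitrary for the sketch.
images = torch.randn(2, 3, 512, 512, dtype=torch.float16, device="cuda")

with torch.no_grad():
    posterior = vae.encode(images).latent_dist                 # DiagonalGaussianDistribution
    latents = posterior.sample() * vae.config.scaling_factor   # scaling convention from the docstring
    recon = vae.decode(latents / vae.config.scaling_factor).sample
print(recon.shape)  # torch.Size([2, 3, 512, 512])
```

Note that `scaling_factor` is applied by callers such as pipelines; the model itself returns unscaled latents, which is why the sketch scales and unscales explicitly.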
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import contextmanager from ..utils.logging import get_logger logger = get_logger(__name__) # pylint: disable=invalid-name class CacheMixin: r""" A class for enable/disabling caching techniques on diffusion models. Supported caching techniques: - [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) - [FasterCache](https://huggingface.co/papers/2410.19355) - [FirstBlockCache](https://github.com/chengzeyi/ParaAttention/blob/7a266123671b55e7e5a2fe9af3121f07a36afc78/README.md#first-block-cache-our-dynamic-caching) """ _cache_config = None @property def is_cache_enabled(self) -> bool: return self._cache_config is not None def enable_cache(self, config) -> None: r""" Enable caching techniques on the model. Args: config (`Union[PyramidAttentionBroadcastConfig]`): The configuration for applying the caching technique. Currently supported caching techniques are: - [`~hooks.PyramidAttentionBroadcastConfig`] Example: ```python >>> import torch >>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> config = PyramidAttentionBroadcastConfig( ... spatial_attention_block_skip_range=2, ... spatial_attention_timestep_skip_range=(100, 800), ... current_timestep_callback=lambda: pipe.current_timestep, ... ) >>> pipe.transformer.enable_cache(config) ``` """ from ..hooks import ( FasterCacheConfig, FirstBlockCacheConfig, PyramidAttentionBroadcastConfig, apply_faster_cache, apply_first_block_cache, apply_pyramid_attention_broadcast, ) if self.is_cache_enabled: raise ValueError( f"Caching has already been enabled with {type(self._cache_config)}. To apply a new caching technique, please disable the existing one first." 
) if isinstance(config, FasterCacheConfig): apply_faster_cache(self, config) elif isinstance(config, FirstBlockCacheConfig): apply_first_block_cache(self, config) elif isinstance(config, PyramidAttentionBroadcastConfig): apply_pyramid_attention_broadcast(self, config) else: raise ValueError(f"Cache config {type(config)} is not supported.") self._cache_config = config def disable_cache(self) -> None: from ..hooks import FasterCacheConfig, FirstBlockCacheConfig, HookRegistry, PyramidAttentionBroadcastConfig from ..hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK from ..hooks.first_block_cache import _FBC_BLOCK_HOOK, _FBC_LEADER_BLOCK_HOOK from ..hooks.pyramid_attention_broadcast import _PYRAMID_ATTENTION_BROADCAST_HOOK if self._cache_config is None: logger.warning("Caching techniques have not been enabled, so there's nothing to disable.") return registry = HookRegistry.check_if_exists_or_initialize(self) if isinstance(self._cache_config, FasterCacheConfig): registry.remove_hook(_FASTER_CACHE_DENOISER_HOOK, recurse=True) registry.remove_hook(_FASTER_CACHE_BLOCK_HOOK, recurse=True) elif isinstance(self._cache_config, FirstBlockCacheConfig): registry.remove_hook(_FBC_LEADER_BLOCK_HOOK, recurse=True) registry.remove_hook(_FBC_BLOCK_HOOK, recurse=True) elif isinstance(self._cache_config, PyramidAttentionBroadcastConfig): registry.remove_hook(_PYRAMID_ATTENTION_BROADCAST_HOOK, recurse=True) else: raise ValueError(f"Cache config {type(self._cache_config)} is not supported.") self._cache_config = None def _reset_stateful_cache(self, recurse: bool = True) -> None: from ..hooks import HookRegistry HookRegistry.check_if_exists_or_initialize(self).reset_stateful_hooks(recurse=recurse) @contextmanager def cache_context(self, name: str): r"""Context manager that provides additional methods for cache management.""" from ..hooks import HookRegistry registry = HookRegistry.check_if_exists_or_initialize(self) registry._set_context(name) yield registry._set_context(None)
diffusers/src/diffusers/models/cache_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/cache_utils.py", "repo_id": "diffusers", "token_count": 2061 }
157
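`CacheMixin` above is a thin enable/disable layer over the hook-based caching techniques. The following hedged sketch of the round trip closely follows the docstring example in the record; the pipeline and checkpoint are illustrative.

```python
# Hedged sketch: enable, use, and disable a cache on a transformer that mixes in CacheMixin.
import torch
from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16).to("cuda")

config = PyramidAttentionBroadcastConfig(
    spatial_attention_block_skip_range=2,
    spatial_attention_timestep_skip_range=(100, 800),
    current_timestep_callback=lambda: pipe.current_timestep,
)
pipe.transformer.enable_cache(config)        # CacheMixin.enable_cache
assert pipe.transformer.is_cache_enabled

video = pipe("a panda playing a guitar", num_inference_steps=50).frames[0]

pipe.transformer.disable_cache()             # removes the registered hooks and clears _cache_config
```

Calling `enable_cache` a second time without disabling raises an error, which is the invariant enforced by the `is_cache_enabled` check at the top of the method.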
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...utils import logging from ..controlnets.controlnet import ControlNetModel, ControlNetOutput from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) class MultiControlNetModel(ModelMixin): r""" Multiple `ControlNetModel` wrapper class for Multi-ControlNet This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be compatible with `ControlNetModel`. Args: controlnets (`List[ControlNetModel]`): Provides additional conditioning to the unet during the denoising process. You must set multiple `ControlNetModel` as a list. """ def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]): super().__init__() self.nets = nn.ModuleList(controlnets) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): down_samples, mid_sample = controlnet( sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, conditioning_scale=scale, class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict, ) # merge samples if i == 0: down_block_res_samples, mid_block_res_sample = down_samples, mid_sample else: down_block_res_samples = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = True, variant: Optional[str] = None, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `[`~models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. 
""" for idx, controlnet in enumerate(self.nets): suffix = "" if idx == 0 else f"_{idx}" controlnet.save_pretrained( save_directory + suffix, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, ) @classmethod def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_path (`os.PathLike`): A path to a *directory* containing model weights saved using [`~models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained`], e.g., `./my_model_directory/controlnet`. torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading by not initializing the weights and only loading the pre-trained weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, setting this argument to `True` will raise an error. variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is ignored when using `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. """ idx = 0 controlnets = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs) controlnets.append(controlnet) idx += 1 model_path_to_load = pretrained_model_path + f"_{idx}" logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.") if len(controlnets) == 0: raise ValueError( f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." ) return cls(controlnets)
diffusers/src/diffusers/models/controlnets/multicontrolnet.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/multicontrolnet.py", "repo_id": "diffusers", "token_count": 3836 }
158
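`MultiControlNetModel` above is mostly about the directory naming convention used by `save_pretrained` and `from_pretrained` (`<dir>`, `<dir>_1`, `<dir>_2`, ...). A hedged sketch of that convention follows; the ControlNet checkpoints and output path are illustrative assumptions.

```python
# Hedged sketch of the MultiControlNet save/load convention described above.
from diffusers import ControlNetModel
from diffusers.models.controlnets.multicontrolnet import MultiControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")

multi = MultiControlNetModel([canny, depth])

# Writes ./multi_cn for the first net and ./multi_cn_1 for the second (one suffix per extra net).
multi.save_pretrained("./multi_cn")

# Walks ./multi_cn, ./multi_cn_1, ... until a directory is missing, then wraps the results.
reloaded = MultiControlNetModel.from_pretrained("./multi_cn")
assert len(reloaded.nets) == 2
```

At inference time the wrapper's `forward` expects `controlnet_cond` and `conditioning_scale` as lists with one entry per wrapped net, and merges the per-net residuals by element-wise addition, as the loop in the record shows.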
# Copyright 2025 AuraFlow Authors, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention_processor import ( Attention, AttentionProcessor, AuraFlowAttnProcessor2_0, FusedAuraFlowAttnProcessor2_0, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormZero, FP32LayerNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Taken from the original aura flow inference code. def find_multiple(n: int, k: int) -> int: if n % k == 0: return n return n + k - (n % k) # Aura Flow patch embed doesn't use convs for projections. # Additionally, it uses learned positional embeddings. class AuraFlowPatchEmbed(nn.Module): def __init__( self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, pos_embed_max_size=None, ): super().__init__() self.num_patches = (height // patch_size) * (width // patch_size) self.pos_embed_max_size = pos_embed_max_size self.proj = nn.Linear(patch_size * patch_size * in_channels, embed_dim) self.pos_embed = nn.Parameter(torch.randn(1, pos_embed_max_size, embed_dim) * 0.1) self.patch_size = patch_size self.height, self.width = height // patch_size, width // patch_size self.base_size = height // patch_size def pe_selection_index_based_on_dim(self, h, w): # select subset of positional embedding based on H, W, where H, W is size of latent # PE will be viewed as 2d-grid, and H/p x W/p of the PE will be selected # because original input are in flattened format, we have to flatten this 2d grid as well. 
h_p, w_p = h // self.patch_size, w // self.patch_size h_max, w_max = int(self.pos_embed_max_size**0.5), int(self.pos_embed_max_size**0.5) # Calculate the top-left corner indices for the centered patch grid starth = h_max // 2 - h_p // 2 startw = w_max // 2 - w_p // 2 # Generate the row and column indices for the desired patch grid rows = torch.arange(starth, starth + h_p, device=self.pos_embed.device) cols = torch.arange(startw, startw + w_p, device=self.pos_embed.device) # Create a 2D grid of indices row_indices, col_indices = torch.meshgrid(rows, cols, indexing="ij") # Convert the 2D grid indices to flattened 1D indices selected_indices = (row_indices * w_max + col_indices).flatten() return selected_indices def forward(self, latent): batch_size, num_channels, height, width = latent.size() latent = latent.view( batch_size, num_channels, height // self.patch_size, self.patch_size, width // self.patch_size, self.patch_size, ) latent = latent.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2) latent = self.proj(latent) pe_index = self.pe_selection_index_based_on_dim(height, width) return latent + self.pos_embed[:, pe_index] # Taken from the original Aura flow inference code. # Our feedforward only has GELU but Aura uses SiLU. class AuraFlowFeedForward(nn.Module): def __init__(self, dim, hidden_dim=None) -> None: super().__init__() if hidden_dim is None: hidden_dim = 4 * dim final_hidden_dim = int(2 * hidden_dim / 3) final_hidden_dim = find_multiple(final_hidden_dim, 256) self.linear_1 = nn.Linear(dim, final_hidden_dim, bias=False) self.linear_2 = nn.Linear(dim, final_hidden_dim, bias=False) self.out_projection = nn.Linear(final_hidden_dim, dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.silu(self.linear_1(x)) * self.linear_2(x) x = self.out_projection(x) return x class AuraFlowPreFinalBlock(nn.Module): def __init__(self, embedding_dim: int, conditioning_embedding_dim: int): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=False) def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) scale, shift = torch.chunk(emb, 2, dim=1) x = x * (1 + scale)[:, None, :] + shift[:, None, :] return x @maybe_allow_in_graph class AuraFlowSingleTransformerBlock(nn.Module): """Similar to `AuraFlowJointTransformerBlock` with a single DiT instead of an MMDiT.""" def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") processor = AuraFlowAttnProcessor2_0() self.attn = Attention( query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="fp32_layer_norm", out_dim=dim, bias=False, out_bias=False, processor=processor, ) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) def forward( self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor, attention_kwargs: Optional[Dict[str, Any]] = None, ): residual = hidden_states attention_kwargs = attention_kwargs or {} # Norm + Projection. norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # Attention. attn_output = self.attn(hidden_states=norm_hidden_states, **attention_kwargs) # Process attention outputs for the `hidden_states`. 
hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(hidden_states) hidden_states = gate_mlp.unsqueeze(1) * ff_output hidden_states = residual + hidden_states return hidden_states @maybe_allow_in_graph class AuraFlowJointTransformerBlock(nn.Module): r""" Transformer block for Aura Flow. Similar to SD3 MMDiT. Differences (non-exhaustive): * QK Norm in the attention blocks * No bias in the attention blocks * Most LayerNorms are in FP32 Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. is_last (`bool`): Boolean to determine if this is the last block in the model. """ def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") self.norm1_context = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm") processor = AuraFlowAttnProcessor2_0() self.attn = Attention( query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, added_proj_bias=False, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="fp32_layer_norm", out_dim=dim, bias=False, out_bias=False, processor=processor, context_pre_only=False, ) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) self.norm2_context = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff_context = AuraFlowFeedForward(dim, dim * 4) def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor, attention_kwargs: Optional[Dict[str, Any]] = None, ): residual = hidden_states residual_context = encoder_hidden_states attention_kwargs = attention_kwargs or {} # Norm + Projection. norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # Attention. attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, **attention_kwargs, ) # Process attention outputs for the `hidden_states`. hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] hidden_states = gate_mlp.unsqueeze(1) * self.ff(hidden_states) hidden_states = residual + hidden_states # Process attention outputs for the `encoder_hidden_states`. encoder_hidden_states = self.norm2_context(residual_context + c_gate_msa.unsqueeze(1) * context_attn_output) encoder_hidden_states = encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] encoder_hidden_states = c_gate_mlp.unsqueeze(1) * self.ff_context(encoder_hidden_states) encoder_hidden_states = residual_context + encoder_hidden_states return encoder_hidden_states, hidden_states class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): r""" A 2D Transformer model as introduced in AuraFlow (https://blog.fal.ai/auraflow/). Parameters: sample_size (`int`): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. 
patch_size (`int`): Patch size to turn the input data into small patches. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input. num_mmdit_layers (`int`, *optional*, defaults to 4): The number of layers of MMDiT Transformer blocks to use. num_single_dit_layers (`int`, *optional*, defaults to 32): The number of layers of Transformer blocks to use. These blocks use concatenated image and text representations. attention_head_dim (`int`, *optional*, defaults to 256): The number of channels in each head. num_attention_heads (`int`, *optional*, defaults to 12): The number of heads to use for multi-head attention. joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`. out_channels (`int`, defaults to 4): Number of output channels. pos_embed_max_size (`int`, defaults to 1024): Maximum positions to embed from the image latents. """ _no_split_modules = ["AuraFlowJointTransformerBlock", "AuraFlowSingleTransformerBlock", "AuraFlowPatchEmbed"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: int = 64, patch_size: int = 2, in_channels: int = 4, num_mmdit_layers: int = 4, num_single_dit_layers: int = 32, attention_head_dim: int = 256, num_attention_heads: int = 12, joint_attention_dim: int = 2048, caption_projection_dim: int = 3072, out_channels: int = 4, pos_embed_max_size: int = 1024, ): super().__init__() default_out_channels = in_channels self.out_channels = out_channels if out_channels is not None else default_out_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = AuraFlowPatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, pos_embed_max_size=pos_embed_max_size, ) self.context_embedder = nn.Linear( self.config.joint_attention_dim, self.config.caption_projection_dim, bias=False ) self.time_step_embed = Timesteps(num_channels=256, downscale_freq_shift=0, scale=1000, flip_sin_to_cos=True) self.time_step_proj = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim) self.joint_transformer_blocks = nn.ModuleList( [ AuraFlowJointTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for i in range(self.config.num_mmdit_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ AuraFlowSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for _ in range(self.config.num_single_dit_layers) ] ) self.norm_out = AuraFlowPreFinalBlock(self.inner_dim, self.inner_dim) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False) # https://huggingface.co/papers/2309.16588 # prevents artifacts in the attention maps self.register_tokens = nn.Parameter(torch.randn(1, 8, self.inner_dim) * 0.02) self.gradient_checkpointing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedAuraFlowAttnProcessor2_0 def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAuraFlowAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. 
</Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, timestep: torch.LongTensor = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) height, width = hidden_states.shape[-2:] # Apply patch embedding, timestep embedding, and project the caption embeddings. hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too. temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype) temb = self.time_step_proj(temb) encoder_hidden_states = self.context_embedder(encoder_hidden_states) encoder_hidden_states = torch.cat( [self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1 ) # MMDiT blocks. for index_block, block in enumerate(self.joint_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, attention_kwargs=attention_kwargs, ) # Single DiT blocks that combine the `hidden_states` (image) and `encoder_hidden_states` (text) if len(self.single_transformer_blocks) > 0: encoder_seq_len = encoder_hidden_states.size(1) combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: combined_hidden_states = self._gradient_checkpointing_func( block, combined_hidden_states, temb, ) else: combined_hidden_states = block( hidden_states=combined_hidden_states, temb=temb, attention_kwargs=attention_kwargs ) hidden_states = combined_hidden_states[:, encoder_seq_len:] hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) # unpatchify patch_size = self.config.patch_size out_channels = self.config.out_channels height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape( shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size) ) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py", "repo_id": "diffusers", "token_count": 10089 }
159
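To make the AuraFlow transformer's shapes concrete, the sketch below instantiates a deliberately tiny configuration and runs one forward pass on random tensors. Every dimension here is an assumption chosen only so the example is cheap to run; in particular, `caption_projection_dim` is set equal to `num_attention_heads * attention_head_dim` because the learned register tokens are concatenated with the projected caption embeddings.

```python
# Hedged sketch: a toy-sized AuraFlowTransformer2DModel forward pass (all dims illustrative).
import torch
from diffusers import AuraFlowTransformer2DModel

model = AuraFlowTransformer2DModel(
    sample_size=32,            # latent height/width
    patch_size=2,
    in_channels=4,
    num_mmdit_layers=1,
    num_single_dit_layers=1,
    attention_head_dim=8,
    num_attention_heads=4,     # inner_dim = 4 * 8 = 32
    joint_attention_dim=32,    # text-encoder hidden size fed to context_embedder
    caption_projection_dim=32, # must match inner_dim (register tokens are concatenated)
    out_channels=4,
    pos_embed_max_size=256,    # (32 / 2) ** 2 patches
)

latents = torch.randn(1, 4, 32, 32)   # (batch, in_channels, height, width)
captions = torch.randn(1, 77, 32)     # (batch, text_seq_len, joint_attention_dim)
timestep = torch.tensor([500.0])      # dummy timestep

with torch.no_grad():
    out = model(hidden_states=latents, encoder_hidden_states=captions, timestep=timestep)
print(out.sample.shape)  # torch.Size([1, 4, 32, 32]) after unpatchify
```

The single-DiT blocks operate on the concatenation of text and image tokens, so the text portion is sliced off again (`combined_hidden_states[:, encoder_seq_len:]`) before the final norm and projection, which is why the output recovers the original latent resolution.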
# Copyright 2025 Black Forest Labs, The HuggingFace Team and loadstone-rock . All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.import_utils import is_torch_npu_available from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionMixin, FeedForward from ..cache_utils import CacheMixin from ..embeddings import FluxPosEmbed, PixArtAlphaTextProjection, Timesteps, get_timestep_embedding from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import CombinedTimestepLabelEmbeddings, FP32LayerNorm, RMSNorm from .transformer_flux import FluxAttention, FluxAttnProcessor logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ChromaAdaLayerNormZeroPruned(nn.Module): r""" Norm layer adaptive layer norm zero (adaLN-Zero). Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None, norm_type="layer_norm", bias=True): super().__init__() if num_embeddings is not None: self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) else: self.emb = None if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) elif norm_type == "fp32_layer_norm": self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False) else: raise ValueError( f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." ) def forward( self, x: torch.Tensor, timestep: Optional[torch.Tensor] = None, class_labels: Optional[torch.LongTensor] = None, hidden_dtype: Optional[torch.dtype] = None, emb: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: if self.emb is not None: emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.flatten(1, 2).chunk(6, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class ChromaAdaLayerNormZeroSinglePruned(nn.Module): r""" Norm layer adaptive layer norm zero (adaLN-Zero). Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. 
""" def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True): super().__init__() if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) else: raise ValueError( f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." ) def forward( self, x: torch.Tensor, emb: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: shift_msa, scale_msa, gate_msa = emb.flatten(1, 2).chunk(3, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa class ChromaAdaLayerNormContinuousPruned(nn.Module): r""" Adaptive normalization layer with a norm layer (layer_norm or rms_norm). Args: embedding_dim (`int`): Embedding dimension to use during projection. conditioning_embedding_dim (`int`): Dimension of the input condition. elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. eps (`float`, defaults to 1e-5): Epsilon factor. bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. norm_type (`str`, defaults to `"layer_norm"`): Normalization layer to use. Values supported: "layer_norm", "rms_norm". """ def __init__( self, embedding_dim: int, conditioning_embedding_dim: int, # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters # because the output is immediately scaled and shifted by the projected conditioning embeddings. # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. # However, this is how it was implemented in the original code, and it's rather likely you should # set `elementwise_affine` to False. elementwise_affine=True, eps=1e-5, bias=True, norm_type="layer_norm", ): super().__init__() if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine, bias) elif norm_type == "rms_norm": self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) else: raise ValueError(f"unknown norm_type {norm_type}") def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: # convert back to the original dtype in case `conditioning_embedding`` is upcasted to float32 (needed for hunyuanDiT) shift, scale = torch.chunk(emb.flatten(1, 2).to(x.dtype), 2, dim=1) x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] return x class ChromaCombinedTimestepTextProjEmbeddings(nn.Module): def __init__(self, num_channels: int, out_dim: int): super().__init__() self.time_proj = Timesteps(num_channels=num_channels, flip_sin_to_cos=True, downscale_freq_shift=0) self.guidance_proj = Timesteps(num_channels=num_channels, flip_sin_to_cos=True, downscale_freq_shift=0) self.register_buffer( "mod_proj", get_timestep_embedding( torch.arange(out_dim) * 1000, 2 * num_channels, flip_sin_to_cos=True, downscale_freq_shift=0 ), persistent=False, ) def forward(self, timestep: torch.Tensor) -> torch.Tensor: mod_index_length = self.mod_proj.shape[0] batch_size = timestep.shape[0] timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype) guidance_proj = self.guidance_proj(torch.tensor([0] * batch_size)).to( dtype=timestep.dtype, device=timestep.device ) mod_proj = self.mod_proj.to(dtype=timesteps_proj.dtype, device=timesteps_proj.device).repeat(batch_size, 1, 1) timestep_guidance = ( torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1) ) input_vec = torch.cat([timestep_guidance, mod_proj], 
dim=-1) return input_vec.to(timestep.dtype) class ChromaApproximator(nn.Module): def __init__(self, in_dim: int, out_dim: int, hidden_dim: int, n_layers: int = 5): super().__init__() self.in_proj = nn.Linear(in_dim, hidden_dim, bias=True) self.layers = nn.ModuleList( [PixArtAlphaTextProjection(hidden_dim, hidden_dim, act_fn="silu") for _ in range(n_layers)] ) self.norms = nn.ModuleList([nn.RMSNorm(hidden_dim) for _ in range(n_layers)]) self.out_proj = nn.Linear(hidden_dim, out_dim) def forward(self, x): x = self.in_proj(x) for layer, norms in zip(self.layers, self.norms): x = x + layer(norms(x)) return self.out_proj(x) @maybe_allow_in_graph class ChromaSingleTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0, ): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = ChromaAdaLayerNormZeroSinglePruned(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) if is_torch_npu_available(): from ..attention_processor import FluxAttnProcessor2_0_NPU deprecation_message = ( "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors " "should be set explicitly using the `set_attn_processor` method." ) deprecate("npu_processor", "0.34.0", deprecation_message) processor = FluxAttnProcessor2_0_NPU() else: processor = FluxAttnProcessor() self.attn = FluxAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=processor, eps=1e-6, pre_only=True, ) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> torch.Tensor: residual = hidden_states norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) joint_attention_kwargs = joint_attention_kwargs or {} if attention_mask is not None: attention_mask = attention_mask[:, None, None, :] * attention_mask[:, None, :, None] attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, attention_mask=attention_mask, **joint_attention_kwargs, ) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) return hidden_states @maybe_allow_in_graph class ChromaTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6, ): super().__init__() self.norm1 = ChromaAdaLayerNormZeroPruned(dim) self.norm1_context = ChromaAdaLayerNormZeroPruned(dim) self.attn = FluxAttention( query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=FluxAttnProcessor(), eps=eps, ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def forward( self, hidden_states: 
torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: temb_img, temb_txt = temb[:, :6], temb[:, 6:] norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb_img) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb_txt ) joint_attention_kwargs = joint_attention_kwargs or {} if attention_mask is not None: attention_mask = attention_mask[:, None, None, :] * attention_mask[:, None, :, None] # Attention. attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, attention_mask=attention_mask, **joint_attention_kwargs, ) if len(attention_outputs) == 2: attn_output, context_attn_output = attention_outputs elif len(attention_outputs) == 3: attn_output, context_attn_output, ip_attn_output = attention_outputs # Process attention outputs for the `hidden_states`. attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if len(attention_outputs) == 3: hidden_states = hidden_states + ip_attn_output # Process attention outputs for the `encoder_hidden_states`. context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class ChromaTransformer2DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin, AttentionMixin, ): """ The Transformer model introduced in Flux, modified for Chroma. Reference: https://huggingface.co/lodestones/Chroma Args: patch_size (`int`, defaults to `1`): Patch size to turn the input data into small patches. in_channels (`int`, defaults to `64`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. If not specified, it defaults to `in_channels`. num_layers (`int`, defaults to `19`): The number of layers of dual stream DiT blocks to use. num_single_layers (`int`, defaults to `38`): The number of layers of single stream DiT blocks to use. attention_head_dim (`int`, defaults to `128`): The number of dimensions to use for each attention head. num_attention_heads (`int`, defaults to `24`): The number of attention heads to use. joint_attention_dim (`int`, defaults to `4096`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). 
axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): The dimensions to use for the rotary positional embeddings. """ _supports_gradient_checkpointing = True _no_split_modules = ["ChromaTransformerBlock", "ChromaSingleTransformerBlock"] _repeated_blocks = ["ChromaTransformerBlock", "ChromaSingleTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, out_channels: Optional[int] = None, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, axes_dims_rope: Tuple[int, ...] = (16, 56, 56), approximator_num_channels: int = 64, approximator_hidden_dim: int = 5120, approximator_layers: int = 5, ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) self.time_text_embed = ChromaCombinedTimestepTextProjEmbeddings( num_channels=approximator_num_channels // 4, out_dim=3 * num_single_layers + 2 * 6 * num_layers + 2, ) self.distilled_guidance_layer = ChromaApproximator( in_dim=approximator_num_channels, out_dim=self.inner_dim, hidden_dim=approximator_hidden_dim, n_layers=approximator_layers, ) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ ChromaTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ ChromaSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_single_layers) ] ) self.norm_out = ChromaAdaLayerNormContinuousPruned( self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6 ) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, attention_mask: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_block_samples=None, controlnet_single_block_samples=None, return_dict: bool = True, controlnet_blocks_repeat: bool = False, ) -> Union[torch.Tensor, Transformer2DModelOutput]: """ The [`FluxTransformer2DModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): Input `hidden_states`. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." ) hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 input_vec = self.time_text_embed(timestep) pooled_temb = self.distilled_guidance_layer(input_vec) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if txt_ids.ndim == 3: logger.warning( "Passing `txt_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning( "Passing `img_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs: ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds") ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds) joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states}) for index_block, block in enumerate(self.transformer_blocks): img_offset = 3 * len(self.single_transformer_blocks) txt_offset = img_offset + 6 * len(self.transformer_blocks) img_modulation = img_offset + 6 * index_block text_modulation = txt_offset + 6 * index_block temb = torch.cat( ( pooled_temb[:, img_modulation : img_modulation + 6], pooled_temb[:, text_modulation : text_modulation + 6], ), dim=1, ) if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, attention_mask ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, attention_mask=attention_mask, joint_attention_kwargs=joint_attention_kwargs, ) # controlnet residual if controlnet_block_samples is not None: interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) interval_control = int(np.ceil(interval_control)) # For Xlabs ControlNet. 
if controlnet_blocks_repeat: hidden_states = ( hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] ) else: hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for index_block, block in enumerate(self.single_transformer_blocks): start_idx = 3 * index_block temb = pooled_temb[:, start_idx : start_idx + 3] if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, temb, image_rotary_emb, ) else: hidden_states = block( hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, attention_mask=attention_mask, joint_attention_kwargs=joint_attention_kwargs, ) # controlnet residual if controlnet_single_block_samples is not None: interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) interval_control = int(np.ceil(interval_control)) hidden_states[:, encoder_hidden_states.shape[1] :, ...] = ( hidden_states[:, encoder_hidden_states.shape[1] :, ...] + controlnet_single_block_samples[index_block // interval_control] ) hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] temb = pooled_temb[:, -2:] hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/transformer_chroma.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_chroma.py", "repo_id": "diffusers", "token_count": 12382 }
160
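A note on the Chroma file above: instead of per-block modulation layers, the model produces one pooled modulation tensor with the distilled guidance approximator, and the forward pass slices it per block. The sketch below replays that slicing arithmetic on a dummy tensor with deliberately tiny, made-up sizes (the real defaults are 19 dual-stream and 38 single-stream blocks); it is a standalone illustration, not code taken from the model.

import torch

# Illustrative sizes only; Chroma's defaults are num_layers=19, num_single_layers=38.
batch, num_layers, num_single_layers, inner_dim = 1, 2, 4, 8

# One modulation row per "slot": 3 per single-stream block, 6 per stream per
# dual-stream block, plus 2 for the final pruned AdaLayerNorm. This matches the
# out_dim passed to the combined timestep embedding in the file above.
num_slots = 3 * num_single_layers + 2 * 6 * num_layers + 2
pooled_temb = torch.randn(batch, num_slots, inner_dim)  # stand-in for the approximator output

img_offset = 3 * num_single_layers
txt_offset = img_offset + 6 * num_layers

# Dual-stream blocks consume 6 image rows + 6 text rows each.
for index_block in range(num_layers):
    img_mod = img_offset + 6 * index_block
    txt_mod = txt_offset + 6 * index_block
    temb = torch.cat(
        (pooled_temb[:, img_mod : img_mod + 6], pooled_temb[:, txt_mod : txt_mod + 6]),
        dim=1,
    )
    assert temb.shape == (batch, 12, inner_dim)

# Single-stream blocks consume 3 rows each.
for index_block in range(num_single_layers):
    start = 3 * index_block
    temb = pooled_temb[:, start : start + 3]
    assert temb.shape == (batch, 3, inner_dim)

# The pruned output norm consumes the last two rows (shift and scale).
final_temb = pooled_temb[:, -2:]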
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..attention import BasicTransformerBlock, TemporalBasicTransformerBlock from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import AlphaBlender @dataclass class TransformerTemporalModelOutput(BaseOutput): """ The output of [`TransformerTemporalModel`]. Args: sample (`torch.Tensor` of shape `(batch_size x num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. """ sample: torch.Tensor class TransformerTemporalModel(ModelMixin, ConfigMixin): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. attention_bias (`bool`, *optional*): Configure if the `TransformerBlock` attention should contain a bias parameter. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported activation functions. norm_elementwise_affine (`bool`, *optional*): Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization. double_self_attention (`bool`, *optional*): Configure if each `TransformerBlock` should contain two self-attention layers. positional_embeddings: (`str`, *optional*): The type of positional embeddings to apply to the sequence input before passing use. num_positional_embeddings: (`int`, *optional*): The maximum length of the sequence over which to apply positional embeddings. 
""" _skip_layerwise_casting_patterns = ["norm"] @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings, ) for d in range(num_layers) ] ) self.proj_out = nn.Linear(inner_dim, in_channels) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.LongTensor] = None, timestep: Optional[torch.LongTensor] = None, class_labels: torch.LongTensor = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> TransformerTemporalModelOutput: """ The [`TransformerTemporal`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous): Input hidden_states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. num_frames (`int`, *optional*, defaults to 1): The number of frames to be processed per batch. This is used to reshape the hidden states. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain tuple. Returns: [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. 
Input batch_frames, channel, height, width = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(hidden_states) # 2. Blocks for block in self.transformer_blocks: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states[None, None, :] .reshape(batch_size, height, width, num_frames, channel) .permute(0, 3, 4, 1, 2) .contiguous() ) hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output) class TransformerSpatioTemporalModel(nn.Module): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). out_channels (`int`, *optional*): The number of channels in the output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. """ def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: int = 320, out_channels: Optional[int] = None, num_layers: int = 1, cross_attention_dim: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.inner_dim = inner_dim # 2. Define input layers self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for d in range(num_layers) ] ) time_mix_inner_dim = inner_dim self.temporal_transformer_blocks = nn.ModuleList( [ TemporalBasicTransformerBlock( inner_dim, time_mix_inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for _ in range(num_layers) ] ) time_embed_dim = in_channels * 4 self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels) self.time_proj = Timesteps(in_channels, True, 0) self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy="learned_with_images") # 4. 
Define output layers self.out_channels = in_channels if out_channels is None else out_channels # TODO: should use out_channels for continuous projections self.proj_out = nn.Linear(inner_dim, in_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, image_only_indicator: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ Args: hidden_states (`torch.Tensor` of shape `(batch size, channel, height, width)`): Input hidden_states. num_frames (`int`): The number of frames to be processed per batch. This is used to reshape the hidden states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. image_only_indicator (`torch.LongTensor` of shape `(batch size, num_frames)`, *optional*): A tensor indicating whether the input contains only images. 1 indicates that the input contains only images, 0 indicates that the input contains video frames. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain tuple. Returns: [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformers.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. Input batch_frames, _, height, width = hidden_states.shape num_frames = image_only_indicator.shape[-1] batch_size = batch_frames // num_frames time_context = encoder_hidden_states time_context_first_timestep = time_context[None, :].reshape( batch_size, num_frames, -1, time_context.shape[-1] )[:, 0] time_context = time_context_first_timestep[:, None].broadcast_to( batch_size, height * width, time_context.shape[-2], time_context.shape[-1] ) time_context = time_context.reshape(batch_size * height * width, -1, time_context.shape[-1]) residual = hidden_states hidden_states = self.norm(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) num_frames_emb = torch.arange(num_frames, device=hidden_states.device) num_frames_emb = num_frames_emb.repeat(batch_size, 1) num_frames_emb = num_frames_emb.reshape(-1) t_emb = self.time_proj(num_frames_emb) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_pos_embed(t_emb) emb = emb[:, None, :] # 2. 
Blocks for block, temporal_block in zip(self.transformer_blocks, self.temporal_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, None, encoder_hidden_states, None ) else: hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states_mix = hidden_states hidden_states_mix = hidden_states_mix + emb hidden_states_mix = temporal_block( hidden_states_mix, num_frames=num_frames, encoder_hidden_states=time_context, ) hidden_states = self.time_mixer( x_spatial=hidden_states, x_temporal=hidden_states_mix, image_only_indicator=image_only_indicator, ) # 3. Output hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/transformer_temporal.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_temporal.py", "repo_id": "diffusers", "token_count": 7189 }
161
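For orientation on the temporal transformer above, here is a hedged usage sketch: it builds a deliberately tiny configuration (not the defaults of any released checkpoint) and runs a forward pass on random data; the import path simply mirrors this file's location in the repo.

import torch
from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel

# Tiny illustrative config: inner_dim = 2 * 8 = 16, projected from 32 input channels.
model = TransformerTemporalModel(
    num_attention_heads=2,
    attention_head_dim=8,
    in_channels=32,
    num_layers=1,
)

batch, frames, height, width = 2, 4, 8, 8
# Frames are pre-flattened into the batch dimension, as the forward method expects.
hidden_states = torch.randn(batch * frames, 32, height, width)

out = model(hidden_states, num_frames=frames).sample
print(out.shape)  # torch.Size([8, 32, 8, 8]) -- same shape as the input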
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import BaseOutput, logging from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_3d_blocks import UNetMidBlockSpatioTemporal, get_down_block, get_up_block logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetSpatioTemporalConditionOutput(BaseOutput): """ The output of [`UNetSpatioTemporalConditionModel`]. Args: sample (`torch.Tensor` of shape `(batch_size, num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.Tensor = None class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional Spatio-Temporal UNet model that takes a noisy video frames, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 8): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. addition_time_embed_dim: (`int`, defaults to 256): Dimension to to encode the additional time ids. projection_class_embeddings_input_dim (`int`, defaults to 768): The dimension of the projection of encoded `added_time_ids`. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unets.unet_3d_blocks.CrossAttnDownBlockSpatioTemporal`], [`~models.unets.unet_3d_blocks.CrossAttnUpBlockSpatioTemporal`], [`~models.unets.unet_3d_blocks.UNetMidBlockSpatioTemporal`]. num_attention_heads (`int`, `Tuple[int]`, defaults to `(5, 10, 10, 20)`): The number of attention heads. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 8, out_channels: int = 4, down_block_types: Tuple[str] = ( "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), up_block_types: Tuple[str] = ( "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ), block_out_channels: Tuple[int] = (320, 640, 1280, 1280), addition_time_embed_dim: int = 256, projection_class_embeddings_input_dim: int = 768, layers_per_block: Union[int, Tuple[int]] = 2, cross_attention_dim: Union[int, Tuple[int]] = 1024, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, num_attention_heads: Union[int, Tuple[int]] = (5, 10, 20, 20), num_frames: int = 25, ): super().__init__() self.sample_size = sample_size # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=3, padding=1, ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, downscale_freq_shift=0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.add_time_proj = Timesteps(addition_time_embed_dim, True, downscale_freq_shift=0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-5, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], resnet_act_fn="silu", ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlockSpatioTemporal( block_out_channels[-1], temb_channels=blocks_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block[-1], cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-5, resolution_idx=i, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], resnet_act_fn="silu", ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-5) 
self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=3, padding=1, ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors( name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor], ): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). 
""" if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, added_time_ids: torch.Tensor, return_dict: bool = True, ) -> Union[UNetSpatioTemporalConditionOutput, Tuple]: r""" The [`UNetSpatioTemporalConditionModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states with shape `(batch, sequence_length, cross_attention_dim)`. added_time_ids: (`torch.Tensor`): The additional time ids with shape `(batch, num_additional_ids)`. These are encoded with sinusoidal embeddings and added to the time embeddings. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] instead of a plain tuple. Returns: [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_slatio_temporal.UNetSpatioTemporalConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML batch_size, num_frames = sample.shape[:2] timesteps = timesteps.expand(batch_size) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. 
t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb) time_embeds = self.add_time_proj(added_time_ids.flatten()) time_embeds = time_embeds.reshape((batch_size, -1)) time_embeds = time_embeds.to(emb.dtype) aug_emb = self.add_embedding(time_embeds) emb = emb + aug_emb # Flatten the batch and frames dimensions # sample: [batch, frames, channels, height, width] -> [batch * frames, channels, height, width] sample = sample.flatten(0, 1) # Repeat the embeddings num_video_frames times # emb: [batch, channels] -> [batch * frames, channels] emb = emb.repeat_interleave(num_frames, dim=0, output_size=emb.shape[0] * num_frames) # encoder_hidden_states: [batch, 1, channels] -> [batch * frames, 1, channels] encoder_hidden_states = encoder_hidden_states.repeat_interleave( num_frames, dim=0, output_size=encoder_hidden_states.shape[0] * num_frames ) # 2. pre-process sample = self.conv_in(sample) image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) else: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, image_only_indicator=image_only_indicator, ) down_block_res_samples += res_samples # 4. mid sample = self.mid_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, ) # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, image_only_indicator=image_only_indicator, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, image_only_indicator=image_only_indicator, ) # 6. post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # 7. Reshape back to original shape sample = sample.reshape(batch_size, num_frames, *sample.shape[1:]) if not return_dict: return (sample,) return UNetSpatioTemporalConditionOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_spatio_temporal_condition.py", "repo_id": "diffusers", "token_count": 10387 }
162
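Before the spatio-temporal UNet above runs its blocks, its forward method folds frames into the batch dimension and repeats the per-video conditioning once per frame. The sketch below replays only that tensor bookkeeping on dummy data; the embedding widths quoted in the comments (1280 = 4 * block_out_channels[0], cross_attention_dim 1024) refer to the default config and nothing here touches the model itself.

import torch

# Shapes from the forward docstring: (batch, num_frames, channels, height, width).
batch, num_frames, channels, height, width = 1, 4, 8, 16, 16
sample = torch.randn(batch, num_frames, channels, height, width)
emb = torch.randn(batch, 1280)                       # time embedding, one row per video
encoder_hidden_states = torch.randn(batch, 1, 1024)  # per-video conditioning

# Fold frames into the batch so the spatial blocks treat each frame as an image ...
sample = sample.flatten(0, 1)                        # (batch * num_frames, channels, h, w)

# ... and repeat the conditioning so every frame sees its video's embedding.
emb = emb.repeat_interleave(num_frames, dim=0)
encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0)

# The temporal blocks later regroup frames via this (batch, num_frames) indicator.
image_only_indicator = torch.zeros(batch, num_frames)

print(sample.shape, emb.shape, encoder_hidden_states.shape)
# torch.Size([4, 8, 16, 16]) torch.Size([4, 1280]) torch.Size([4, 1, 1024])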
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import re from collections import OrderedDict from dataclasses import dataclass, field, fields from typing import Any, Dict, List, Literal, Optional, Type, Union import torch from ..configuration_utils import ConfigMixin, FrozenDict from ..utils import is_torch_available, logging if is_torch_available(): pass logger = logging.get_logger(__name__) # pylint: disable=invalid-name class InsertableDict(OrderedDict): def insert(self, key, value, index): items = list(self.items()) # Remove key if it already exists to avoid duplicates items = [(k, v) for k, v in items if k != key] # Insert at the specified index items.insert(index, (key, value)) # Clear and update self self.clear() self.update(items) # Return self for method chaining return self def __repr__(self): if not self: return "InsertableDict()" items = [] for i, (key, value) in enumerate(self.items()): if isinstance(value, type): # For classes, show class name and <class ...> obj_repr = f"<class '{value.__module__}.{value.__name__}'>" else: # For objects (instances) and other types, show class name and module obj_repr = f"<obj '{value.__class__.__module__}.{value.__class__.__name__}'>" items.append(f"{i}: ({repr(key)}, {obj_repr})") return "InsertableDict([\n " + ",\n ".join(items) + "\n])" # YiYi TODO: # 1. validate the dataclass fields # 2. improve the docstring and potentially add a validator for load methods, make sure they are valid inputs to pass to from_pretrained() @dataclass class ComponentSpec: """Specification for a pipeline component. A component can be created in two ways: 1. From scratch using __init__ with a config dict 2. using `from_pretrained` Attributes: name: Name of the component type_hint: Type of the component (e.g. UNet2DConditionModel) description: Optional description of the component config: Optional config dict for __init__ creation repo: Optional repo path for from_pretrained creation subfolder: Optional subfolder in repo variant: Optional variant in repo revision: Optional revision in repo default_creation_method: Preferred creation method - "from_config" or "from_pretrained" """ name: Optional[str] = None type_hint: Optional[Type] = None description: Optional[str] = None config: Optional[FrozenDict] = None # YiYi Notes: should we change it to pretrained_model_name_or_path for consistency? 
a bit long for a field name repo: Optional[Union[str, List[str]]] = field(default=None, metadata={"loading": True}) subfolder: Optional[str] = field(default="", metadata={"loading": True}) variant: Optional[str] = field(default=None, metadata={"loading": True}) revision: Optional[str] = field(default=None, metadata={"loading": True}) default_creation_method: Literal["from_config", "from_pretrained"] = "from_pretrained" def __hash__(self): """Make ComponentSpec hashable, using load_id as the hash value.""" return hash((self.name, self.load_id, self.default_creation_method)) def __eq__(self, other): """Compare ComponentSpec objects based on name and load_id.""" if not isinstance(other, ComponentSpec): return False return ( self.name == other.name and self.load_id == other.load_id and self.default_creation_method == other.default_creation_method ) @classmethod def from_component(cls, name: str, component: Any) -> Any: """Create a ComponentSpec from a Component. Currently supports: - Components created with `ComponentSpec.load()` method - Components that are ConfigMixin subclasses but not nn.Modules (e.g. schedulers, guiders) Args: name: Name of the component component: Component object to create spec from Returns: ComponentSpec object Raises: ValueError: If component is not supported (e.g. nn.Module without load_id, non-ConfigMixin) """ # Check if component was created with ComponentSpec.load() if hasattr(component, "_diffusers_load_id") and component._diffusers_load_id != "null": # component has a usable load_id -> from_pretrained, no warning needed default_creation_method = "from_pretrained" else: # Component doesn't have a usable load_id, check if it's a nn.Module if isinstance(component, torch.nn.Module): raise ValueError( "Cannot create ComponentSpec from a nn.Module that was not created with `ComponentSpec.load()` method." ) # ConfigMixin objects without weights (e.g. scheduler & guider) can be recreated with from_config elif isinstance(component, ConfigMixin): # warn if component was not created with `ComponentSpec` if not hasattr(component, "_diffusers_load_id"): logger.warning( "Component was not created using `ComponentSpec`, defaulting to `from_config` creation method" ) default_creation_method = "from_config" else: # Not a ConfigMixin and not created with `ComponentSpec.load()` method -> throw error raise ValueError( f"Cannot create ComponentSpec from {name}({component.__class__.__name__}). Currently ComponentSpec.from_component() only supports: " f" - components created with `ComponentSpec.load()` method" f" - components that are a subclass of ConfigMixin but not a nn.Module (e.g. guider, scheduler)." ) type_hint = component.__class__ if isinstance(component, ConfigMixin) and default_creation_method == "from_config": config = component.config else: config = None if hasattr(component, "_diffusers_load_id") and component._diffusers_load_id != "null": load_spec = cls.decode_load_id(component._diffusers_load_id) else: load_spec = {} return cls( name=name, type_hint=type_hint, config=config, default_creation_method=default_creation_method, **load_spec ) @classmethod def loading_fields(cls) -> List[str]: """ Return the names of all loading‐related fields (i.e. those whose field.metadata["loading"] is True). """ return [f.name for f in fields(cls) if f.metadata.get("loading", False)] @property def load_id(self) -> str: """ Unique identifier for this spec's pretrained load, composed of repo|subfolder|variant|revision (no empty segments). 
""" if self.default_creation_method == "from_config": return "null" parts = [getattr(self, k) for k in self.loading_fields()] parts = ["null" if p is None else p for p in parts] return "|".join(p for p in parts if p) @classmethod def decode_load_id(cls, load_id: str) -> Dict[str, Optional[str]]: """ Decode a load_id string back into a dictionary of loading fields and values. Args: load_id: The load_id string to decode, format: "repo|subfolder|variant|revision" where None values are represented as "null" Returns: Dict mapping loading field names to their values. e.g. { "repo": "path/to/repo", "subfolder": "subfolder", "variant": "variant", "revision": "revision" } If a segment value is "null", it's replaced with None. Returns None if load_id is "null" (indicating component not created with `load` method). """ # Get all loading fields in order loading_fields = cls.loading_fields() result = dict.fromkeys(loading_fields) if load_id == "null": return result # Split the load_id parts = load_id.split("|") # Map parts to loading fields by position for i, part in enumerate(parts): if i < len(loading_fields): # Convert "null" string back to None result[loading_fields[i]] = None if part == "null" else part return result # YiYi TODO: I think we should only support ConfigMixin for this method (after we make guider and image_processors config mixin) # otherwise we cannot do spec -> spec.create() -> component -> ComponentSpec.from_component(component) # the config info is lost in the process # remove error check in from_component spec and ModularPipeline.update_components() if we remove support for non configmixin in `create()` method def create(self, config: Optional[Union[FrozenDict, Dict[str, Any]]] = None, **kwargs) -> Any: """Create component using from_config with config.""" if self.type_hint is None or not isinstance(self.type_hint, type): raise ValueError("`type_hint` is required when using from_config creation method.") config = config or self.config or {} if issubclass(self.type_hint, ConfigMixin): component = self.type_hint.from_config(config, **kwargs) else: signature_params = inspect.signature(self.type_hint.__init__).parameters init_kwargs = {} for k, v in config.items(): if k in signature_params: init_kwargs[k] = v for k, v in kwargs.items(): if k in signature_params: init_kwargs[k] = v component = self.type_hint(**init_kwargs) component._diffusers_load_id = "null" if hasattr(component, "config"): self.config = component.config return component # YiYi TODO: add guard for type of model, if it is supported by from_pretrained def load(self, **kwargs) -> Any: """Load component using from_pretrained.""" # select loading fields from kwargs passed from user: e.g. repo, subfolder, variant, revision, note the list could change passed_loading_kwargs = {key: kwargs.pop(key) for key in self.loading_fields() if key in kwargs} # merge loading field value in the spec with user passed values to create load_kwargs load_kwargs = {key: passed_loading_kwargs.get(key, getattr(self, key)) for key in self.loading_fields()} # repo is a required argument for from_pretrained, a.k.a. 
pretrained_model_name_or_path repo = load_kwargs.pop("repo", None) if repo is None: raise ValueError( "`repo` info is required when using `load` method (you can directly set it in `repo` field of the ComponentSpec or pass it as an argument)" ) if self.type_hint is None: try: from diffusers import AutoModel component = AutoModel.from_pretrained(repo, **load_kwargs, **kwargs) except Exception as e: raise ValueError(f"Unable to load {self.name} without `type_hint`: {e}") # update type_hint if AutoModel load successfully self.type_hint = component.__class__ else: try: component = self.type_hint.from_pretrained(repo, **load_kwargs, **kwargs) except Exception as e: raise ValueError(f"Unable to load {self.name} using load method: {e}") self.repo = repo for k, v in load_kwargs.items(): setattr(self, k, v) component._diffusers_load_id = self.load_id return component @dataclass class ConfigSpec: """Specification for a pipeline configuration parameter.""" name: str default: Any description: Optional[str] = None # YiYi Notes: both inputs and intermediate_inputs are InputParam objects # however some fields are not relevant for intermediate_inputs # e.g. unlike inputs, required only used in docstring for intermediate_inputs, we do not check if a required intermediate inputs is passed # default is not used for intermediate_inputs, we only use default from inputs, so it is ignored if it is set for intermediate_inputs # -> should we use different class for inputs and intermediate_inputs? @dataclass class InputParam: """Specification for an input parameter.""" name: str = None type_hint: Any = None default: Any = None required: bool = False description: str = "" kwargs_type: str = None # YiYi Notes: remove this feature (maybe) def __repr__(self): return f"<{self.name}: {'required' if self.required else 'optional'}, default={self.default}>" @dataclass class OutputParam: """Specification for an output parameter.""" name: str type_hint: Any = None description: str = "" kwargs_type: str = None # YiYi notes: remove this feature (maybe) def __repr__(self): return ( f"<{self.name}: {self.type_hint.__name__ if hasattr(self.type_hint, '__name__') else str(self.type_hint)}>" ) def format_inputs_short(inputs): """ Format input parameters into a string representation, with required params first followed by optional ones. Args: inputs: List of input parameters with 'required' and 'name' attributes, and 'default' for optional params Returns: str: Formatted string of input parameters Example: >>> inputs = [ ... InputParam(name="prompt", required=True), ... InputParam(name="image", required=True), ... InputParam(name="guidance_scale", required=False, default=7.5), ... InputParam(name="num_inference_steps", required=False, default=50) ... ] >>> format_inputs_short(inputs) 'prompt, image, guidance_scale=7.5, num_inference_steps=50' """ required_inputs = [param for param in inputs if param.required] optional_inputs = [param for param in inputs if not param.required] required_str = ", ".join(param.name for param in required_inputs) optional_str = ", ".join(f"{param.name}={param.default}" for param in optional_inputs) inputs_str = required_str if optional_str: inputs_str = f"{inputs_str}, {optional_str}" if required_str else optional_str return inputs_str def format_intermediates_short(intermediate_inputs, required_intermediate_inputs, intermediate_outputs): """ Formats intermediate inputs and outputs of a block into a string representation. 
Args: intermediate_inputs: List of intermediate input parameters required_intermediate_inputs: List of required intermediate input names intermediate_outputs: List of intermediate output parameters Returns: str: Formatted string like: Intermediates: - inputs: Required(latents), dtype - modified: latents # variables that appear in both inputs and outputs - outputs: images # new outputs only """ # Handle inputs input_parts = [] for inp in intermediate_inputs: if inp.name in required_intermediate_inputs: input_parts.append(f"Required({inp.name})") else: if inp.name is None and inp.kwargs_type is not None: inp_name = "*_" + inp.kwargs_type else: inp_name = inp.name input_parts.append(inp_name) # Handle modified variables (appear in both inputs and outputs) inputs_set = {inp.name for inp in intermediate_inputs} modified_parts = [] new_output_parts = [] for out in intermediate_outputs: if out.name in inputs_set: modified_parts.append(out.name) else: new_output_parts.append(out.name) result = [] if input_parts: result.append(f" - inputs: {', '.join(input_parts)}") if modified_parts: result.append(f" - modified: {', '.join(modified_parts)}") if new_output_parts: result.append(f" - outputs: {', '.join(new_output_parts)}") return "\n".join(result) if result else " (none)" def format_params(params, header="Args", indent_level=4, max_line_length=115): """Format a list of InputParam or OutputParam objects into a readable string representation. Args: params: List of InputParam or OutputParam objects to format header: Header text to use (e.g. "Args" or "Returns") indent_level: Number of spaces to indent each parameter line (default: 4) max_line_length: Maximum length for each line before wrapping (default: 115) Returns: A formatted string representing all parameters """ if not params: return "" base_indent = " " * indent_level param_indent = " " * (indent_level + 4) desc_indent = " " * (indent_level + 8) formatted_params = [] def get_type_str(type_hint): if hasattr(type_hint, "__origin__") and type_hint.__origin__ is Union: types = [t.__name__ if hasattr(t, "__name__") else str(t) for t in type_hint.__args__] return f"Union[{', '.join(types)}]" return type_hint.__name__ if hasattr(type_hint, "__name__") else str(type_hint) def wrap_text(text, indent, max_length): """Wrap text while preserving markdown links and maintaining indentation.""" words = text.split() lines = [] current_line = [] current_length = 0 for word in words: word_length = len(word) + (1 if current_line else 0) if current_line and current_length + word_length > max_length: lines.append(" ".join(current_line)) current_line = [word] current_length = len(word) else: current_line.append(word) current_length += word_length if current_line: lines.append(" ".join(current_line)) return f"\n{indent}".join(lines) # Add the header formatted_params.append(f"{base_indent}{header}:") for param in params: # Format parameter name and type type_str = get_type_str(param.type_hint) if param.type_hint != Any else "" # YiYi Notes: remove this line if we remove kwargs_type name = f"**{param.kwargs_type}" if param.name is None and param.kwargs_type is not None else param.name param_str = f"{param_indent}{name} (`{type_str}`" # Add optional tag and default value if parameter is an InputParam and optional if hasattr(param, "required"): if not param.required: param_str += ", *optional*" if param.default is not None: param_str += f", defaults to {param.default}" param_str += "):" # Add description on a new line with additional indentation and wrapping if 
param.description: desc = re.sub(r"\[(.*?)\]\((https?://[^\s\)]+)\)", r"[\1](\2)", param.description) wrapped_desc = wrap_text(desc, desc_indent, max_line_length) param_str += f"\n{desc_indent}{wrapped_desc}" formatted_params.append(param_str) return "\n\n".join(formatted_params) def format_input_params(input_params, indent_level=4, max_line_length=115): """Format a list of InputParam objects into a readable string representation. Args: input_params: List of InputParam objects to format indent_level: Number of spaces to indent each parameter line (default: 4) max_line_length: Maximum length for each line before wrapping (default: 115) Returns: A formatted string representing all input parameters """ return format_params(input_params, "Inputs", indent_level, max_line_length) def format_output_params(output_params, indent_level=4, max_line_length=115): """Format a list of OutputParam objects into a readable string representation. Args: output_params: List of OutputParam objects to format indent_level: Number of spaces to indent each parameter line (default: 4) max_line_length: Maximum length for each line before wrapping (default: 115) Returns: A formatted string representing all output parameters """ return format_params(output_params, "Outputs", indent_level, max_line_length) def format_components(components, indent_level=4, max_line_length=115, add_empty_lines=True): """Format a list of ComponentSpec objects into a readable string representation. Args: components: List of ComponentSpec objects to format indent_level: Number of spaces to indent each component line (default: 4) max_line_length: Maximum length for each line before wrapping (default: 115) add_empty_lines: Whether to add empty lines between components (default: True) Returns: A formatted string representing all components """ if not components: return "" base_indent = " " * indent_level component_indent = " " * (indent_level + 4) formatted_components = [] # Add the header formatted_components.append(f"{base_indent}Components:") if add_empty_lines: formatted_components.append("") # Add each component with optional empty lines between them for i, component in enumerate(components): # Get type name, handling special cases type_name = ( component.type_hint.__name__ if hasattr(component.type_hint, "__name__") else str(component.type_hint) ) component_desc = f"{component_indent}{component.name} (`{type_name}`)" if component.description: component_desc += f": {component.description}" # Get the loading fields dynamically loading_field_values = [] for field_name in component.loading_fields(): field_value = getattr(component, field_name) if field_value is not None: loading_field_values.append(f"{field_name}={field_value}") # Add loading field information if available if loading_field_values: component_desc += f" [{', '.join(loading_field_values)}]" formatted_components.append(component_desc) # Add an empty line after each component except the last one if add_empty_lines and i < len(components) - 1: formatted_components.append("") return "\n".join(formatted_components) def format_configs(configs, indent_level=4, max_line_length=115, add_empty_lines=True): """Format a list of ConfigSpec objects into a readable string representation. 
    Args:
        configs: List of ConfigSpec objects to format
        indent_level: Number of spaces to indent each config line (default: 4)
        max_line_length: Maximum length for each line before wrapping (default: 115)
        add_empty_lines: Whether to add empty lines between configs (default: True)

    Returns:
        A formatted string representing all configs
    """
    if not configs:
        return ""

    base_indent = " " * indent_level
    config_indent = " " * (indent_level + 4)
    formatted_configs = []

    # Add the header
    formatted_configs.append(f"{base_indent}Configs:")
    if add_empty_lines:
        formatted_configs.append("")

    # Add each config with optional empty lines between them
    for i, config in enumerate(configs):
        config_desc = f"{config_indent}{config.name} (default: {config.default})"
        if config.description:
            config_desc += f": {config.description}"
        formatted_configs.append(config_desc)

        # Add an empty line after each config except the last one
        if add_empty_lines and i < len(configs) - 1:
            formatted_configs.append("")

    return "\n".join(formatted_configs)


def make_doc_string(
    inputs,
    outputs,
    description="",
    class_name=None,
    expected_components=None,
    expected_configs=None,
):
    """
    Generates a formatted documentation string describing the pipeline block's parameters and structure.

    Args:
        inputs: List of input parameters
        outputs: List of output parameters
        description (str, *optional*): Description of the block
        class_name (str, *optional*): Name of the class to include in the documentation
        expected_components (List[ComponentSpec], *optional*): List of expected components
        expected_configs (List[ConfigSpec], *optional*): List of expected configurations

    Returns:
        str: A formatted string containing information about components, configs, call parameters,
            intermediate inputs/outputs, and final outputs.
    """
    output = ""

    # Add class name if provided
    if class_name:
        output += f"class {class_name}\n\n"

    # Add description
    if description:
        desc_lines = description.strip().split("\n")
        aligned_desc = "\n".join(" " + line for line in desc_lines)
        output += aligned_desc + "\n\n"

    # Add components section if provided
    if expected_components and len(expected_components) > 0:
        components_str = format_components(expected_components, indent_level=2)
        output += components_str + "\n\n"

    # Add configs section if provided
    if expected_configs and len(expected_configs) > 0:
        configs_str = format_configs(expected_configs, indent_level=2)
        output += configs_str + "\n\n"

    # Add inputs section
    output += format_input_params(inputs, indent_level=2)

    # Add outputs section
    output += "\n\n"
    output += format_output_params(outputs, indent_level=2)

    return output
diffusers/src/diffusers/modular_pipelines/modular_pipeline_utils.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/modular_pipeline_utils.py", "repo_id": "diffusers", "token_count": 10220 }
163
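A minimal usage sketch for the docstring-formatting helpers in the entry above. It assumes `InputParam`, `OutputParam`, `format_inputs_short`, and `make_doc_string` are importable from `diffusers.modular_pipelines.modular_pipeline_utils` (the path recorded above); the parameter names and the `ExampleBlock` class name are illustrative placeholders, not values from the original module.

# Hedged usage sketch; the block and parameter names below are made up for illustration.
from diffusers.modular_pipelines.modular_pipeline_utils import (
    InputParam,
    OutputParam,
    format_inputs_short,
    make_doc_string,
)

inputs = [
    InputParam(name="prompt", type_hint=str, required=True, description="Text prompt to condition on."),
    InputParam(name="guidance_scale", type_hint=float, default=7.5, description="Classifier-free guidance scale."),
]
outputs = [OutputParam(name="images", type_hint=list, description="Generated images.")]

# Compact one-line signature, e.g. "prompt, guidance_scale=7.5"
print(format_inputs_short(inputs))

# Full docstring with Inputs/Outputs sections for a hypothetical block class
print(make_doc_string(inputs, outputs, description="Example text-to-image block.", class_name="ExampleBlock"))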
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for diffusion models."""

import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a learning rate that follows the piecewise constant multipliers defined by `step_rules`,
    applied to the initial learning rate set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        step_rules (`string`):
            The rules for the learning rate. For example, `step_rules="1:10,0.1:20,0.01:30,0.005"` means the learning
            rate is multiplied by 1 for the first 10 steps, by 0.1 for the next 20 steps, by 0.01 for the next 30
            steps, and by 0.005 for all remaining steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
""" rules_dict = {} rule_list = step_rules.split(",") for rule_str in rule_list[:-1]: value_str, steps_str = rule_str.split(":") steps = int(steps_str) value = float(value_str) rules_dict[steps] = value last_lr_multiple = float(rule_list[-1]) def create_rules_function(rules_dict, last_lr_multiple): def rule_func(steps: int) -> float: sorted_steps = sorted(rules_dict.keys()) for i, sorted_step in enumerate(sorted_steps): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func rules_func = create_rules_function(rules_dict, last_lr_multiple) return LambdaLR(optimizer, rules_func, last_epoch=last_epoch) def get_linear_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1 ) -> LambdaLR: """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ) -> LambdaLR: """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_periods (`float`, *optional*, defaults to 0.5): The number of periods of the cosine function in a schedule (the default is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ) -> LambdaLR: """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`int`, *optional*, defaults to 1): The number of hard restarts to use. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_polynomial_decay_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1, ) -> LambdaLR: """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. lr_end (`float`, *optional*, defaults to 1e-7): The end LR. power (`float`, *optional*, defaults to 1.0): Power factor. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" lr_init = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})") def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(optimizer, lr_lambda, last_epoch) TYPE_TO_SCHEDULER_FUNCTION = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def get_scheduler( name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ) -> LambdaLR: """ Unified API to get any scheduler from its name. Args: name (`str` or `SchedulerType`): The name of the scheduler to use. optimizer (`torch.optim.Optimizer`): The optimizer that will be used during training. step_rules (`str`, *optional*): A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler. num_warmup_steps (`int`, *optional*): The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_training_steps (`int``, *optional*): The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_cycles (`int`, *optional*): The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler. power (`float`, *optional*, defaults to 1.0): Power factor. See `POLYNOMIAL` scheduler last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. 
""" name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(optimizer, last_epoch=last_epoch) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, ) if name == SchedulerType.POLYNOMIAL: return schedule_func( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, ) return schedule_func( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
diffusers/src/diffusers/optimization.py/0
{ "file_path": "diffusers/src/diffusers/optimization.py", "repo_id": "diffusers", "token_count": 5885 }
164
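A rough sketch of how the `get_scheduler` entry point from `diffusers.optimization` (the file above) is typically wired into a training loop. The model, optimizer settings, scheduler name, and step counts below are placeholders chosen for illustration, not values taken from the file.

import torch

from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)  # stand-in for a real diffusion model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

num_training_steps = 1_000
lr_scheduler = get_scheduler(
    "cosine",  # any SchedulerType value, e.g. "linear" or "polynomial"
    optimizer=optimizer,
    num_warmup_steps=100,
    num_training_steps=num_training_steps,
)

for _ in range(num_training_steps):
    loss = model(torch.randn(8, 4)).pow(2).mean()  # placeholder objective
    loss.backward()
    optimizer.step()
    lr_scheduler.step()  # advances the LambdaLR returned by get_scheduler
    optimizer.zero_grad()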